Rebased to Nova revision 749.

Author: sateesh
Date: 2011-02-27 08:07:24 +05:30
321 changed files with 29541 additions and 4436 deletions

View File

@@ -13,3 +13,4 @@ CA/serial*
CA/newcerts/*.pem
CA/private/cakey.pem
nova/vcsversion.py
*.DS_Store

View File

@@ -1,36 +1,43 @@
# Format is:
# <preferred e-mail> <other e-mail>
<code@term.ie> <github@anarkystic.com>
<code@term.ie> <termie@preciousroy.local>
<Armando.Migliaccio@eu.citrix.com> <armando.migliaccio@citrix.com>
<matt.dietz@rackspace.com> <matthewdietz@Matthew-Dietzs-MacBook-Pro.local>
<matt.dietz@rackspace.com> <mdietz@openstack>
<cbehrens@codestud.com> <chris.behrens@rackspace.com>
<devin.carlen@gmail.com> <devcamcar@illian.local>
<ewan.mellor@citrix.com> <emellor@silver>
<jaypipes@gmail.com> <jpipes@serialcoder>
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>
<anotherjesse@gmail.com> <jesse@dancelamb>
<anotherjesse@gmail.com> <jesse@gigantor.local>
<anotherjesse@gmail.com> <jesse@ubuntu>
<jmckenty@gmail.com> <jmckenty@yyj-dhcp171.corp.flock.com>
<ant@openstack.org> <amesserl@rackspace.com>
<Armando.Migliaccio@eu.citrix.com> <armando.migliaccio@citrix.com>
<brian.lamar@rackspace.com> <brian.lamar@gmail.com>
<bschott@isi.edu> <bfschott@gmail.com>
<cbehrens@codestud.com> <chris.behrens@rackspace.com>
<chiradeep@cloud.com> <chiradeep@chiradeep-lt2>
<code@term.ie> <github@anarkystic.com>
<code@term.ie> <termie@preciousroy.local>
<corywright@gmail.com> <cory.wright@rackspace.com>
<devin.carlen@gmail.com> <devcamcar@illian.local>
<ewan.mellor@citrix.com> <emellor@silver>
<jaypipes@gmail.com> <jpipes@serialcoder>
<jmckenty@gmail.com> <jmckenty@joshua-mckentys-macbook-pro.local>
<jmckenty@gmail.com> <jmckenty@yyj-dhcp171.corp.flock.com>
<jmckenty@gmail.com> <joshua.mckenty@nasa.gov>
<justin@fathomdb.com> <justinsb@justinsb-desktop>
<masumotok@nttdata.co.jp> <root@openstack2-api>
<justin@fathomdb.com> <superstack@superstack.org>
<masumotok@nttdata.co.jp> Masumoto<masumotok@nttdata.co.jp>
<masumotok@nttdata.co.jp> <root@openstack2-api>
<matt.dietz@rackspace.com> <matthewdietz@Matthew-Dietzs-MacBook-Pro.local>
<matt.dietz@rackspace.com> <mdietz@openstack>
<mordred@inaugust.com> <mordred@hudson>
<paul@openstack.org> <pvoccio@castor.local>
<paul@openstack.org> <paul.voccio@rackspace.com>
<paul@openstack.org> <pvoccio@castor.local>
<rconradharris@gmail.com> <rick.harris@rackspace.com>
<rlane@wikimedia.org> <laner@controller>
<sleepsonthefloor@gmail.com> <root@tonbuntu>
<soren.hansen@rackspace.com> <soren@linux2go.dk>
<todd@ansolabs.com> <todd@lapex>
<todd@ansolabs.com> <todd@rubidine.com>
<vishvananda@gmail.com> <vishvananda@yahoo.com>
<tushar.vitthal.patil@gmail.com> <tpatil@vertex.co.in>
<ueno.nachi@lab.ntt.co.jp> <nati.ueno@gmail.com>
<ueno.nachi@lab.ntt.co.jp> <nova@u4>
<ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp>
<vishvananda@gmail.com> <root@mirror.nasanebula.net>
<vishvananda@gmail.com> <root@ubuntu>
<sleepsonthefloor@gmail.com> <root@tonbuntu>
<rlane@wikimedia.org> <laner@controller>
<rconradharris@gmail.com> <rick.harris@rackspace.com>
<corywright@gmail.com> <cory.wright@rackspace.com>
<ant@openstack.org> <amesserl@rackspace.com>
<chiradeep@cloud.com> <chiradeep@chiradeep-lt2>
<justin@fathomdb.com> <superstack@superstack.org>
<vishvananda@gmail.com> <vishvananda@yahoo.com>

Authors
View File

@@ -4,14 +4,16 @@ Anthony Young <sleepsonthefloor@gmail.com>
Antony Messerli <ant@openstack.org>
Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
Bilal Akhtar <bilalakhtar@ubuntu.com>
Brian Schott <bschott@isi.edu> <bfschott@gmail.com>
Brian Lamar <brian.lamar@rackspace.com>
Brian Schott <bschott@isi.edu>
Brian Waldon <brian.waldon@rackspace.com>
Chiradeep Vittal <chiradeep@cloud.com>
Chmouel Boudjnah <chmouel@chmouel.com>
Chris Behrens <cbehrens@codestud.com>
Christian Berendt <berendt@b1-systems.de>
Cory Wright <corywright@gmail.com>
David Pravec <David.Pravec@danix.org>
Dan Prince <dan.prince@rackspace.com>
David Pravec <David.Pravec@danix.org>
Dean Troyer <dtroyer@gmail.com>
Devin Carlen <devin.carlen@gmail.com>
Ed Leafe <ed@leafe.com>
@@ -34,6 +36,7 @@ Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>
Kei Masumoto <masumotok@nttdata.co.jp>
Ken Pepple <ken.pepple@gmail.com>
Kevin L. Mitchell <kevin.mitchell@rackspace.com>
Koji Iida <iida.koji@lab.ntt.co.jp>
Lorin Hochstein <lorin@isi.edu>
Matt Dietz <matt.dietz@rackspace.com>
@@ -42,7 +45,8 @@ Monsyne Dragon <mdragon@rackspace.com>
Monty Taylor <mordred@inaugust.com>
MORITA Kazutaka <morita.kazutaka@gmail.com>
Muneyuki Noguchi <noguchimn@nttdata.co.jp>
Nachi Ueno <ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp> <nati.ueno@gmail.com> <nova@u4>
Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
Naveed Massjouni <naveed.massjouni@rackspace.com>
Paul Voccio <paul@openstack.org>
Ricardo Carrillo Cruz <emaildericky@gmail.com>
Rick Clark <rick@openstack.org>
@@ -57,7 +61,7 @@ Soren Hansen <soren.hansen@rackspace.com>
Thierry Carrez <thierry@openstack.org>
Todd Willey <todd@ansolabs.com>
Trey Morris <trey.morris@rackspace.com>
Tushar Patil <tushar.vitthal.patil@gmail.com> <tpatil@vertex.co.in>
Tushar Patil <tushar.vitthal.patil@gmail.com>
Vasiliy Shlykov <vash@vasiliyshlykov.org>
Vishvananda Ishaya <vishvananda@gmail.com>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>

View File

@@ -38,3 +38,4 @@ include nova/tests/db/nova.austin.sqlite
include plugins/xenapi/README
include plugins/xenapi/etc/xapi.d/plugins/objectstore
include plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
global-exclude *.pyc

View File

@@ -1,2 +0,0 @@
[python: **.py]

View File

@@ -25,7 +25,6 @@ from eventlet.green import urllib2
import exceptions
import gettext
import logging
import os
import sys
import time
@@ -48,9 +47,11 @@ from nova import utils
from nova import wsgi
FLAGS = flags.FLAGS
flags.DEFINE_integer('ajax_console_idle_timeout', 300,
'Seconds before idle connection destroyed')
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())
LOG = logging.getLogger('nova.ajax_console_proxy')
LOG.setLevel(logging.DEBUG)
@@ -62,10 +63,16 @@ class AjaxConsoleProxy(object):
def __call__(self, env, start_response):
try:
req_url = '%s://%s%s?%s' % (env['wsgi.url_scheme'],
env['HTTP_HOST'],
env['PATH_INFO'],
env['QUERY_STRING'])
if 'QUERY_STRING' in env:
req_url = '%s://%s%s?%s' % (env['wsgi.url_scheme'],
env['HTTP_HOST'],
env['PATH_INFO'],
env['QUERY_STRING'])
else:
req_url = '%s://%s%s' % (env['wsgi.url_scheme'],
env['HTTP_HOST'],
env['PATH_INFO'])
if 'HTTP_REFERER' in env:
auth_url = env['HTTP_REFERER']
else:
@@ -130,6 +137,7 @@ class AjaxConsoleProxy(object):
if __name__ == '__main__':
utils.default_flagfile()
FLAGS(sys.argv)
logging.setup()
server = wsgi.Server()
acp = AjaxConsoleProxy()
acp.register_listeners()
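
Note on the hunk above: PEP 333 only guarantees that QUERY_STRING "may be
empty or absent" from a WSGI environ, which is why the rewritten handler
branches before appending a '?'. A minimal stand-alone sketch of the same
rebuild logic (hypothetical helper, not part of this commit):

    def rebuild_url(env):
        # Scheme, host and path are always present in a WSGI environ.
        url = '%s://%s%s' % (env['wsgi.url_scheme'],
                             env['HTTP_HOST'],
                             env['PATH_INFO'])
        # QUERY_STRING may be absent; this sketch also treats an empty
        # string as absent, slightly stricter than the 'in' check above.
        if env.get('QUERY_STRING'):
            url = '%s?%s' % (url, env['QUERY_STRING'])
        return url

    print rebuild_url({'wsgi.url_scheme': 'http',
                       'HTTP_HOST': 'localhost:8000',
                       'PATH_INFO': '/console',
                       'QUERY_STRING': 'token=abc123'})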

View File

@@ -36,14 +36,22 @@ gettext.install('nova', unicode=1)
from nova import flags
from nova import log as logging
from nova import utils
from nova import version
from nova import wsgi
logging.basicConfig()
LOG = logging.getLogger('nova.api')
LOG.setLevel(logging.DEBUG)
FLAGS = flags.FLAGS
flags.DEFINE_string('ec2_listen', "0.0.0.0",
'IP address for EC2 API to listen')
flags.DEFINE_integer('ec2_listen_port', 8773, 'port for ec2 api to listen')
flags.DEFINE_string('osapi_listen', "0.0.0.0",
'IP address for OpenStack API to listen')
flags.DEFINE_integer('osapi_listen_port', 8774, 'port for os api to listen')
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())
API_ENDPOINTS = ['ec2', 'osapi']
@@ -57,21 +65,15 @@ def run_app(paste_config_file):
LOG.debug(_("No paste configuration for app: %s"), api)
continue
LOG.debug(_("App Config: %(api)s\n%(config)r") % locals())
wsgi.paste_config_to_flags(config, {
"verbose": FLAGS.verbose,
"%s_host" % api: config.get('host', '0.0.0.0'),
"%s_port" % api: getattr(FLAGS, "%s_port" % api)})
LOG.info(_("Running %s API"), api)
app = wsgi.load_paste_app(paste_config_file, api)
apps.append((app, getattr(FLAGS, "%s_port" % api),
getattr(FLAGS, "%s_host" % api)))
apps.append((app, getattr(FLAGS, "%s_listen_port" % api),
getattr(FLAGS, "%s_listen" % api)))
if len(apps) == 0:
LOG.error(_("No known API applications configured in %s."),
paste_config_file)
return
# NOTE(todd): redo logging config, verbose could be set in paste config
logging.basicConfig()
server = wsgi.Server()
for app in apps:
server.start(*app)
@@ -79,9 +81,15 @@ def run_app(paste_config_file):
if __name__ == '__main__':
utils.default_flagfile()
FLAGS(sys.argv)
logging.setup()
LOG.audit(_("Starting nova-api node (version %s)"),
version.version_string_with_vcs())
LOG.debug(_("Full set of FLAGS:"))
for flag in FLAGS:
flag_get = FLAGS.get(flag, None)
LOG.debug("%(flag)s : %(flag_get)s" % locals())
conf = wsgi.paste_config_file('nova-api.conf')
if conf:
run_app(conf)
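
Note: the renamed flags keep the "<api>_listen" / "<api>_listen_port"
naming scheme, which is what lets run_app() look them up generically with
getattr. A toy illustration of that lookup pattern (FakeFlags is a
stand-in for the real gflags FLAGS object; values mirror the DEFINE_*
defaults above):

    class FakeFlags(object):
        ec2_listen = '0.0.0.0'
        ec2_listen_port = 8773
        osapi_listen = '0.0.0.0'
        osapi_listen_port = 8774

    FLAGS = FakeFlags()
    for api in ['ec2', 'osapi']:
        # Same dynamic per-API lookup that run_app() performs.
        print api, getattr(FLAGS, '%s_listen' % api), \
            getattr(FLAGS, '%s_listen_port' % api)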

View File

@@ -1,80 +0,0 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Combined starter script for Nova services."""
import eventlet
eventlet.monkey_patch()
import gettext
import os
import sys
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
gettext.install('nova', unicode=1)
from nova import flags
from nova import log as logging
from nova import service
from nova import utils
from nova import wsgi
FLAGS = flags.FLAGS
if __name__ == '__main__':
utils.default_flagfile()
FLAGS(sys.argv)
logging.basicConfig()
compute = service.Service.create(binary='nova-compute')
network = service.Service.create(binary='nova-network')
volume = service.Service.create(binary='nova-volume')
scheduler = service.Service.create(binary='nova-scheduler')
#objectstore = service.Service.create(binary='nova-objectstore')
service.serve(compute, network, volume, scheduler)
apps = []
paste_config_file = wsgi.paste_config_file('nova-api.conf')
for api in ['osapi', 'ec2']:
config = wsgi.load_paste_configuration(paste_config_file, api)
if config is None:
continue
wsgi.paste_config_to_flags(config, {
"verbose": FLAGS.verbose,
"%s_host" % api: config.get('host', '0.0.0.0'),
"%s_port" % api: getattr(FLAGS, "%s_port" % api)})
app = wsgi.load_paste_app(paste_config_file, api)
apps.append((app, getattr(FLAGS, "%s_port" % api),
getattr(FLAGS, "%s_host" % api)))
if len(apps) > 0:
logging.basicConfig()
server = wsgi.Server()
for app in apps:
server.start(*app)
server.wait()

View File

@@ -36,10 +36,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import flags
from nova import log as logging
from nova import service
from nova import utils
if __name__ == '__main__':
utils.default_flagfile()
flags.FLAGS(sys.argv)
logging.setup()
service.serve()
service.wait()

View File

@@ -35,10 +35,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import flags
from nova import log as logging
from nova import service
from nova import utils
if __name__ == '__main__':
utils.default_flagfile()
flags.FLAGS(sys.argv)
logging.setup()
service.serve()
service.wait()

View File

@@ -102,19 +102,10 @@ def main():
flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile)
utils.default_flagfile(flagfile)
argv = FLAGS(sys.argv)
logging.basicConfig()
logging.setup()
interface = os.environ.get('DNSMASQ_INTERFACE', 'br0')
if int(os.environ.get('TESTING', '0')):
FLAGS.fake_rabbit = True
FLAGS.network_size = 16
FLAGS.connection_type = 'fake'
FLAGS.fake_network = True
FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver'
FLAGS.num_networks = 5
path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..',
'nova.sqlite'))
FLAGS.sql_connection = 'sqlite:///%s' % path
from nova.tests import fake_flags
action = argv[1]
if action in ['add', 'del', 'old']:
mac = argv[2]
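
Note: the deleted TESTING block is replaced by importing
nova.tests.fake_flags, which presumably applies equivalent fake settings
as an import side effect. A sketch of that pattern (attribute names taken
from the deleted block; not the real module contents):

    # fake_flags.py -- importing this module mutates the global FLAGS.
    from nova import flags

    FLAGS = flags.FLAGS
    FLAGS.fake_rabbit = True
    FLAGS.connection_type = 'fake'
    FLAGS.fake_network = True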

View File

@@ -35,6 +35,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import flags
from nova import log as logging
from nova import utils
from nova import wsgi
from nova.api import direct
@@ -44,10 +45,15 @@ from nova.compute import api as compute_api
FLAGS = flags.FLAGS
flags.DEFINE_integer('direct_port', 8001, 'Direct API port')
flags.DEFINE_string('direct_host', '0.0.0.0', 'Direct API host')
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())
if __name__ == '__main__':
utils.default_flagfile()
FLAGS(sys.argv)
logging.setup()
direct.register_service('compute', compute_api.API())
direct.register_service('reflect', direct.Reflection())

View File

@@ -41,6 +41,7 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import flags
from nova import log as logging
from nova import utils
from nova.objectstore import image
@@ -92,6 +93,7 @@ def main():
"""Main entry point."""
utils.default_flagfile()
argv = FLAGS(sys.argv)
logging.setup()
images = get_images()
if len(argv) == 2:

View File

@@ -41,9 +41,6 @@ from nova import utils
from nova import twistd
from nova.compute import monitor
# TODO(todd): shouldn't this be done with flags? And what about verbose?
logging.getLogger('boto').setLevel(logging.WARN)
LOG = logging.getLogger('nova.instancemonitor')

View File

@@ -86,8 +86,6 @@ from nova.auth import manager
from nova.cloudpipe import pipelib
from nova.db import migration
logging.basicConfig()
FLAGS = flags.FLAGS
flags.DECLARE('fixed_range', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
@@ -95,6 +93,9 @@ flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('vlan_start', 'nova.network.manager')
flags.DECLARE('vpn_start', 'nova.network.manager')
flags.DECLARE('fixed_range_v6', 'nova.network.manager')
flags.DEFINE_flag(flags.HelpFlag())
flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())
def param2id(object_id):
@@ -433,6 +434,37 @@ class ProjectCommands(object):
"nova-api server on this host.")
class FixedIpCommands(object):
"""Class for managing fixed ip."""
def list(self, host=None):
"""Lists all fixed ips (optionally by host) arguments: [host]"""
ctxt = context.get_admin_context()
if host == None:
fixed_ips = db.fixed_ip_get_all(ctxt)
else:
fixed_ips = db.fixed_ip_get_all_by_host(ctxt, host)
print "%-18s\t%-15s\t%-17s\t%-15s\t%s" % (_('network'),
_('IP address'),
_('MAC address'),
_('hostname'),
_('host'))
for fixed_ip in fixed_ips:
hostname = None
host = None
mac_address = None
if fixed_ip['instance']:
instance = fixed_ip['instance']
hostname = instance['hostname']
host = instance['host']
mac_address = instance['mac_address']
print "%-18s\t%-15s\t%-17s\t%-15s\t%s" % (
fixed_ip['network']['cidr'],
fixed_ip['address'],
mac_address, hostname, host)
class FloatingIpCommands(object):
"""Class for managing floating ip."""
@@ -472,8 +504,8 @@ class NetworkCommands(object):
"""Class for managing networks."""
def create(self, fixed_range=None, num_networks=None,
network_size=None, vlan_start=None, vpn_start=None,
fixed_range_v6=None):
network_size=None, vlan_start=None,
vpn_start=None, fixed_range_v6=None, label='public'):
"""Creates fixed ips for host by range
arguments: [fixed_range=FLAG], [num_networks=FLAG],
[network_size=FLAG], [vlan_start=FLAG],
@@ -495,9 +527,22 @@ class NetworkCommands(object):
cidr=fixed_range,
num_networks=int(num_networks),
network_size=int(network_size),
cidr_v6=fixed_range_v6,
vlan_start=int(vlan_start),
vpn_start=int(vpn_start))
vpn_start=int(vpn_start),
cidr_v6=fixed_range_v6,
label=label)
def list(self):
"""List all created networks"""
print "%-18s\t%-15s\t%-15s\t%-15s" % (_('network'),
_('netmask'),
_('start address'),
'DNS')
for network in db.network_get_all(context.get_admin_context()):
print "%-18s\t%-15s\t%-15s\t%-15s" % (network.cidr,
network.netmask,
network.dhcp_start,
network.dns)
class ServiceCommands(object):
@@ -508,7 +553,7 @@ class ServiceCommands(object):
args: [host] [service]"""
ctxt = context.get_admin_context()
now = datetime.datetime.utcnow()
services = db.service_get_all(ctxt)
services = db.service_get_all(ctxt) + db.service_get_all(ctxt, True)
if host:
services = [s for s in services if s['host'] == host]
if service:
@@ -622,6 +667,7 @@ CATEGORIES = [
('role', RoleCommands),
('shell', ShellCommands),
('vpn', VpnCommands),
('fixed', FixedIpCommands),
('floating', FloatingIpCommands),
('network', NetworkCommands),
('service', ServiceCommands),
@@ -665,6 +711,7 @@ def main():
"""Parse options and call the appropriate class/method."""
utils.default_flagfile()
argv = FLAGS(sys.argv)
logging.setup()
script_name = argv.pop(0)
if len(argv) < 1:
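
Note: with 'fixed' added to CATEGORIES, the dispatcher resolves
"nova-manage fixed list" to FixedIpCommands.list(). A reduced sketch of
that category/method dispatch (simplified; the real main() also parses
flags and handles errors):

    class FixedIpCommands(object):
        def list(self, host=None):
            print 'would list fixed ips, host=%s' % host

    CATEGORIES = [('fixed', FixedIpCommands)]

    def dispatch(argv):
        # argv[0] picks the command class, argv[1] a method on it.
        for name, cls in CATEGORIES:
            if name == argv[0]:
                return getattr(cls(), argv[1])(*argv[2:])

    dispatch(['fixed', 'list'])
    dispatch(['fixed', 'list', 'myhost'])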

View File

@@ -36,10 +36,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import flags
from nova import log as logging
from nova import service
from nova import utils
if __name__ == '__main__':
utils.default_flagfile()
flags.FLAGS(sys.argv)
logging.setup()
service.serve()
service.wait()

View File

@@ -36,10 +36,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import flags
from nova import log as logging
from nova import service
from nova import utils
if __name__ == '__main__':
utils.default_flagfile()
flags.FLAGS(sys.argv)
logging.setup()
service.serve()
service.wait()

View File

@@ -36,10 +36,14 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import flags
from nova import log as logging
from nova import service
from nova import utils
if __name__ == '__main__':
utils.default_flagfile()
flags.FLAGS(sys.argv)
logging.setup()
service.serve()
service.wait()

View File

@@ -66,7 +66,7 @@ if [ "$CMD" == "install" ]; then
sudo apt-get install -y user-mode-linux kvm libvirt-bin
sudo apt-get install -y screen euca2ools vlan curl rabbitmq-server
sudo apt-get install -y lvm2 iscsitarget open-iscsi
sudo apt-get install -y socat
sudo apt-get install -y socat unzip
echo "ISCSITARGET_ENABLE=true" | sudo tee /etc/default/iscsitarget
sudo /etc/init.d/iscsitarget restart
sudo modprobe kvm
@@ -111,8 +111,7 @@ if [ "$CMD" == "run" ]; then
--nodaemon
--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf
--network_manager=nova.network.manager.$NET_MAN
--cc_host=$HOST_IP
--routing_source_ip=$HOST_IP
--my_ip=$HOST_IP
--sql_connection=$SQL_CONN
--auth_driver=nova.auth.$AUTH
--libvirt_type=$LIBVIRT_TYPE
@@ -151,7 +150,6 @@ NOVA_CONF_EOF
mkdir -p $NOVA_DIR/instances
rm -rf $NOVA_DIR/networks
mkdir -p $NOVA_DIR/networks
$NOVA_DIR/tools/clean-vlans
if [ ! -d "$NOVA_DIR/images" ]; then
ln -s $DIR/images $NOVA_DIR/images
fi
@@ -169,10 +167,14 @@ NOVA_CONF_EOF
# create a project called 'admin' with project manager of 'admin'
$NOVA_DIR/bin/nova-manage project create admin admin
# export environment variables for project 'admin' and user 'admin'
$NOVA_DIR/bin/nova-manage project environment admin admin $NOVA_DIR/novarc
$NOVA_DIR/bin/nova-manage project zipfile admin admin $NOVA_DIR/nova.zip
unzip -o $NOVA_DIR/nova.zip -d $NOVA_DIR/
# create a small network
$NOVA_DIR/bin/nova-manage network create 10.0.0.0/8 1 32
# create some floating ips
$NOVA_DIR/bin/nova-manage floating create `hostname` 10.6.0.0/27
# nova api crashes if we start it with a regular screen command,
# so send the start command by forcing text into the window.
screen_it api "$NOVA_DIR/bin/nova-api"

View File

@@ -1 +0,0 @@
ENABLED=true

View File

@@ -1 +0,0 @@
ENABLED=true

View File

@@ -1,5 +0,0 @@
-----------------------------------------------
Welcome to your OpenStack installation!
-----------------------------------------------

View File

@@ -1,170 +0,0 @@
# Master configuration file for the QEMU driver.
# All settings described here are optional - if omitted, sensible
# defaults are used.
# VNC is configured to listen on 127.0.0.1 by default.
# To make it listen on all public interfaces, uncomment
# this next option.
#
# NB, strong recommendation to enable TLS + x509 certificate
# verification when allowing public access
#
# vnc_listen = "0.0.0.0"
# Enable use of TLS encryption on the VNC server. This requires
# a VNC client which supports the VeNCrypt protocol extension.
# Examples include vinagre, virt-viewer, virt-manager and vencrypt
# itself. UltraVNC, RealVNC, TightVNC do not support this
#
# It is necessary to set up a CA and issue a server certificate
# before enabling this.
#
# vnc_tls = 1
# Use of TLS requires that x509 certificates be issued. The
# default is to keep them in /etc/pki/libvirt-vnc. This directory
# must contain
#
# ca-cert.pem - the CA master certificate
# server-cert.pem - the server certificate signed with ca-cert.pem
# server-key.pem - the server private key
#
# This option allows the certificate directory to be changed
#
# vnc_tls_x509_cert_dir = "/etc/pki/libvirt-vnc"
# The default TLS configuration only uses certificates for the server
# allowing the client to verify the server's identity and establish
# an encrypted channel.
#
# It is possible to use x509 certificates for authentication too, by
# issuing an x509 certificate to every client who needs to connect.
#
# Enabling this option will reject any client who does not have a
# certificate signed by the CA in /etc/pki/libvirt-vnc/ca-cert.pem
#
# vnc_tls_x509_verify = 1
# The default VNC password. Only 8 letters are significant for
# VNC passwords. This parameter is only used if the per-domain
# XML config does not already provide a password. To allow
# access without passwords, leave this commented out. An empty
# string will still enable passwords, but be rejected by QEMU
# effectively preventing any use of VNC. Obviously change this
# example here before you set this
#
# vnc_password = "XYZ12345"
# Enable use of SASL encryption on the VNC server. This requires
# a VNC client which supports the SASL protocol extension.
# Examples include vinagre, virt-viewer and virt-manager
# itself. UltraVNC, RealVNC, TightVNC do not support this
#
# It is necessary to configure /etc/sasl2/qemu.conf to choose
# the desired SASL plugin (eg, GSSPI for Kerberos)
#
# vnc_sasl = 1
# The default SASL configuration file is located in /etc/sasl2/
# When running libvirtd unprivileged, it may be desirable to
# override the configs in this location. Set this parameter to
# point to the directory, and create a qemu.conf in that location
#
# vnc_sasl_dir = "/some/directory/sasl2"
# The default security driver is SELinux. If SELinux is disabled
# on the host, then the security driver will automatically disable
# itself. If you wish to disable QEMU SELinux security driver while
# leaving SELinux enabled for the host in general, then set this
# to 'none' instead
#
# security_driver = "selinux"
# The user ID for QEMU processes run by the system instance
user = "root"
# The group ID for QEMU processes run by the system instance
group = "root"
# Whether libvirt should dynamically change file ownership
# to match the configured user/group above. Defaults to 1.
# Set to 0 to disable file ownership changes.
#dynamic_ownership = 1
# What cgroup controllers to make use of with QEMU guests
#
# - 'cpu' - use for scheduler tunables
# - 'devices' - use for device whitelisting
#
# NB, even if configured here, they won't be used unless
# the administrator has mounted cgroups, e.g.
#
# mkdir /dev/cgroup
# mount -t cgroup -o devices,cpu none /dev/cgroup
#
# They can be mounted anywhere, and different controllers
# can be mounted in different locations. libvirt will detect
# where they are located.
#
# cgroup_controllers = [ "cpu", "devices" ]
# This is the basic set of devices allowed / required by
# all virtual machines.
#
# As well as this, any configured block backed disks,
# all sound device, and all PTY devices are allowed.
#
# This will only need setting if newer QEMU suddenly
# wants some device we don't already know about.
#
#cgroup_device_acl = [
# "/dev/null", "/dev/full", "/dev/zero",
# "/dev/random", "/dev/urandom",
# "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
# "/dev/rtc", "/dev/hpet", "/dev/net/tun",
#]
# The default format for Qemu/KVM guest save images is raw; that is, the
# memory from the domain is dumped out directly to a file. If you have
# guests with a large amount of memory, however, this can take up quite
# a bit of space. If you would like to compress the images while they
# are being saved to disk, you can also set "lzop", "gzip", "bzip2", or "xz"
# for save_image_format. Note that this means you slow down the process of
# saving a domain in order to save disk space; the list above is in descending
# order by performance and ascending order by compression ratio.
#
# save_image_format = "raw"
# If provided by the host and a hugetlbfs mount point is configured,
# a guest may request huge page backing. When this mount point is
# unspecified here, determination of a host mount point in /proc/mounts
# will be attempted. Specifying an explicit mount overrides detection
# of the same in /proc/mounts. Setting the mount point to "" will
# disable guest hugepage backing.
#
# NB, within this mount point, guests will create memory backing files
# in a location of $MOUNTPOINT/libvirt/qemu
# hugetlbfs_mount = "/dev/hugepages"
# mac_filter enables MAC addressed based filtering on bridge ports.
# This currently requires ebtables to be installed.
#
# mac_filter = 1
# By default, PCI devices below non-ACS switch are not allowed to be assigned
# to guests. By setting relaxed_acs_check to 1 such devices will be allowed to
# be assigned to guests.
#
# relaxed_acs_check = 1

View File

@@ -1,463 +0,0 @@
# This is an example configuration file for the LVM2 system.
# It contains the default settings that would be used if there was no
# /etc/lvm/lvm.conf file.
#
# Refer to 'man lvm.conf' for further information including the file layout.
#
# To put this file in a different directory and override /etc/lvm set
# the environment variable LVM_SYSTEM_DIR before running the tools.
# This section allows you to configure which block devices should
# be used by the LVM system.
devices {
# Where do you want your volume groups to appear ?
dir = "/dev"
# An array of directories that contain the device nodes you wish
# to use with LVM2.
scan = [ "/dev" ]
# If several entries in the scanned directories correspond to the
# same block device and the tools need to display a name for the device,
# all the pathnames are matched against each item in the following
# list of regular expressions in turn and the first match is used.
preferred_names = [ ]
# Try to avoid using undescriptive /dev/dm-N names, if present.
# preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ]
# A filter that tells LVM2 to only use a restricted set of devices.
# The filter consists of an array of regular expressions. These
# expressions can be delimited by a character of your choice, and
# prefixed with either an 'a' (for accept) or 'r' (for reject).
# The first expression found to match a device name determines if
# the device will be accepted or rejected (ignored). Devices that
# don't match any patterns are accepted.
# Be careful if there are symbolic links or multiple filesystem
# entries for the same device as each name is checked separately against
# the list of patterns. The effect is that if any name matches any 'a'
# pattern, the device is accepted; otherwise if any name matches any 'r'
# pattern it is rejected; otherwise it is accepted.
# Don't have more than one filter line active at once: only one gets used.
# Run vgscan after you change this parameter to ensure that
# the cache file gets regenerated (see below).
# If it doesn't do what you expect, check the output of 'vgscan -vvvv'.
# By default we accept every block device:
filter = [ "r|/dev/etherd/.*|", "r|/dev/block/.*|", "a/.*/" ]
# Exclude the cdrom drive
# filter = [ "r|/dev/cdrom|" ]
# When testing I like to work with just loopback devices:
# filter = [ "a/loop/", "r/.*/" ]
# Or maybe all loops and ide drives except hdc:
# filter =[ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ]
# Use anchors if you want to be really specific
# filter = [ "a|^/dev/hda8$|", "r/.*/" ]
# The results of the filtering are cached on disk to avoid
# rescanning dud devices (which can take a very long time).
# By default this cache is stored in the /etc/lvm/cache directory
# in a file called '.cache'.
# It is safe to delete the contents: the tools regenerate it.
# (The old setting 'cache' is still respected if neither of
# these new ones is present.)
cache_dir = "/etc/lvm/cache"
cache_file_prefix = ""
# You can turn off writing this cache file by setting this to 0.
write_cache_state = 1
# Advanced settings.
# List of pairs of additional acceptable block device types found
# in /proc/devices with maximum (non-zero) number of partitions.
# types = [ "fd", 16 ]
# If sysfs is mounted (2.6 kernels) restrict device scanning to
# the block devices it believes are valid.
# 1 enables; 0 disables.
sysfs_scan = 1
# By default, LVM2 will ignore devices used as components of
# software RAID (md) devices by looking for md superblocks.
# 1 enables; 0 disables.
md_component_detection = 1
# By default, if a PV is placed directly upon an md device, LVM2
# will align its data blocks with the md device's stripe-width.
# 1 enables; 0 disables.
md_chunk_alignment = 1
# By default, the start of a PV's data area will be a multiple of
# the 'minimum_io_size' or 'optimal_io_size' exposed in sysfs.
# - minimum_io_size - the smallest request the device can perform
# w/o incurring a read-modify-write penalty (e.g. MD's chunk size)
# - optimal_io_size - the device's preferred unit of receiving I/O
# (e.g. MD's stripe width)
# minimum_io_size is used if optimal_io_size is undefined (0).
# If md_chunk_alignment is enabled, that detects the optimal_io_size.
# This setting takes precedence over md_chunk_alignment.
# 1 enables; 0 disables.
data_alignment_detection = 1
# Alignment (in KB) of start of data area when creating a new PV.
# If a PV is placed directly upon an md device and md_chunk_alignment or
# data_alignment_detection is enabled this parameter is ignored.
# Set to 0 for the default alignment of 64KB or page size, if larger.
data_alignment = 0
# By default, the start of the PV's aligned data area will be shifted by
# the 'alignment_offset' exposed in sysfs. This offset is often 0 but
# may be non-zero; e.g.: certain 4KB sector drives that compensate for
# windows partitioning will have an alignment_offset of 3584 bytes
# (sector 7 is the lowest aligned logical block, the 4KB sectors start
# at LBA -1, and consequently sector 63 is aligned on a 4KB boundary).
# 1 enables; 0 disables.
data_alignment_offset_detection = 1
# If, while scanning the system for PVs, LVM2 encounters a device-mapper
# device that has its I/O suspended, it waits for it to become accessible.
# Set this to 1 to skip such devices. This should only be needed
# in recovery situations.
ignore_suspended_devices = 0
}
# This section allows you to configure the nature of the
# information that LVM2 reports.
log {
# Controls the messages sent to stdout or stderr.
# There are three levels of verbosity, 3 being the most verbose.
verbose = 0
# Should we send log messages through syslog?
# 1 is yes; 0 is no.
syslog = 1
# Should we log error and debug messages to a file?
# By default there is no log file.
#file = "/var/log/lvm2.log"
# Should we overwrite the log file each time the program is run?
# By default we append.
overwrite = 0
# What level of log messages should we send to the log file and/or syslog?
# There are 6 syslog-like log levels currently in use - 2 to 7 inclusive.
# 7 is the most verbose (LOG_DEBUG).
level = 0
# Format of output messages
# Whether or not (1 or 0) to indent messages according to their severity
indent = 1
# Whether or not (1 or 0) to display the command name on each line output
command_names = 0
# A prefix to use before the message text (but after the command name,
# if selected). Default is two spaces, so you can see/grep the severity
# of each message.
prefix = " "
# To make the messages look similar to the original LVM tools use:
# indent = 0
# command_names = 1
# prefix = " -- "
# Set this if you want log messages during activation.
# Don't use this in low memory situations (can deadlock).
# activation = 0
}
# Configuration of metadata backups and archiving. In LVM2 when we
# talk about a 'backup' we mean making a copy of the metadata for the
# *current* system. The 'archive' contains old metadata configurations.
# Backups are stored in a human readable text format.
backup {
# Should we maintain a backup of the current metadata configuration ?
# Use 1 for Yes; 0 for No.
# Think very hard before turning this off!
backup = 1
# Where shall we keep it ?
# Remember to back up this directory regularly!
backup_dir = "/etc/lvm/backup"
# Should we maintain an archive of old metadata configurations.
# Use 1 for Yes; 0 for No.
# On by default. Think very hard before turning this off.
archive = 1
# Where should archived files go ?
# Remember to back up this directory regularly!
archive_dir = "/etc/lvm/archive"
# What is the minimum number of archive files you wish to keep ?
retain_min = 10
# What is the minimum time you wish to keep an archive file for ?
retain_days = 30
}
# Settings for the running LVM2 in shell (readline) mode.
shell {
# Number of lines of history to store in ~/.lvm_history
history_size = 100
}
# Miscellaneous global LVM2 settings
global {
# The file creation mask for any files and directories created.
# Interpreted as octal if the first digit is zero.
umask = 077
# Allow other users to read the files
#umask = 022
# Enabling test mode means that no changes to the on disk metadata
# will be made. Equivalent to having the -t option on every
# command. Defaults to off.
test = 0
# Default value for --units argument
units = "h"
# Since version 2.02.54, the tools distinguish between powers of
# 1024 bytes (e.g. KiB, MiB, GiB) and powers of 1000 bytes (e.g.
# KB, MB, GB).
# If you have scripts that depend on the old behaviour, set this to 0
# temporarily until you update them.
si_unit_consistency = 1
# Whether or not to communicate with the kernel device-mapper.
# Set to 0 if you want to use the tools to manipulate LVM metadata
# without activating any logical volumes.
# If the device-mapper kernel driver is not present in your kernel
# setting this to 0 should suppress the error messages.
activation = 1
# If we can't communicate with device-mapper, should we try running
# the LVM1 tools?
# This option only applies to 2.4 kernels and is provided to help you
# switch between device-mapper kernels and LVM1 kernels.
# The LVM1 tools need to be installed with .lvm1 suffices
# e.g. vgscan.lvm1 and they will stop working after you start using
# the new lvm2 on-disk metadata format.
# The default value is set when the tools are built.
# fallback_to_lvm1 = 0
# The default metadata format that commands should use - "lvm1" or "lvm2".
# The command line override is -M1 or -M2.
# Defaults to "lvm2".
# format = "lvm2"
# Location of proc filesystem
proc = "/proc"
# Type of locking to use. Defaults to local file-based locking (1).
# Turn locking off by setting to 0 (dangerous: risks metadata corruption
# if LVM2 commands get run concurrently).
# Type 2 uses the external shared library locking_library.
# Type 3 uses built-in clustered locking.
# Type 4 uses read-only locking which forbids any operations that might
# change metadata.
locking_type = 1
# Set to 0 to fail when a lock request cannot be satisfied immediately.
wait_for_locks = 1
# If using external locking (type 2) and initialisation fails,
# with this set to 1 an attempt will be made to use the built-in
# clustered locking.
# If you are using a customised locking_library you should set this to 0.
fallback_to_clustered_locking = 1
# If an attempt to initialise type 2 or type 3 locking failed, perhaps
# because cluster components such as clvmd are not running, with this set
# to 1 an attempt will be made to use local file-based locking (type 1).
# If this succeeds, only commands against local volume groups will proceed.
# Volume Groups marked as clustered will be ignored.
fallback_to_local_locking = 1
# Local non-LV directory that holds file-based locks while commands are
# in progress. A directory like /tmp that may get wiped on reboot is OK.
locking_dir = "/var/lock/lvm"
# Whenever there are competing read-only and read-write access requests for
# a volume group's metadata, instead of always granting the read-only
# requests immediately, delay them to allow the read-write requests to be
# serviced. Without this setting, write access may be stalled by a high
# volume of read-only requests.
# NB. This option only affects locking_type = 1 viz. local file-based
# locking.
prioritise_write_locks = 1
# Other entries can go here to allow you to load shared libraries
# e.g. if support for LVM1 metadata was compiled as a shared library use
# format_libraries = "liblvm2format1.so"
# Full pathnames can be given.
# Search this directory first for shared libraries.
# library_dir = "/lib/lvm2"
# The external locking library to load if locking_type is set to 2.
# locking_library = "liblvm2clusterlock.so"
}
activation {
# Set to 0 to disable udev synchronisation (if compiled into the binaries).
# Processes will not wait for notification from udev.
# They will continue irrespective of any possible udev processing
# in the background. You should only use this if udev is not running
# or has rules that ignore the devices LVM2 creates.
# The command line argument --nodevsync takes precedence over this setting.
# If set to 1 when udev is not running, and there are LVM2 processes
# waiting for udev, run 'dmsetup udevcomplete_all' manually to wake them up.
udev_sync = 1
# How to fill in missing stripes if activating an incomplete volume.
# Using "error" will make inaccessible parts of the device return
# I/O errors on access. You can instead use a device path, in which
# case, that device will be used in place of missing stripes.
# But note that using anything other than "error" with mirrored
# or snapshotted volumes is likely to result in data corruption.
missing_stripe_filler = "error"
# How much stack (in KB) to reserve for use while devices suspended
reserved_stack = 256
# How much memory (in KB) to reserve for use while devices suspended
reserved_memory = 8192
# Nice value used while devices suspended
process_priority = -18
# If volume_list is defined, each LV is only activated if there is a
# match against the list.
# "vgname" and "vgname/lvname" are matched exactly.
# "@tag" matches any tag set in the LV or VG.
# "@*" matches if any tag defined on the host is also set in the LV or VG
#
# volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
# Size (in KB) of each copy operation when mirroring
mirror_region_size = 512
# Setting to use when there is no readahead value stored in the metadata.
#
# "none" - Disable readahead.
# "auto" - Use default value chosen by kernel.
readahead = "auto"
# 'mirror_image_fault_policy' and 'mirror_log_fault_policy' define
# how a device failure affecting a mirror is handled.
# A mirror is composed of mirror images (copies) and a log.
# A disk log ensures that a mirror does not need to be re-synced
# (all copies made the same) every time a machine reboots or crashes.
#
# In the event of a failure, the specified policy will be used to determine
# what happens. This applies to automatic repairs (when the mirror is being
# monitored by dmeventd) and to manual lvconvert --repair when
# --use-policies is given.
#
# "remove" - Simply remove the faulty device and run without it. If
# the log device fails, the mirror would convert to using
# an in-memory log. This means the mirror will not
# remember its sync status across crashes/reboots and
# the entire mirror will be re-synced. If a
# mirror image fails, the mirror will convert to a
# non-mirrored device if there is only one remaining good
# copy.
#
# "allocate" - Remove the faulty device and try to allocate space on
# a new device to be a replacement for the failed device.
# Using this policy for the log is fast and maintains the
# ability to remember sync state through crashes/reboots.
# Using this policy for a mirror device is slow, as it
# requires the mirror to resynchronize the devices, but it
# will preserve the mirror characteristic of the device.
# This policy acts like "remove" if no suitable device and
# space can be allocated for the replacement.
#
# "allocate_anywhere" - Not yet implemented. Useful to place the log device
# temporarily on same physical volume as one of the mirror
# images. This policy is not recommended for mirror devices
# since it would break the redundant nature of the mirror. This
# policy acts like "remove" if no suitable device and space can
# be allocated for the replacement.
mirror_log_fault_policy = "allocate"
mirror_device_fault_policy = "remove"
}
####################
# Advanced section #
####################
# Metadata settings
#
# metadata {
# Default number of copies of metadata to hold on each PV. 0, 1 or 2.
# You might want to override it from the command line with 0
# when running pvcreate on new PVs which are to be added to large VGs.
# pvmetadatacopies = 1
# Approximate default size of on-disk metadata areas in sectors.
# You should increase this if you have large volume groups or
# you want to retain a large on-disk history of your metadata changes.
# pvmetadatasize = 255
# List of directories holding live copies of text format metadata.
# These directories must not be on logical volumes!
# It's possible to use LVM2 with a couple of directories here,
# preferably on different (non-LV) filesystems, and with no other
# on-disk metadata (pvmetadatacopies = 0). Or this can be in
# addition to on-disk metadata areas.
# The feature was originally added to simplify testing and is not
# supported under low memory situations - the machine could lock up.
#
# Never edit any files in these directories by hand unless you
# are absolutely sure you know what you are doing! Use
# the supplied toolset to make changes (e.g. vgcfgrestore).
# dirs = [ "/etc/lvm/metadata", "/mnt/disk2/lvm/metadata2" ]
#}
# Event daemon
#
dmeventd {
# mirror_library is the library used when monitoring a mirror device.
#
# "libdevmapper-event-lvm2mirror.so" attempts to recover from
# failures. It removes failed devices from a volume group and
# reconfigures a mirror as necessary. If no mirror library is
# provided, mirrors are not monitored through dmeventd.
mirror_library = "libdevmapper-event-lvm2mirror.so"
# snapshot_library is the library used when monitoring a snapshot device.
#
# "libdevmapper-event-lvm2snapshot.so" monitors the filling of
# snapshots and emits a warning through syslog, when the use of
# snapshot exceeds 80%. The warning is repeated when 85%, 90% and
# 95% of the snapshot are filled.
snapshot_library = "libdevmapper-event-lvm2snapshot.so"
}

View File

@@ -1,28 +0,0 @@
--ec2_url=http://192.168.255.1:8773/services/Cloud
--rabbit_host=192.168.255.1
--redis_host=192.168.255.1
--s3_host=192.168.255.1
--vpn_ip=192.168.255.1
--datastore_path=/var/lib/nova/keeper
--networks_path=/var/lib/nova/networks
--instances_path=/var/lib/nova/instances
--buckets_path=/var/lib/nova/objectstore/buckets
--images_path=/var/lib/nova/objectstore/images
--ca_path=/var/lib/nova/CA
--keys_path=/var/lib/nova/keys
--vlan_start=2000
--vlan_end=3000
--private_range=192.168.0.0/16
--public_range=10.0.0.0/24
--volume_group=vgdata
--storage_dev=/dev/sdc
--bridge_dev=eth2
--aoe_eth_dev=eth2
--public_interface=vlan0
--default_kernel=aki-DEFAULT
--default_ramdisk=ari-DEFAULT
--vpn_image_id=ami-cloudpipe
--daemonize
--verbose
--syslog
--prefix=nova

View File

@@ -1,3 +0,0 @@
[Boto]
debug = 0
num_retries = 1

View File

@@ -1,35 +0,0 @@
<domain type='%(type)s'>
<name>%(name)s</name>
<os>
<type>hvm</type>
<kernel>%(basepath)s/kernel</kernel>
<initrd>%(basepath)s/ramdisk</initrd>
<cmdline>root=/dev/vda1 console=ttyS0</cmdline>
</os>
<features>
<acpi/>
</features>
<memory>%(memory_kb)s</memory>
<vcpu>%(vcpus)s</vcpu>
<devices>
<disk type='file'>
<source file='%(basepath)s/disk'/>
<target dev='vda' bus='virtio'/>
</disk>
<interface type='bridge'>
<source bridge='%(bridge_name)s'/>
<mac address='%(mac_address)s'/>
<!-- <model type='virtio'/> CANT RUN virtio network right now -->
<!--
<filterref filter="nova-instance-%(name)s">
<parameter name="IP" value="%(ip_address)s" />
<parameter name="DHCPSERVER" value="%(dhcp_server)s" />
</filterref>
-->
</interface>
<serial type="file">
<source path='%(basepath)s/console.log'/>
<target port='1'/>
</serial>
</devices>
</domain>

View File

@@ -1,137 +0,0 @@
#
# The MySQL database server configuration file.
#
# You can copy this to one of:
# - "/etc/mysql/my.cnf" to set global options,
# - "~/.my.cnf" to set user-specific options.
#
# One can use all long options that the program supports.
# Run program with --help to get a list of available options and with
# --print-defaults to see which it would actually understand and use.
#
# For explanations see
# http://dev.mysql.com/doc/mysql/en/server-system-variables.html
# This will be passed to all mysql clients
# It has been reported that passwords should be enclosed with ticks/quotes
# especially if they contain "#" chars...
# Remember to edit /etc/mysql/debian.cnf when changing the socket location.
[client]
port = 3306
socket = /var/run/mysqld/mysqld.sock
# Here are entries for some specific programs
# The following values assume you have at least 32M ram
# This was formerly known as [safe_mysqld]. Both versions are currently parsed.
[mysqld_safe]
socket = /var/run/mysqld/mysqld.sock
nice = 0
[mysqld]
#
# * Basic Settings
#
#
# * IMPORTANT
# If you make changes to these settings and your system uses apparmor, you may
# also need to also adjust /etc/apparmor.d/usr.sbin.mysqld.
#
user = mysql
socket = /var/run/mysqld/mysqld.sock
port = 3306
basedir = /usr
datadir = /var/lib/mysql
tmpdir = /tmp
skip-external-locking
#
# Instead of skip-networking the default is now to listen only on
# localhost which is more compatible and is not less secure.
# bind-address = 127.0.0.1
#
# * Fine Tuning
#
innodb_buffer_pool_size = 12G
#innodb_log_file_size = 256M
innodb_log_buffer_size=4M
innodb_flush_log_at_trx_commit=2
innodb_thread_concurrency=8
innodb_flush_method=O_DIRECT
key_buffer = 128M
max_allowed_packet = 256M
thread_stack = 8196K
thread_cache_size = 32
# This replaces the startup script and checks MyISAM tables if needed
# the first time they are touched
myisam-recover = BACKUP
max_connections = 1000
table_cache = 1024
#thread_concurrency = 10
#
# * Query Cache Configuration
#
query_cache_limit = 32M
query_cache_size = 256M
#
# * Logging and Replication
#
# Both locations get rotated by the cronjob.
# Be aware that this log type is a performance killer.
# As of 5.1 you can enable the log at runtime!
#general_log_file = /var/log/mysql/mysql.log
#general_log = 1
log_error = /var/log/mysql/error.log
# Here you can see queries with especially long duration
log_slow_queries = /var/log/mysql/mysql-slow.log
long_query_time = 2
#log-queries-not-using-indexes
#
# The following can be used as easy to replay backup logs or for replication.
# note: if you are setting up a replication slave, see README.Debian about
# other settings you may need to change.
server-id = 1
log_bin = /var/log/mysql/mysql-bin.log
expire_logs_days = 10
max_binlog_size = 50M
#binlog_do_db = include_database_name
#binlog_ignore_db = include_database_name
#
# * InnoDB
#
sync_binlog=1
# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/.
# Read the manual for more InnoDB related options. There are many!
#
# * Security Features
#
# Read the manual, too, if you want chroot!
# chroot = /var/lib/mysql/
#
# For generating SSL certificates I recommend the OpenSSL GUI "tinyca".
#
# ssl-ca=/etc/mysql/cacert.pem
# ssl-cert=/etc/mysql/server-cert.pem
# ssl-key=/etc/mysql/server-key.pem
[mysqldump]
quick
quote-names
max_allowed_packet = 256M
[mysql]
#no-auto-rehash # faster start of mysql but no tab completion
[isamchk]
key_buffer = 128M
#
# * IMPORTANT: Additional settings that can override those from this file!
# The files must end with '.cnf', otherwise they'll be ignored.
#
!includedir /etc/mysql/conf.d/

View File

@@ -1,187 +0,0 @@
#! /bin/sh
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(vish): This script sets up some reasonable defaults for iptables and
# creates nova-specific chains. If you use this script you should
# run nova-network and nova-compute with --use_nova_chains=True
# NOTE(vish): If you run public nova-api on a different port, make sure to
# change the port here
if [ -f /etc/default/nova-iptables ] ; then
. /etc/default/nova-iptables
fi
export LC_ALL=C
API_PORT=${API_PORT:-"8773"}
if [ ! -n "$IP" ]; then
# NOTE(vish): IP address is what address the services ALLOW on.
# This will just get the first ip in the list, so if you
# have more than one eth device set up, this will fail, and
# you should explicitly pass in the ip of the instance
IP=`ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'`
fi
if [ ! -n "$PRIVATE_RANGE" ]; then
#NOTE(vish): PRIVATE_RANGE: range is ALLOW to access DHCP
PRIVATE_RANGE="192.168.0.0/12"
fi
if [ ! -n "$MGMT_IP" ]; then
# NOTE(vish): Management IP is the ip over which to allow ssh traffic. It
# will also allow traffic to nova-api
MGMT_IP="$IP"
fi
if [ ! -n "$DMZ_IP" ]; then
# NOTE(vish): DMZ IP is the ip over which to allow api & objectstore access
DMZ_IP="$IP"
fi
clear_nova_iptables() {
iptables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -P OUTPUT ACCEPT
iptables -F
iptables -t nat -F
iptables -F services
iptables -X services
# HACK: re-adding fail2ban rules :(
iptables -N fail2ban-ssh
iptables -A INPUT -p tcp -m multiport --dports 22 -j fail2ban-ssh
iptables -A fail2ban-ssh -j RETURN
}
load_nova_iptables() {
iptables -P INPUT DROP
iptables -A INPUT -m state --state INVALID -j DROP
iptables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
# NOTE(ja): allow localhost for everything
iptables -A INPUT -d 127.0.0.1/32 -j ACCEPT
# NOTE(ja): 22 only allowed MGMT_IP before, but we widened it to any
# address, since ssh should be listening only on internal
# before we re-add this rule we will need to add
# flexibility for RSYNC between omega/stingray
iptables -A INPUT -m tcp -p tcp --dport 22 -j ACCEPT
iptables -A INPUT -m udp -p udp --dport 123 -j ACCEPT
iptables -A INPUT -p icmp -j ACCEPT
iptables -N services
iptables -A INPUT -j services
iptables -A INPUT -p tcp -j REJECT --reject-with tcp-reset
iptables -A INPUT -j REJECT --reject-with icmp-port-unreachable
iptables -P FORWARD DROP
iptables -A FORWARD -m state --state INVALID -j DROP
iptables -A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT
iptables -A FORWARD -p tcp -m tcp --tcp-flags SYN,RST SYN -j TCPMSS --clamp-mss-to-pmtu
# NOTE(vish): DROP on output is too restrictive for now. We need to add
# in a bunch of more specific output rules to use it.
# iptables -P OUTPUT DROP
iptables -A OUTPUT -m state --state INVALID -j DROP
iptables -A OUTPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
if [ -n "$GANGLIA" ] || [ -n "$ALL" ]; then
iptables -A services -m tcp -p tcp -d $IP --dport 8649 -j ACCEPT
iptables -A services -m udp -p udp -d $IP --dport 8649 -j ACCEPT
fi
# if [ -n "$WEB" ] || [ -n "$ALL" ]; then
# # NOTE(vish): This opens up ports for web access, allowing web-based
# # dashboards to work.
# iptables -A services -m tcp -p tcp -d $IP --dport 80 -j ACCEPT
# iptables -A services -m tcp -p tcp -d $IP --dport 443 -j ACCEPT
# fi
if [ -n "$OBJECTSTORE" ] || [ -n "$ALL" ]; then
# infrastructure
iptables -A services -m tcp -p tcp -d $IP --dport 3333 -j ACCEPT
# clients
iptables -A services -m tcp -p tcp -d $DMZ_IP --dport 3333 -j ACCEPT
fi
if [ -n "$API" ] || [ -n "$ALL" ]; then
iptables -A services -m tcp -p tcp -d $IP --dport $API_PORT -j ACCEPT
if [ "$IP" != "$DMZ_IP" ]; then
iptables -A services -m tcp -p tcp -d $DMZ_IP --dport $API_PORT -j ACCEPT
fi
if [ "$IP" != "$MGMT_IP" ] && [ "$DMZ_IP" != "$MGMT_IP" ]; then
iptables -A services -m tcp -p tcp -d $MGMT_IP --dport $API_PORT -j ACCEPT
fi
fi
if [ -n "$REDIS" ] || [ -n "$ALL" ]; then
iptables -A services -m tcp -p tcp -d $IP --dport 6379 -j ACCEPT
fi
if [ -n "$MYSQL" ] || [ -n "$ALL" ]; then
iptables -A services -m tcp -p tcp -d $IP --dport 3306 -j ACCEPT
fi
if [ -n "$RABBITMQ" ] || [ -n "$ALL" ]; then
iptables -A services -m tcp -p tcp -d $IP --dport 4369 -j ACCEPT
iptables -A services -m tcp -p tcp -d $IP --dport 5672 -j ACCEPT
iptables -A services -m tcp -p tcp -d $IP --dport 53284 -j ACCEPT
fi
if [ -n "$DNSMASQ" ] || [ -n "$ALL" ]; then
# NOTE(vish): this could theoretically be setup per network
# for each host, but it seems like overkill
iptables -A services -m tcp -p tcp -s $PRIVATE_RANGE --dport 53 -j ACCEPT
iptables -A services -m udp -p udp -s $PRIVATE_RANGE --dport 53 -j ACCEPT
iptables -A services -m udp -p udp --dport 67 -j ACCEPT
fi
if [ -n "$LDAP" ] || [ -n "$ALL" ]; then
iptables -A services -m tcp -p tcp -d $IP --dport 389 -j ACCEPT
fi
if [ -n "$ISCSI" ] || [ -n "$ALL" ]; then
iptables -A services -m tcp -p tcp -d $IP --dport 3260 -j ACCEPT
iptables -A services -m tcp -p tcp -d 127.0.0.0/16 --dport 3260 -j ACCEPT
fi
}
case "$1" in
start)
echo "Starting nova-iptables: "
load_nova_iptables
;;
stop)
echo "Clearing nova-iptables: "
clear_nova_iptables
;;
restart)
echo "Restarting nova-iptables: "
clear_nova_iptables
load_nova_iptables
;;
*)
echo "Usage: $NAME {start|stop|restart}" >&2
exit 1
;;
esac
exit 0

View File

@@ -1,19 +0,0 @@
#!/bin/sh
# FILE: /etc/udev/scripts/iscsidev.sh
BUS=${1}
HOST=${BUS%%:*}
[ -e /sys/class/iscsi_host ] || exit 1
file="/sys/class/iscsi_host/host${HOST}/device/session*/iscsi_session*/session*/targetname"
target_name=$(cat ${file})
# This is not an open-iscsi drive
if [ -z "${target_name}" ]; then
exit 1
fi
echo "${target_name##*:}"

View File

@@ -1,6 +0,0 @@
#!/bin/bash
/root/slap.sh
mysql -e "DROP DATABASE nova"
mysql -e "CREATE DATABASE nova"
mysql -e "GRANT ALL on nova.* to nova@'%' identified by 'TODO:CHANGEME:CMON'"
touch /root/installed

View File

@@ -1,261 +0,0 @@
#!/usr/bin/env bash
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# LDAP INSTALL SCRIPT - SHOULD BE IDEMPOTENT, but it SCRUBS all USERS
apt-get install -y slapd ldap-utils python-ldap
cat >/etc/ldap/schema/openssh-lpk_openldap.schema <<LPK_SCHEMA_EOF
#
# LDAP Public Key Patch schema for use with openssh-ldappubkey
# Author: Eric AUGE <eau@phear.org>
#
# Based on the proposal of : Mark Ruijter
#
# octetString SYNTAX
attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey'
DESC 'MANDATORY: OpenSSH Public key'
EQUALITY octetStringMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )
# printableString SYNTAX yes|no
objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY
DESC 'MANDATORY: OpenSSH LPK objectclass'
MAY ( sshPublicKey $ uid )
)
LPK_SCHEMA_EOF
cat >/etc/ldap/schema/nova.schema <<NOVA_SCHEMA_EOF
#
# Person object for Nova
# inetorgperson with extra attributes
# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
#
#
# using internet experimental oid arc as per BP64 3.1
objectidentifier novaSchema 1.3.6.1.3.1.666.666
objectidentifier novaAttrs novaSchema:3
objectidentifier novaOCs novaSchema:4
attributetype (
novaAttrs:1
NAME 'accessKey'
DESC 'Key for accessing data'
EQUALITY caseIgnoreMatch
SUBSTR caseIgnoreSubstringsMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
SINGLE-VALUE
)
attributetype (
novaAttrs:2
NAME 'secretKey'
DESC 'Secret key'
EQUALITY caseIgnoreMatch
SUBSTR caseIgnoreSubstringsMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
SINGLE-VALUE
)
attributetype (
novaAttrs:3
NAME 'keyFingerprint'
DESC 'Fingerprint of private key'
EQUALITY caseIgnoreMatch
SUBSTR caseIgnoreSubstringsMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
SINGLE-VALUE
)
attributetype (
novaAttrs:4
NAME 'isAdmin'
DESC 'Is user an administrator?'
EQUALITY booleanMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.7
SINGLE-VALUE
)
attributetype (
novaAttrs:5
NAME 'projectManager'
DESC 'Project Managers of a project'
SYNTAX 1.3.6.1.4.1.1466.115.121.1.12
)
objectClass (
novaOCs:1
NAME 'novaUser'
DESC 'access and secret keys'
AUXILIARY
MUST ( uid )
MAY ( accessKey $ secretKey $ isAdmin )
)
objectClass (
novaOCs:2
NAME 'novaKeyPair'
DESC 'Key pair for User'
SUP top
STRUCTURAL
MUST ( cn $ sshPublicKey $ keyFingerprint )
)
objectClass (
novaOCs:3
NAME 'novaProject'
DESC 'Container for project'
SUP groupOfNames
STRUCTURAL
MUST ( cn $ projectManager )
)
NOVA_SCHEMA_EOF
mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig
cat >/etc/ldap/slapd.conf <<SLAPD_CONF_EOF
# slapd.conf - Configuration file for LDAP SLAPD
##########
# Basics #
##########
include /etc/ldap/schema/core.schema
include /etc/ldap/schema/cosine.schema
include /etc/ldap/schema/inetorgperson.schema
include /etc/ldap/schema/openssh-lpk_openldap.schema
include /etc/ldap/schema/nova.schema
pidfile /var/run/slapd/slapd.pid
argsfile /var/run/slapd/slapd.args
loglevel none
modulepath /usr/lib/ldap
# modulepath /usr/local/libexec/openldap
moduleload back_hdb
##########################
# Database Configuration #
##########################
database hdb
suffix "dc=example,dc=com"
rootdn "cn=Manager,dc=example,dc=com"
rootpw changeme
directory /var/lib/ldap
# directory /usr/local/var/openldap-data
index objectClass,cn eq
########
# ACLs #
########
access to attrs=userPassword
by anonymous auth
by self write
by * none
access to *
by self write
by * none
SLAPD_CONF_EOF
mv /etc/ldap/ldap.conf /etc/ldap/ldap.conf.orig
cat >/etc/ldap/ldap.conf <<LDAP_CONF_EOF
# LDAP Client Settings
URI ldap://localhost
BASE dc=example,dc=com
BINDDN cn=Manager,dc=example,dc=com
SIZELIMIT 0
TIMELIMIT 0
LDAP_CONF_EOF
cat >/etc/ldap/base.ldif <<BASE_LDIF_EOF
# This is the root of the directory tree
dn: dc=example,dc=com
description: Example.Com, your trusted non-existent corporation.
dc: example
o: Example.Com
objectClass: top
objectClass: dcObject
objectClass: organization
# Subtree for users
dn: ou=Users,dc=example,dc=com
ou: Users
description: Users
objectClass: organizationalUnit
# Subtree for groups
dn: ou=Groups,dc=example,dc=com
ou: Groups
description: Groups
objectClass: organizationalUnit
# Subtree for system accounts
dn: ou=System,dc=example,dc=com
ou: System
description: Special accounts used by software applications.
objectClass: organizationalUnit
# Special Account for Authentication:
dn: uid=authenticate,ou=System,dc=example,dc=com
uid: authenticate
ou: System
description: Special account for authenticating users
userPassword: {MD5}TODO-000000000000000000000000000==
objectClass: account
objectClass: simpleSecurityObject
# create the sysadmin entry
dn: cn=developers,ou=Groups,dc=example,dc=com
objectclass: groupOfNames
cn: developers
description: IT admin group
member: uid=admin,ou=Users,dc=example,dc=com
dn: cn=sysadmins,ou=Groups,dc=example,dc=com
objectclass: groupOfNames
cn: sysadmins
description: IT admin group
member: uid=admin,ou=Users,dc=example,dc=com
dn: cn=netadmins,ou=Groups,dc=example,dc=com
objectclass: groupOfNames
cn: netadmins
description: Network admin group
member: uid=admin,ou=Users,dc=example,dc=com
dn: cn=cloudadmins,ou=Groups,dc=example,dc=com
objectclass: groupOfNames
cn: cloudadmins
description: Cloud admin group
member: uid=admin,ou=Users,dc=example,dc=com
dn: cn=itsec,ou=Groups,dc=example,dc=com
objectclass: groupOfNames
cn: itsec
description: IT security users group
member: uid=admin,ou=Users,dc=example,dc=com
BASE_LDIF_EOF
/etc/init.d/slapd stop
rm -rf /var/lib/ldap/*
rm -rf /etc/ldap/slapd.d/*
slaptest -f /etc/ldap/slapd.conf -F /etc/ldap/slapd.d
cp /usr/share/slapd/DB_CONFIG /var/lib/ldap/DB_CONFIG
slapadd -v -l /etc/ldap/base.ldif
chown -R openldap:openldap /etc/ldap/slapd.d
chown -R openldap:openldap /var/lib/ldap
/etc/init.d/slapd start
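# Hedged sanity check (base DN and credentials match the files written above):
#   ldapsearch -x -D cn=Manager,dc=example,dc=com -w changeme \
#       -b ou=Groups,dc=example,dc=com cn=developers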

View File

@@ -1,8 +0,0 @@
# fileserver.conf
[files]
path /srv/cloud/puppet/files
allow 10.0.0.0/24
[plugins]

View File

@@ -1 +0,0 @@
exec { "update-apt": command => "/usr/bin/apt-get update" }

View File

@@ -1,14 +0,0 @@
class issue {
file { "/etc/issue":
owner => "root",
group => "root",
mode => 444,
source => "puppet://${puppet_server}/files/etc/issue",
}
file { "/etc/issue.net":
owner => "root",
group => "root",
mode => 444,
source => "puppet://${puppet_server}/files/etc/issue",
}
}

View File

@@ -1,34 +0,0 @@
# via http://projects.puppetlabs.com/projects/puppet/wiki/Kernel_Modules_Patterns
define kern_module ($ensure) {
$modulesfile = $operatingsystem ? { ubuntu => "/etc/modules", redhat => "/etc/rc.modules" }
case $operatingsystem {
redhat: { file { "/etc/rc.modules": ensure => file, mode => 755 } }
}
case $ensure {
present: {
exec { "insert_module_${name}":
command => $operatingsystem ? {
ubuntu => "/bin/echo '${name}' >> '${modulesfile}'",
redhat => "/bin/echo '/sbin/modprobe ${name}' >> '${modulesfile}' "
},
unless => "/bin/grep -qFx '${name}' '${modulesfile}'"
}
exec { "/sbin/modprobe ${name}": unless => "/bin/grep -q '^${name} ' '/proc/modules'" }
}
absent: {
exec { "/sbin/modprobe -r ${name}": onlyif => "/bin/grep -q '^${name} ' '/proc/modules'" }
exec { "remove_module_${name}":
command => $operatingsystem ? {
ubuntu => "/usr/bin/perl -ni -e 'print unless /^\\Q${name}\\E\$/' '${modulesfile}'",
redhat => "/usr/bin/perl -ni -e 'print unless /^\\Q/sbin/modprobe ${name}\\E\$/' '${modulesfile}'"
},
onlyif => $operatingsystem ? {
ubuntu => "/bin/grep -qFx '${name}' '${modulesfile}'",
redhat => "/bin/grep -q '^/sbin/modprobe ${name}' '${modulesfile}'"
}
}
}
default: { err ( "unknown ensure value ${ensure}" ) }
}
}
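# Usage sketch (mirrors the kvm_intel call made later in this manifest tree):
# kern_module { "kvm_intel": ensure => present }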

View File

@@ -1,6 +0,0 @@
define loopback($num) {
exec { "mknod -m 0660 /dev/loop${num} b 7 ${num}; chown root:disk /dev/loop${num}":
creates => "/dev/loop${num}",
path => ["/usr/bin", "/usr/sbin", "/bin"]
}
}

View File

@@ -1,8 +0,0 @@
class lvm {
file { "/etc/lvm/lvm.conf":
owner => "root",
group => "root",
mode => 444,
source => "puppet://${puppet_server}/files/etc/lvm.conf",
}
}

View File

@@ -1,8 +0,0 @@
class lvmconf {
file { "/etc/lvm/lvm.conf":
owner => "root", group => "root", mode => 644,
source => "puppet://${puppet_server}/files/etc/lvm/lvm.conf",
ensure => present
}
}

View File

@@ -1,464 +0,0 @@
import "kern_module"
import "apt"
import "loopback"
#$head_node_ip = "undef"
#$rabbit_ip = "undef"
#$vpn_ip = "undef"
#$public_interface = "undef"
#$vlan_start = "5000"
#$vlan_end = "6000"
#$private_range = "10.0.0.0/16"
#$public_range = "192.168.177.0/24"
define nova_iptables($services, $ip="", $private_range="", $mgmt_ip="", $dmz_ip="") {
file { "/etc/init.d/nova-iptables":
owner => "root", mode => 755,
source => "puppet://${puppet_server}/files/production/nova-iptables",
}
file { "/etc/default/nova-iptables":
owner => "root", mode => 644,
content => template("nova-iptables.erb")
}
}
define nova_conf_pointer($name) {
file { "/etc/nova/nova-${name}.conf":
owner => "nova", mode => 400,
content => "--flagfile=/etc/nova/nova.conf"
}
}
class novaconf {
file { "/etc/nova/nova.conf":
owner => "nova", mode => 400,
content => template("production/nova-common.conf.erb", "production/nova-${cluster_name}.conf.erb")
}
nova_conf_pointer{'manage': name => 'manage'}
}
class novadata {
package { "rabbitmq-server": ensure => present }
file { "/etc/rabbitmq/rabbitmq.conf":
owner => "root", mode => 644,
content => "NODENAME=rabbit@localhost",
}
service { "rabbitmq-server":
ensure => running,
enable => true,
hasstatus => true,
require => [
File["/etc/rabbitmq/rabbitmq.conf"],
Package["rabbitmq-server"]
]
}
package { "mysql-server": ensure => present }
file { "/etc/mysql/my.cnf":
owner => "root", mode => 644,
source => "puppet://${puppet_server}/files/production/my.cnf",
}
service { "mysql":
ensure => running,
enable => true,
hasstatus => true,
require => [
File["/etc/mysql/my.cnf"],
Package["mysql-server"]
]
}
file { "/root/slap.sh":
owner => "root", mode => 755,
source => "puppet://${puppet_server}/files/production/slap.sh",
}
file { "/root/setup_data.sh":
owner => "root", mode => 755,
source => "puppet://${puppet_server}/files/production/setup_data.sh",
}
# setup compute data
exec { "setup_data":
command => "/root/setup_data.sh",
path => "/usr/bin:/bin",
unless => "test -f /root/installed",
require => [
Service["mysql"],
File["/root/slap.sh"],
File["/root/setup_data.sh"]
]
}
}
define nscheduler($version) {
package { "nova-scheduler": ensure => $version, require => Exec["update-apt"] }
nova_conf_pointer{'scheduler': name => 'scheduler'}
exec { "update-rc.d -f nova-scheduler remove; update-rc.d nova-scheduler defaults 50":
path => "/usr/bin:/usr/sbin:/bin",
onlyif => "test -f /etc/init.d/nova-scheduler",
unless => "test -f /etc/rc2.d/S50nova-scheduler"
}
service { "nova-scheduler":
ensure => running,
hasstatus => true,
subscribe => [
Package["nova-scheduler"],
File["/etc/nova/nova.conf"],
File["/etc/nova/nova-scheduler.conf"]
]
}
}
define napi($version, $api_servers, $api_base_port) {
file { "/etc/boto.cfg":
owner => "root", mode => 644,
source => "puppet://${puppet_server}/files/production/boto.cfg",
}
file { "/var/lib/nova/CA/genvpn.sh":
owner => "nova", mode => 755,
source => "puppet://${puppet_server}/files/production/genvpn.sh",
}
package { "python-greenlet": ensure => present }
package { "nova-api": ensure => $version, require => [Exec["update-apt"], Package["python-greenlet"]] }
nova_conf_pointer{'api': name => 'api'}
exec { "update-rc.d -f nova-api remove; update-rc.d nova-api defaults 50":
path => "/usr/bin:/usr/sbin:/bin",
onlyif => "test -f /etc/init.d/nova-api",
unless => "test -f /etc/rc2.d/S50nova-api"
}
service { "nova-netsync":
start => "/usr/bin/nova-netsync --pidfile=/var/run/nova/nova-netsync.pid --lockfile=/var/run/nova/nova-netsync.pid.lock start",
stop => "/usr/bin/nova-netsync --pidfile=/var/run/nova/nova-netsync.pid --lockfile=/var/run/nova/nova-netsync.pid.lock stop",
ensure => running,
hasstatus => false,
pattern => "nova-netsync",
require => Service["nova-api"],
subscribe => File["/etc/nova/nova.conf"]
}
service { "nova-api":
start => "monit start all -g nova_api",
stop => "monit stop all -g nova_api",
restart => "monit restart all -g nova_api",
# ensure => running,
# hasstatus => true,
require => Service["monit"],
subscribe => [
Package["nova-objectstore"],
File["/etc/boto.cfg"],
File["/etc/nova/nova.conf"],
File["/etc/nova/nova-objectstore.conf"]
]
}
# the haproxy and monit templates use $api_servers and $api_base_port
package { "haproxy": ensure => present }
file { "/etc/default/haproxy":
owner => "root", mode => 644,
content => "ENABLED=1",
require => Package['haproxy']
}
file { "/etc/haproxy/haproxy.cfg":
owner => "root", mode => 644,
content => template("/srv/cloud/puppet/templates/haproxy.cfg.erb"),
require => Package['haproxy']
}
service { "haproxy":
ensure => true,
enable => true,
hasstatus => true,
subscribe => [
Package["haproxy"],
File["/etc/default/haproxy"],
File["/etc/haproxy/haproxy.cfg"],
]
}
package { "socat": ensure => present }
file { "/usr/local/bin/gmetric_haproxy.sh":
owner => "root", mode => 755,
source => "puppet://${puppet_server}/files/production/ganglia/gmetric_scripts/gmetric_haproxy.sh",
}
cron { "gmetric_haproxy":
command => "/usr/local/bin/gmetric_haproxy.sh",
user => root,
minute => "*/3",
}
package { "monit": ensure => present }
file { "/etc/default/monit":
owner => "root", mode => 644,
content => "startup=1",
require => Package['monit']
}
file { "/etc/monit/monitrc":
owner => "root", mode => 600,
content => template("/srv/cloud/puppet/templates/monitrc-nova-api.erb"),
require => Package['monit']
}
service { "monit":
ensure => true,
pattern => "sbin/monit",
subscribe => [
Package["monit"],
File["/etc/default/monit"],
File["/etc/monit/monitrc"],
]
}
}
define nnetwork($version) {
# kill the default network added by the package
exec { "kill-libvirt-default-net":
command => "virsh net-destroy default; rm /etc/libvirt/qemu/networks/autostart/default.xml",
path => "/usr/bin:/bin",
onlyif => "test -f /etc/libvirt/qemu/networks/autostart/default.xml"
}
# EVIL HACK: custom binary because dnsmasq 2.52 segfaulted accessing dereferenced object
file { "/usr/sbin/dnsmasq":
owner => "root", group => "root",
source => "puppet://${puppet_server}/files/production/dnsmasq",
}
package { "nova-network": ensure => $version, require => Exec["update-apt"] }
nova_conf_pointer{'dhcpbridge': name => 'dhcpbridge'}
nova_conf_pointer{'network': name => "network" }
exec { "update-rc.d -f nova-network remove; update-rc.d nova-network defaults 50":
path => "/usr/bin:/usr/sbin:/bin",
onlyif => "test -f /etc/init.d/nova-network",
unless => "test -f /etc/rc2.d/S50nova-network"
}
service { "nova-network":
ensure => running,
hasstatus => true,
subscribe => [
Package["nova-network"],
File["/etc/nova/nova.conf"],
File["/etc/nova/nova-network.conf"]
]
}
}
define nobjectstore($version) {
package { "nova-objectstore": ensure => $version, require => Exec["update-apt"] }
nova_conf_pointer{'objectstore': name => 'objectstore'}
exec { "update-rc.d -f nova-objectstore remove; update-rc.d nova-objectstore defaults 50":
path => "/usr/bin:/usr/sbin:/bin",
onlyif => "test -f /etc/init.d/nova-objectstore",
unless => "test -f /etc/rc2.d/S50nova-objectstore"
}
service { "nova-objectstore":
ensure => running,
hasstatus => true,
subscribe => [
Package["nova-objectstore"],
File["/etc/nova/nova.conf"],
File["/etc/nova/nova-objectstore.conf"]
]
}
}
define ncompute($version) {
include ganglia-python
include ganglia-compute
# kill the default network added by the package
exec { "kill-libvirt-default-net":
command => "virsh net-destroy default; rm /etc/libvirt/qemu/networks/autostart/default.xml",
path => "/usr/bin:/bin",
onlyif => "test -f /etc/libvirt/qemu/networks/autostart/default.xml"
}
# LIBVIRT has to be restarted when ebtables / gawk are installed
service { "libvirt-bin":
ensure => running,
pattern => "sbin/libvirtd",
subscribe => [
Package["ebtables"],
Kern_module["kvm_intel"]
],
require => [
Package["libvirt-bin"],
Package["ebtables"],
Package["gawk"],
Kern_module["kvm_intel"],
File["/dev/kvm"]
]
}
package { "libvirt-bin": ensure => "0.8.3-1ubuntu14~ppalucid2" }
package { "ebtables": ensure => present }
package { "gawk": ensure => present }
# ensure proper permissions on /dev/kvm
file { "/dev/kvm":
owner => "root",
group => "kvm",
mode => 660
}
# require hardware virt
kern_module { "kvm_intel":
ensure => present,
}
# increase loopback devices
file { "/etc/modprobe.d/loop.conf":
owner => "root", mode => 644,
content => "options loop max_loop=40"
}
nova_conf_pointer{'compute': name => 'compute'}
loopback{loop0: num => 0}
loopback{loop1: num => 1}
loopback{loop2: num => 2}
loopback{loop3: num => 3}
loopback{loop4: num => 4}
loopback{loop5: num => 5}
loopback{loop6: num => 6}
loopback{loop7: num => 7}
loopback{loop8: num => 8}
loopback{loop9: num => 9}
loopback{loop10: num => 10}
loopback{loop11: num => 11}
loopback{loop12: num => 12}
loopback{loop13: num => 13}
loopback{loop14: num => 14}
loopback{loop15: num => 15}
loopback{loop16: num => 16}
loopback{loop17: num => 17}
loopback{loop18: num => 18}
loopback{loop19: num => 19}
loopback{loop20: num => 20}
loopback{loop21: num => 21}
loopback{loop22: num => 22}
loopback{loop23: num => 23}
loopback{loop24: num => 24}
loopback{loop25: num => 25}
loopback{loop26: num => 26}
loopback{loop27: num => 27}
loopback{loop28: num => 28}
loopback{loop29: num => 29}
loopback{loop30: num => 30}
loopback{loop31: num => 31}
loopback{loop32: num => 32}
loopback{loop33: num => 33}
loopback{loop34: num => 34}
loopback{loop35: num => 35}
loopback{loop36: num => 36}
loopback{loop37: num => 37}
loopback{loop38: num => 38}
loopback{loop39: num => 39}
package { "python-libvirt": ensure => "0.8.3-1ubuntu14~ppalucid2" }
package { "nova-compute":
ensure => "$version",
require => Package["python-libvirt"]
}
#file { "/usr/share/nova/libvirt.qemu.xml.template":
# owner => "nova", mode => 400,
# source => "puppet://${puppet_server}/files/production/libvirt.qemu.xml.template",
#}
# fix runlevels: using enable => true adds it as 20, which is too early
exec { "update-rc.d -f nova-compute remove":
path => "/usr/bin:/usr/sbin:/bin",
onlyif => "test -f /etc/rc2.d/S??nova-compute"
}
service { "nova-compute":
ensure => running,
hasstatus => true,
subscribe => [
Package["nova-compute"],
File["/etc/nova/nova.conf"],
File["/etc/nova/nova-compute.conf"],
#File["/usr/share/nova/libvirt.qemu.xml.template"],
Service["libvirt-bin"],
Kern_module["kvm_intel"]
]
}
}
define nvolume($version) {
package { "nova-volume": ensure => $version, require => Exec["update-apt"] }
nova_conf_pointer{'volume': name => 'volume'}
# fix runlevels: using enable => true adds it as 20, which is too early
exec { "update-rc.d -f nova-volume remove":
path => "/usr/bin:/usr/sbin:/bin",
onlyif => "test -f /etc/rc2.d/S??nova-volume"
}
file { "/etc/default/iscsitarget":
owner => "root", mode => 644,
content => "ISCSITARGET_ENABLE=true"
}
package { "iscsitarget": ensure => present }
file { "/dev/iscsi": ensure => directory } # FIXME(vish): owner / mode?
file { "/usr/sbin/nova-iscsi-dev.sh":
owner => "root", mode => 755,
source => "puppet://${puppet_server}/files/production/nova-iscsi-dev.sh"
}
file { "/etc/udev/rules.d/55-openiscsi.rules":
owner => "root", mode => 644,
content => 'KERNEL=="sd*", BUS=="scsi", PROGRAM="/usr/sbin/nova-iscsi-dev.sh %b",SYMLINK+="iscsi/%c%n"'
}
service { "iscsitarget":
ensure => running,
enable => true,
hasstatus => true,
require => [
File["/etc/default/iscsitarget"],
Package["iscsitarget"]
]
}
service { "nova-volume":
ensure => running,
hasstatus => true,
subscribe => [
Package["nova-volume"],
File["/etc/nova/nova.conf"],
File["/etc/nova/nova-volume.conf"]
]
}
}
class novaspool {
# This isn't in release yet
#cron { logspool:
# command => "/usr/bin/nova-logspool /var/log/nova.log /var/lib/nova/spool",
# user => "nova"
#}
#cron { spoolsentry:
# command => "/usr/bin/nova-spoolsentry ${sentry_url} ${sentry_key} /var/lib/nova/spool",
# user => "nova"
#}
}

View File

@@ -1,7 +0,0 @@
class swift {
package { "memcached": ensure => present }
service { "memcached": require => Package['memcached'] }
package { "swift-proxy": ensure => present }
}

View File

@@ -1,120 +0,0 @@
# site.pp
import "templates"
import "classes/*"
node novabase inherits default {
# $puppet_server = "192.168.0.10"
$cluster_name = "openstack001"
$ganglia_udp_send_channel = "openstack001.example.com"
$syslog = "192.168.0.10"
# THIS STUFF ISN'T IN RELEASE YET
#$sentry_url = "http://192.168.0.19/sentry/store/"
#$sentry_key = "TODO:SENTRYPASS"
$local_network = "192.168.0.0/16"
$vpn_ip = "192.168.0.2"
$public_interface = "eth0"
include novanode
# include nova-common
include opsmetrics
# non-nova stuff such as nova-dash inherit from novanode
# novaspool needs a better home
# include novaspool
}
# Builder
node "nova000.example.com" inherits novabase {
$syslog = "server"
include ntp
include syslog-server
}
# Non-Nova nodes
node
"blog.example.com",
"wiki.example.com"
inherits novabase {
include ganglia-python
include ganglia-apache
include ganglia-mysql
}
node "nova001.example.com"
inherits novabase {
include novabase
nova_iptables { nova:
services => [
"ganglia",
"mysql",
"rabbitmq",
"ldap",
"api",
"objectstore",
"nrpe",
],
ip => "192.168.0.10",
}
nobjectstore { nova: version => "0.9.0" }
nscheduler { nova: version => "0.9.0" }
napi { nova:
version => "0.9.0",
api_servers => 10,
api_base_port => 8000
}
}
node "nova002.example.com"
inherits novabase {
include novaconf
nova_iptables { nova:
services => [
"ganglia",
"dnsmasq",
"nrpe"
],
ip => "192.168.4.2",
private_range => "192.168.0.0/16",
}
nnetwork { nova: version => "0.9.0" }
}
node
"nova003.example.com",
"nova004.example.com",
"nova005.example.com",
"nova006.example.com",
"nova007.example.com",
"nova008.example.com",
"nova009.example.com",
"nova010.example.com",
"nova011.example.com",
"nova012.example.com",
"nova013.example.com",
"nova014.example.com",
"nova015.example.com",
"nova016.example.com",
"nova017.example.com",
"nova018.example.com",
"nova019.example.com",
inherits novabase {
include novaconf
ncompute { nova: version => "0.9.0" }
nvolume { nova: version => "0.9.0" }
}
#node
# "nova020.example.com"
# "nova021.example.com"
#inherits novanode {
# include novaconf
#ncompute { nova: version => "0.9.0" }
#}

View File

@@ -1,21 +0,0 @@
# templates.pp
import "classes/*"
class baseclass {
# include dns-client # FIXME: missing resolv.conf.erb??
include issue
}
node default {
$nova_site = "undef"
$nova_ns1 = "undef"
$nova_ns2 = "undef"
# include baseclass
}
# novanode handles the system-level requirements for Nova/Swift nodes
class novanode {
include baseclass
include lvmconf
}

View File

@@ -1,11 +0,0 @@
[main]
logdir=/var/log/puppet
vardir=/var/lib/puppet
ssldir=/var/lib/puppet/ssl
rundir=/var/run/puppet
factpath=$vardir/lib/facter
pluginsync=false
[puppetmasterd]
templatedir=/var/lib/nova/contrib/puppet/templates
autosign=true

View File

@@ -1,39 +0,0 @@
# this config needs haproxy-1.1.28 or haproxy-1.2.1
global
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
#log loghost local0 info
maxconn 4096
#chroot /usr/share/haproxy
stats socket /var/run/haproxy.sock
user haproxy
group haproxy
daemon
#debug
#quiet
defaults
log global
mode http
option httplog
option dontlognull
retries 3
option redispatch
stats enable
stats uri /haproxy
maxconn 2000
contimeout 5000
clitimeout 50000
srvtimeout 50000
listen nova-api 0.0.0.0:8773
option httpchk GET / HTTP/1.0\r\nHost:\ example.com
option forwardfor
reqidel ^X-Forwarded-For:.*
balance roundrobin
<% api_servers.to_i.times do |offset| %><% port = api_base_port.to_i + offset -%>
server api_<%= port %> 127.0.0.1:<%= port %> maxconn 1 check
<% end -%>
option httpclose # disable keep-alive
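# Expansion sketch (hypothetical values): with api_servers=3 and
# api_base_port=8000 the ERB loop above renders:
#   server api_8000 127.0.0.1:8000 maxconn 1 check
#   server api_8001 127.0.0.1:8001 maxconn 1 check
#   server api_8002 127.0.0.1:8002 maxconn 1 check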

View File

@@ -1,138 +0,0 @@
###############################################################################
## Monit control file
###############################################################################
##
## Comments begin with a '#' and extend through the end of the line. Keywords
## are case insensitive. All paths MUST BE FULLY QUALIFIED, starting with '/'.
##
## Below you will find examples of some frequently used statements. For
## information about the control file, a complete list of statements and
## options please have a look in the monit manual.
##
##
###############################################################################
## Global section
###############################################################################
##
## Start monit in the background (run as a daemon):
#
set daemon 60 # check services at 1-minute intervals
with start delay 30 # optional: delay the first check by half a minute
# (by default check immediately after monit start)
## Set syslog logging with the 'daemon' facility. If the FACILITY option is
## omitted, monit will use 'user' facility by default. If you want to log to
## a stand alone log file instead, specify the path to a log file
#
set logfile syslog facility log_daemon
#
#
### Set the location of monit id file which saves the unique id specific for
### given monit. The id is generated and stored on first monit start.
### By default the file is placed in $HOME/.monit.id.
#
# set idfile /var/.monit.id
#
### Set the location of monit state file which saves the monitoring state
### on each cycle. By default the file is placed in $HOME/.monit.state. If
### state file is stored on persistent filesystem, monit will recover the
### monitoring state across reboots. If it is on temporary filesystem, the
### state will be lost on reboot.
#
# set statefile /var/.monit.state
#
## Set the list of mail servers for alert delivery. Multiple servers may be
## specified using comma separator. By default monit uses port 25 - this
## is possible to override with the PORT option.
#
# set mailserver mail.bar.baz, # primary mailserver
# backup.bar.baz port 10025, # backup mailserver on port 10025
# localhost # fallback relay
#
#
## By default monit will drop alert events if no mail servers are available.
## If you want to keep the alerts for a later delivery retry, you can use the
## EVENTQUEUE statement. The base directory where undelivered alerts will be
## stored is specified by the BASEDIR option. You can limit the maximal queue
## size using the SLOTS option (if omitted, the queue is limited by space
## available in the back end filesystem).
#
# set eventqueue
# basedir /var/monit # set the base directory where events will be stored
# slots 100 # optionally limit the queue size
#
#
## Send status and events to M/Monit (Monit central management: for more
## information about M/Monit see http://www.tildeslash.com/mmonit).
#
# set mmonit http://monit:monit@192.168.1.10:8080/collector
#
#
## Monit by default uses the following alert mail format:
##
## --8<--
## From: monit@$HOST # sender
## Subject: monit alert -- $EVENT $SERVICE # subject
##
## $EVENT Service $SERVICE #
## #
## Date: $DATE #
## Action: $ACTION #
## Host: $HOST # body
## Description: $DESCRIPTION #
## #
## Your faithful employee, #
## monit #
## --8<--
##
## You can override this message format or parts of it, such as subject
## or sender using the MAIL-FORMAT statement. Macros such as $DATE, etc.
## are expanded at runtime. For example, to override the sender:
#
# set mail-format { from: monit@foo.bar }
#
#
## You can set alert recipients here who will receive alerts if/when a
## service defined in this file has errors. Alerts may be restricted on
## events by using a filter as in the second example below.
#
# set alert sysadm@foo.bar # receive all alerts
# set alert manager@foo.bar only on { timeout } # receive just service-
# # timeout alert
#
#
## Monit has an embedded web server which can be used to view status of
## services monitored, the current configuration, actual services parameters
## and manage services from a web interface.
#
set httpd port 2812 and
use address localhost # only accept connection from localhost
allow localhost # allow localhost to connect to the server and
# allow admin:monit # require user 'admin' with password 'monit'
# allow @monit # allow users of group 'monit' to connect (rw)
# allow @users readonly # allow users of group 'users' to connect readonly
#
#
###############################################################################
## Services
###############################################################################
<% api_servers.to_i.times do |offset| %><% port = api_base_port.to_i + offset %>
check process nova_api_<%= port %> with pidfile /var/run/nova/nova-api-<%= port %>.pid
group nova_api
start program = "/usr/bin/nova-api --flagfile=/etc/nova/nova.conf --pidfile=/var/run/nova/nova-api-<%= port %>.pid --api_listen_port=<%= port %> --lockfile=/var/run/nova/nova-api-<%= port %>.pid.lock start"
as uid nova
stop program = "/usr/bin/nova-api --flagfile=/etc/nova/nova.conf --pidfile=/var/run/nova/nova-api-<%= port %>.pid --api_listen_port=<%= port %> --lockfile=/var/run/nova/nova-api-<%= port %>.pid.lock stop"
as uid nova
if failed port <%= port %> protocol http
with timeout 15 seconds
for 4 cycles
then restart
if totalmem > 300 Mb then restart
if cpu is greater than 60% for 2 cycles then alert
if cpu > 80% for 3 cycles then restart
if 3 restarts within 5 cycles then timeout
<% end %>
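## Rendering sketch (hypothetical values): with api_servers=1 and
## api_base_port=8000 the loop above emits a single stanza,
## "check process nova_api_8000 with pidfile /var/run/nova/nova-api-8000.pid",
## plus its start/stop commands and restart rules, one stanza per API server.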

View File

@@ -1,10 +0,0 @@
<% services.each do |service| -%>
<%= service.upcase %>=1
<% end -%>
<% if ip && ip != "" %>IP="<%=ip%>"<% end %>
<% if private_range && private_range != "" %>PRIVATE_RANGE="<%=private_range%>"<% end %>
<% if mgmt_ip && mgmt_ip != "" %>MGMT_IP="<%=mgmt_ip%>"<% end %>
<% if dmz_ip && dmz_ip != "" %>DMZ_IP="<%=dmz_ip%>"<% end %>
# warning: this file is auto-generated by puppet
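# Rendering sketch (hypothetical values): services=["api","mysql"],
# ip="192.168.0.10" and dmz_ip="192.168.24.10" would produce:
#   API=1
#   MYSQL=1
#   IP="192.168.0.10"
#   DMZ_IP="192.168.24.10"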

View File

@@ -1,55 +0,0 @@
# global
--dmz_net=192.168.0.0
--dmz_mask=255.255.0.0
--dmz_cidr=192.168.0.0/16
--ldap_user_dn=cn=Administrators,dc=example,dc=com
--ldap_user_unit=Users
--ldap_user_subtree=ou=Users,dc=example,dc=com
--ldap_project_subtree=ou=Groups,dc=example,dc=com
--role_project_subtree=ou=Groups,dc=example,dc=com
--ldap_cloudadmin=cn=NovaAdmins,ou=Groups,dc=example,dc=com
--ldap_itsec=cn=NovaSecurity,ou=Groups,dc=example,dc=com
--ldap_sysadmin=cn=Administrators,ou=Groups,dc=example,dc=com
--ldap_netadmin=cn=Administrators,ou=Groups,dc=example,dc=com
--ldap_developer=cn=developers,ou=Groups,dc=example,dc=com
--verbose
--daemonize
--syslog
--networks_path=/var/lib/nova/networks
--instances_path=/var/lib/nova/instances
--buckets_path=/var/lib/nova/objectstore/buckets
--images_path=/var/lib/nova/objectstore/images
--scheduler_driver=nova.scheduler.simple.SimpleScheduler
--libvirt_xml_template=/usr/share/nova/libvirt.qemu.xml.template
--credentials_template=/usr/share/nova/novarc.template
--boot_script_template=/usr/share/nova/bootscript.template
--vpn_client_template=/usr/share/nova/client.ovpn.template
--max_cores=40
--max_gigabytes=2000
--ca_path=/var/lib/nova/CA
--keys_path=/var/lib/nova/keys
--vpn_start=11000
--volume_group=vgdata
--volume_manager=nova.volume.manager.ISCSIManager
--volume_driver=nova.volume.driver.ISCSIDriver
--default_kernel=aki-DEFAULT
--default_ramdisk=ari-DEFAULT
--dhcpbridge=/usr/bin/nova-dhcpbridge
--vpn_image_id=ami-cloudpipe
--dhcpbridge_flagfile=/etc/nova/nova.conf
--credential_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=NOVA/CN=%s-%s
--auth_driver=nova.auth.ldapdriver.LdapDriver
--quota_cores=17
--quota_floating_ips=5
--quota_instances=6
--quota_volumes=10
--quota_gigabytes=100
--use_nova_chains=True
--input_chain=services
--use_project_ca=True
--fixed_ip_disassociate_timeout=300
--api_max_requests=1
--api_listen_ip=127.0.0.1
--user_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=Nova/CN=%s-%s-%s
--project_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=Nova/CN=project-ca-%s-%s
--vpn_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=Nova/CN=project-vpn-%s-%s

View File

@@ -1,21 +0,0 @@
--fixed_range=192.168.0.0/16
--iscsi_ip_prefix=192.168.4
--floating_range=10.0.0.0/24
--rabbit_host=192.168.0.10
--s3_host=192.168.0.10
--cc_host=192.168.0.10
--cc_dmz=192.168.24.10
--s3_dmz=192.168.24.10
--ec2_url=http://192.168.0.1:8773/services/Cloud
--vpn_ip=192.168.0.2
--ldap_url=ldap://192.168.0.10
--sql_connection=mysql://nova:TODO-MYPASS@192.168.0.10/nova
--other_sql_connection=mysql://nova:TODO-MYPASS@192.168.0.10/nova
--routing_source_ip=192.168.0.2
--bridge_dev=eth1
--public_interface=eth0
--vlan_start=3100
--num_networks=700
--rabbit_userid=TODO:RABBIT
--rabbit_password=TODO:CHANGEME
--ldap_password=TODO:CHANGEME

406
doc/.autogenerated Normal file
View File

@@ -0,0 +1,406 @@
source/api/nova..adminclient.rst
source/api/nova..api.direct.rst
source/api/nova..api.ec2.admin.rst
source/api/nova..api.ec2.apirequest.rst
source/api/nova..api.ec2.cloud.rst
source/api/nova..api.ec2.metadatarequesthandler.rst
source/api/nova..api.openstack.auth.rst
source/api/nova..api.openstack.backup_schedules.rst
source/api/nova..api.openstack.common.rst
source/api/nova..api.openstack.consoles.rst
source/api/nova..api.openstack.faults.rst
source/api/nova..api.openstack.flavors.rst
source/api/nova..api.openstack.images.rst
source/api/nova..api.openstack.servers.rst
source/api/nova..api.openstack.shared_ip_groups.rst
source/api/nova..api.openstack.zones.rst
source/api/nova..auth.dbdriver.rst
source/api/nova..auth.fakeldap.rst
source/api/nova..auth.ldapdriver.rst
source/api/nova..auth.manager.rst
source/api/nova..auth.signer.rst
source/api/nova..cloudpipe.pipelib.rst
source/api/nova..compute.api.rst
source/api/nova..compute.instance_types.rst
source/api/nova..compute.manager.rst
source/api/nova..compute.monitor.rst
source/api/nova..compute.power_state.rst
source/api/nova..console.api.rst
source/api/nova..console.fake.rst
source/api/nova..console.manager.rst
source/api/nova..console.xvp.rst
source/api/nova..context.rst
source/api/nova..crypto.rst
source/api/nova..db.api.rst
source/api/nova..db.base.rst
source/api/nova..db.migration.rst
source/api/nova..db.sqlalchemy.api.rst
source/api/nova..db.sqlalchemy.migrate_repo.manage.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
source/api/nova..db.sqlalchemy.migration.rst
source/api/nova..db.sqlalchemy.models.rst
source/api/nova..db.sqlalchemy.session.rst
source/api/nova..exception.rst
source/api/nova..fakememcache.rst
source/api/nova..fakerabbit.rst
source/api/nova..flags.rst
source/api/nova..image.glance.rst
source/api/nova..image.local.rst
source/api/nova..image.s3.rst
source/api/nova..image.service.rst
source/api/nova..log.rst
source/api/nova..manager.rst
source/api/nova..network.api.rst
source/api/nova..network.linux_net.rst
source/api/nova..network.manager.rst
source/api/nova..objectstore.bucket.rst
source/api/nova..objectstore.handler.rst
source/api/nova..objectstore.image.rst
source/api/nova..objectstore.stored.rst
source/api/nova..quota.rst
source/api/nova..rpc.rst
source/api/nova..scheduler.chance.rst
source/api/nova..scheduler.driver.rst
source/api/nova..scheduler.manager.rst
source/api/nova..scheduler.simple.rst
source/api/nova..scheduler.zone.rst
source/api/nova..service.rst
source/api/nova..test.rst
source/api/nova..tests.api.openstack.fakes.rst
source/api/nova..tests.api.openstack.test_adminapi.rst
source/api/nova..tests.api.openstack.test_api.rst
source/api/nova..tests.api.openstack.test_auth.rst
source/api/nova..tests.api.openstack.test_common.rst
source/api/nova..tests.api.openstack.test_faults.rst
source/api/nova..tests.api.openstack.test_flavors.rst
source/api/nova..tests.api.openstack.test_images.rst
source/api/nova..tests.api.openstack.test_ratelimiting.rst
source/api/nova..tests.api.openstack.test_servers.rst
source/api/nova..tests.api.openstack.test_shared_ip_groups.rst
source/api/nova..tests.api.openstack.test_zones.rst
source/api/nova..tests.api.test_wsgi.rst
source/api/nova..tests.db.fakes.rst
source/api/nova..tests.declare_flags.rst
source/api/nova..tests.fake_flags.rst
source/api/nova..tests.glance.stubs.rst
source/api/nova..tests.hyperv_unittest.rst
source/api/nova..tests.objectstore_unittest.rst
source/api/nova..tests.real_flags.rst
source/api/nova..tests.runtime_flags.rst
source/api/nova..tests.test_access.rst
source/api/nova..tests.test_api.rst
source/api/nova..tests.test_auth.rst
source/api/nova..tests.test_cloud.rst
source/api/nova..tests.test_compute.rst
source/api/nova..tests.test_console.rst
source/api/nova..tests.test_direct.rst
source/api/nova..tests.test_flags.rst
source/api/nova..tests.test_localization.rst
source/api/nova..tests.test_log.rst
source/api/nova..tests.test_middleware.rst
source/api/nova..tests.test_misc.rst
source/api/nova..tests.test_network.rst
source/api/nova..tests.test_quota.rst
source/api/nova..tests.test_rpc.rst
source/api/nova..tests.test_scheduler.rst
source/api/nova..tests.test_service.rst
source/api/nova..tests.test_twistd.rst
source/api/nova..tests.test_virt.rst
source/api/nova..tests.test_volume.rst
source/api/nova..tests.test_xenapi.rst
source/api/nova..tests.xenapi.stubs.rst
source/api/nova..twistd.rst
source/api/nova..utils.rst
source/api/nova..version.rst
source/api/nova..virt.connection.rst
source/api/nova..virt.disk.rst
source/api/nova..virt.fake.rst
source/api/nova..virt.hyperv.rst
source/api/nova..virt.images.rst
source/api/nova..virt.libvirt_conn.rst
source/api/nova..virt.xenapi.fake.rst
source/api/nova..virt.xenapi.network_utils.rst
source/api/nova..virt.xenapi.vm_utils.rst
source/api/nova..virt.xenapi.vmops.rst
source/api/nova..virt.xenapi.volume_utils.rst
source/api/nova..virt.xenapi.volumeops.rst
source/api/nova..virt.xenapi_conn.rst
source/api/nova..volume.api.rst
source/api/nova..volume.driver.rst
source/api/nova..volume.manager.rst
source/api/nova..volume.san.rst
source/api/nova..wsgi.rst
source/api/autoindex.rst
source/api/nova..adminclient.rst
source/api/nova..api.direct.rst
source/api/nova..api.ec2.admin.rst
source/api/nova..api.ec2.apirequest.rst
source/api/nova..api.ec2.cloud.rst
source/api/nova..api.ec2.metadatarequesthandler.rst
source/api/nova..api.openstack.auth.rst
source/api/nova..api.openstack.backup_schedules.rst
source/api/nova..api.openstack.common.rst
source/api/nova..api.openstack.consoles.rst
source/api/nova..api.openstack.faults.rst
source/api/nova..api.openstack.flavors.rst
source/api/nova..api.openstack.images.rst
source/api/nova..api.openstack.servers.rst
source/api/nova..api.openstack.shared_ip_groups.rst
source/api/nova..api.openstack.zones.rst
source/api/nova..auth.dbdriver.rst
source/api/nova..auth.fakeldap.rst
source/api/nova..auth.ldapdriver.rst
source/api/nova..auth.manager.rst
source/api/nova..auth.signer.rst
source/api/nova..cloudpipe.pipelib.rst
source/api/nova..compute.api.rst
source/api/nova..compute.instance_types.rst
source/api/nova..compute.manager.rst
source/api/nova..compute.monitor.rst
source/api/nova..compute.power_state.rst
source/api/nova..console.api.rst
source/api/nova..console.fake.rst
source/api/nova..console.manager.rst
source/api/nova..console.xvp.rst
source/api/nova..context.rst
source/api/nova..crypto.rst
source/api/nova..db.api.rst
source/api/nova..db.base.rst
source/api/nova..db.migration.rst
source/api/nova..db.sqlalchemy.api.rst
source/api/nova..db.sqlalchemy.migrate_repo.manage.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
source/api/nova..db.sqlalchemy.migration.rst
source/api/nova..db.sqlalchemy.models.rst
source/api/nova..db.sqlalchemy.session.rst
source/api/nova..exception.rst
source/api/nova..fakememcache.rst
source/api/nova..fakerabbit.rst
source/api/nova..flags.rst
source/api/nova..image.glance.rst
source/api/nova..image.local.rst
source/api/nova..image.s3.rst
source/api/nova..image.service.rst
source/api/nova..log.rst
source/api/nova..manager.rst
source/api/nova..network.api.rst
source/api/nova..network.linux_net.rst
source/api/nova..network.manager.rst
source/api/nova..objectstore.bucket.rst
source/api/nova..objectstore.handler.rst
source/api/nova..objectstore.image.rst
source/api/nova..objectstore.stored.rst
source/api/nova..quota.rst
source/api/nova..rpc.rst
source/api/nova..scheduler.chance.rst
source/api/nova..scheduler.driver.rst
source/api/nova..scheduler.manager.rst
source/api/nova..scheduler.simple.rst
source/api/nova..scheduler.zone.rst
source/api/nova..service.rst
source/api/nova..test.rst
source/api/nova..tests.api.openstack.fakes.rst
source/api/nova..tests.api.openstack.test_adminapi.rst
source/api/nova..tests.api.openstack.test_api.rst
source/api/nova..tests.api.openstack.test_auth.rst
source/api/nova..tests.api.openstack.test_common.rst
source/api/nova..tests.api.openstack.test_faults.rst
source/api/nova..tests.api.openstack.test_flavors.rst
source/api/nova..tests.api.openstack.test_images.rst
source/api/nova..tests.api.openstack.test_ratelimiting.rst
source/api/nova..tests.api.openstack.test_servers.rst
source/api/nova..tests.api.openstack.test_shared_ip_groups.rst
source/api/nova..tests.api.openstack.test_zones.rst
source/api/nova..tests.api.test_wsgi.rst
source/api/nova..tests.db.fakes.rst
source/api/nova..tests.declare_flags.rst
source/api/nova..tests.fake_flags.rst
source/api/nova..tests.glance.stubs.rst
source/api/nova..tests.hyperv_unittest.rst
source/api/nova..tests.objectstore_unittest.rst
source/api/nova..tests.real_flags.rst
source/api/nova..tests.runtime_flags.rst
source/api/nova..tests.test_access.rst
source/api/nova..tests.test_api.rst
source/api/nova..tests.test_auth.rst
source/api/nova..tests.test_cloud.rst
source/api/nova..tests.test_compute.rst
source/api/nova..tests.test_console.rst
source/api/nova..tests.test_direct.rst
source/api/nova..tests.test_flags.rst
source/api/nova..tests.test_localization.rst
source/api/nova..tests.test_log.rst
source/api/nova..tests.test_middleware.rst
source/api/nova..tests.test_misc.rst
source/api/nova..tests.test_network.rst
source/api/nova..tests.test_quota.rst
source/api/nova..tests.test_rpc.rst
source/api/nova..tests.test_scheduler.rst
source/api/nova..tests.test_service.rst
source/api/nova..tests.test_twistd.rst
source/api/nova..tests.test_virt.rst
source/api/nova..tests.test_volume.rst
source/api/nova..tests.test_xenapi.rst
source/api/nova..tests.xenapi.stubs.rst
source/api/nova..twistd.rst
source/api/nova..utils.rst
source/api/nova..version.rst
source/api/nova..virt.connection.rst
source/api/nova..virt.disk.rst
source/api/nova..virt.fake.rst
source/api/nova..virt.hyperv.rst
source/api/nova..virt.images.rst
source/api/nova..virt.libvirt_conn.rst
source/api/nova..virt.xenapi.fake.rst
source/api/nova..virt.xenapi.network_utils.rst
source/api/nova..virt.xenapi.vm_utils.rst
source/api/nova..virt.xenapi.vmops.rst
source/api/nova..virt.xenapi.volume_utils.rst
source/api/nova..virt.xenapi.volumeops.rst
source/api/nova..virt.xenapi_conn.rst
source/api/nova..volume.api.rst
source/api/nova..volume.driver.rst
source/api/nova..volume.manager.rst
source/api/nova..volume.san.rst
source/api/nova..wsgi.rst
source/api/nova..adminclient.rst
source/api/nova..api.direct.rst
source/api/nova..api.ec2.admin.rst
source/api/nova..api.ec2.apirequest.rst
source/api/nova..api.ec2.cloud.rst
source/api/nova..api.ec2.metadatarequesthandler.rst
source/api/nova..api.openstack.auth.rst
source/api/nova..api.openstack.backup_schedules.rst
source/api/nova..api.openstack.common.rst
source/api/nova..api.openstack.consoles.rst
source/api/nova..api.openstack.faults.rst
source/api/nova..api.openstack.flavors.rst
source/api/nova..api.openstack.images.rst
source/api/nova..api.openstack.servers.rst
source/api/nova..api.openstack.shared_ip_groups.rst
source/api/nova..api.openstack.zones.rst
source/api/nova..auth.dbdriver.rst
source/api/nova..auth.fakeldap.rst
source/api/nova..auth.ldapdriver.rst
source/api/nova..auth.manager.rst
source/api/nova..auth.signer.rst
source/api/nova..cloudpipe.pipelib.rst
source/api/nova..compute.api.rst
source/api/nova..compute.instance_types.rst
source/api/nova..compute.manager.rst
source/api/nova..compute.monitor.rst
source/api/nova..compute.power_state.rst
source/api/nova..console.api.rst
source/api/nova..console.fake.rst
source/api/nova..console.manager.rst
source/api/nova..console.xvp.rst
source/api/nova..context.rst
source/api/nova..crypto.rst
source/api/nova..db.api.rst
source/api/nova..db.base.rst
source/api/nova..db.migration.rst
source/api/nova..db.sqlalchemy.api.rst
source/api/nova..db.sqlalchemy.migrate_repo.manage.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
source/api/nova..db.sqlalchemy.migration.rst
source/api/nova..db.sqlalchemy.models.rst
source/api/nova..db.sqlalchemy.session.rst
source/api/nova..exception.rst
source/api/nova..fakememcache.rst
source/api/nova..fakerabbit.rst
source/api/nova..flags.rst
source/api/nova..image.glance.rst
source/api/nova..image.local.rst
source/api/nova..image.s3.rst
source/api/nova..image.service.rst
source/api/nova..log.rst
source/api/nova..manager.rst
source/api/nova..network.api.rst
source/api/nova..network.linux_net.rst
source/api/nova..network.manager.rst
source/api/nova..objectstore.bucket.rst
source/api/nova..objectstore.handler.rst
source/api/nova..objectstore.image.rst
source/api/nova..objectstore.stored.rst
source/api/nova..quota.rst
source/api/nova..rpc.rst
source/api/nova..scheduler.chance.rst
source/api/nova..scheduler.driver.rst
source/api/nova..scheduler.manager.rst
source/api/nova..scheduler.simple.rst
source/api/nova..scheduler.zone.rst
source/api/nova..service.rst
source/api/nova..test.rst
source/api/nova..tests.api.openstack.fakes.rst
source/api/nova..tests.api.openstack.test_adminapi.rst
source/api/nova..tests.api.openstack.test_api.rst
source/api/nova..tests.api.openstack.test_auth.rst
source/api/nova..tests.api.openstack.test_common.rst
source/api/nova..tests.api.openstack.test_faults.rst
source/api/nova..tests.api.openstack.test_flavors.rst
source/api/nova..tests.api.openstack.test_images.rst
source/api/nova..tests.api.openstack.test_ratelimiting.rst
source/api/nova..tests.api.openstack.test_servers.rst
source/api/nova..tests.api.openstack.test_shared_ip_groups.rst
source/api/nova..tests.api.openstack.test_zones.rst
source/api/nova..tests.api.test_wsgi.rst
source/api/nova..tests.db.fakes.rst
source/api/nova..tests.declare_flags.rst
source/api/nova..tests.fake_flags.rst
source/api/nova..tests.glance.stubs.rst
source/api/nova..tests.hyperv_unittest.rst
source/api/nova..tests.objectstore_unittest.rst
source/api/nova..tests.real_flags.rst
source/api/nova..tests.runtime_flags.rst
source/api/nova..tests.test_access.rst
source/api/nova..tests.test_api.rst
source/api/nova..tests.test_auth.rst
source/api/nova..tests.test_cloud.rst
source/api/nova..tests.test_compute.rst
source/api/nova..tests.test_console.rst
source/api/nova..tests.test_direct.rst
source/api/nova..tests.test_flags.rst
source/api/nova..tests.test_localization.rst
source/api/nova..tests.test_log.rst
source/api/nova..tests.test_middleware.rst
source/api/nova..tests.test_misc.rst
source/api/nova..tests.test_network.rst
source/api/nova..tests.test_quota.rst
source/api/nova..tests.test_rpc.rst
source/api/nova..tests.test_scheduler.rst
source/api/nova..tests.test_service.rst
source/api/nova..tests.test_twistd.rst
source/api/nova..tests.test_virt.rst
source/api/nova..tests.test_volume.rst
source/api/nova..tests.test_xenapi.rst
source/api/nova..tests.xenapi.stubs.rst
source/api/nova..twistd.rst
source/api/nova..utils.rst
source/api/nova..version.rst
source/api/nova..virt.connection.rst
source/api/nova..virt.disk.rst
source/api/nova..virt.fake.rst
source/api/nova..virt.hyperv.rst
source/api/nova..virt.images.rst
source/api/nova..virt.libvirt_conn.rst
source/api/nova..virt.xenapi.fake.rst
source/api/nova..virt.xenapi.network_utils.rst
source/api/nova..virt.xenapi.vm_utils.rst
source/api/nova..virt.xenapi.vmops.rst
source/api/nova..virt.xenapi.volume_utils.rst
source/api/nova..virt.xenapi.volumeops.rst
source/api/nova..virt.xenapi_conn.rst
source/api/nova..volume.api.rst
source/api/nova..volume.driver.rst
source/api/nova..volume.manager.rst
source/api/nova..volume.san.rst
source/api/nova..wsgi.rst

4
doc/build/html/.buildinfo vendored Normal file
View File

@@ -0,0 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: 2a2fe6198f4be4a4d6f289b09d16d74a
tags: fbb0d17656682115ca4d033fb2f83ba1

View File

@@ -1,88 +0,0 @@
Installation on other distros (like Debian, Fedora or CentOS)
==============================================================
Feel free to add notes for additional distributions.
Nova installation on CentOS 5.5
-------------------------------
These are notes for installing OpenStack Compute on CentOS 5.5; they will be updated but are NOT final. Please test for accuracy and edit as you see fit.
The principal bottleneck for running Nova on CentOS is Python 2.6: Nova is written in Python 2.6, while CentOS 5.5 ships with Python 2.4. We cannot update Python system-wide because some core utilities (like yum) depend on Python 2.4, and very few Python 2.6 modules are available in the CentOS/EPEL repos.
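A quick sanity check once the packages below are installed (a sketch; the python26 binary comes from the EPEL package)::
python -V     # system interpreter, 2.4.x on CentOS 5.5
python26 -V   # parallel 2.6 interpreter used for Nova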
Pre-reqs
--------
Add the euca2ools and EPEL repos first::
cat >/etc/yum.repos.d/euca2ools.repo << EUCA_REPO_CONF_EOF
[eucalyptus]
name=euca2ools
baseurl=http://www.eucalyptussoftware.com/downloads/repo/euca2ools/1.3.1/yum/centos/
enabled=1
gpgcheck=0
EUCA_REPO_CONF_EOF
::
rpm -Uvh 'http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-4.noarch.rpm'
Now install python2.6, kvm and a few other libraries through yum::
yum -y install dnsmasq vblade kpartx kvm gawk iptables ebtables bzr screen euca2ools curl rabbitmq-server gcc gcc-c++ autoconf automake swig openldap openldap-servers nginx python26 python26-devel python26-distribute git openssl-devel python26-tools mysql-server qemu kmod-kvm libxml2 libxslt libxslt-devel mysql-devel
Then download the latest aoetools and build (and install) them; check for the latest version on SourceForge, as the exact URL will change if there's a new release::
wget -c http://sourceforge.net/projects/aoetools/files/aoetools/32/aoetools-32.tar.gz/download -O aoetools-32.tar.gz
tar -zxvf aoetools-32.tar.gz
cd aoetools-32
make
make install
Add the udev rules for aoetools::
cat > /etc/udev/rules.d/60-aoe.rules << AOE_RULES_EOF
SUBSYSTEM=="aoe", KERNEL=="discover", NAME="etherd/%k", GROUP="disk", MODE="0220"
SUBSYSTEM=="aoe", KERNEL=="err", NAME="etherd/%k", GROUP="disk", MODE="0440"
SUBSYSTEM=="aoe", KERNEL=="interfaces", NAME="etherd/%k", GROUP="disk", MODE="0220"
SUBSYSTEM=="aoe", KERNEL=="revalidate", NAME="etherd/%k", GROUP="disk", MODE="0220"
# aoe block devices
KERNEL=="etherd*", NAME="%k", GROUP="disk"
AOE_RULES_EOF
Load the kernel modules::
modprobe aoe
::
modprobe kvm
Now install the Python modules using easy_install-2.6; this ensures the installations are done against Python 2.6::
easy_install-2.6 twisted sqlalchemy mox greenlet carrot daemon eventlet tornado IPy routes lxml MySQL-python
python-gflags needs to be downloaded and installed manually; use these commands (check the exact URL for newer releases):
::
wget -c "http://python-gflags.googlecode.com/files/python-gflags-1.4.tar.gz"
tar -zxvf python-gflags-1.4.tar.gz
cd python-gflags-1.4
python2.6 setup.py install
cd ..
Do the same for the python2.6-libxml2 module; note the --with-python and --prefix flags. --with-python ensures we build against python2.6 (otherwise it builds against python2.4, which is the default)::
wget -c "ftp://xmlsoft.org/libxml2/libxml2-2.7.3.tar.gz"
tar -zxvf libxml2-2.7.3.tar.gz
cd libxml2-2.7.3
./configure --with-python=/usr/bin/python26 --prefix=/usr
make all
make install
cd python
python2.6 setup.py install
cd ..
Once you've done this, continue at Step 3 here: :doc:`../single.node.install`

View File

@@ -1,40 +0,0 @@
Installing on Ubuntu 10.04 (Lucid)
==================================
Step 1: Get the latest code
---------------------------
Grab the latest code from launchpad:
::
bzr clone lp:nova
Here's a script you can use to install (and then run) Nova on Ubuntu or Debian (when using Debian, edit nova.sh to have USE_PPA=0):
.. todo:: give a link to a stable releases page
Step 2: Install dependencies
----------------------------
Nova requires rabbitmq for messaging, so install that first.
*Note:* You must have sudo installed to run these commands as shown here.
::
sudo apt-get install rabbitmq-server
You'll see messages starting with "Reading package lists... Done" and you must confirm by typing Y that you want to continue.
If you're running on Ubuntu 10.04, you'll need to install Twisted and python-gflags, which are included in the OpenStack PPA.
::
sudo apt-get install python-software-properties
sudo add-apt-repository ppa:nova-core/trunk
sudo apt-get update
sudo apt-get install python-twisted python-gflags
Once you've done this, continue at Step 3 here: :doc:`../single.node.install`

View File

@@ -1,41 +0,0 @@
Installing on Ubuntu 10.10 (Maverick)
=====================================
Single Machine Installation (Ubuntu 10.10)
------------------------------------------
While we wouldn't expect you to put OpenStack Compute into production on a non-LTS version of Ubuntu, these instructions are up-to-date with the latest version of Ubuntu.
Make sure you are running Ubuntu 10.10 so that the packages will be available. This install requires more than 70 MB of free disk space.
These instructions are based on Soren Hansen's blog entry, Openstack on Maverick. A script is in progress as well.
Step 1: Install required prerequisites
--------------------------------------
Nova requires rabbitmq for messaging and redis for storing state (for now), so we'll install these first::
sudo apt-get install rabbitmq-server redis-server
You'll see messages starting with "Reading package lists... Done" and you must confirm by typing Y that you want to continue.
Step 2: Install Nova packages available in Maverick Meerkat
-----------------------------------------------------------
Type or copy/paste the following lines to get the packages that you use to run OpenStack Compute::
sudo apt-get install python-nova
sudo apt-get install nova-api nova-objectstore nova-compute nova-scheduler nova-network euca2ools unzip
You'll see messages starting with "Reading package lists... Done" and you must confirm by typing Y that you want to continue. This operation may take a while as many dependent packages will be installed. Note: there is a dependency problem with python-nova, which can be worked around by installing it first as shown above.
When the installation is complete, you'll see the following lines confirming::
Adding system user `nova' (UID 106) ...
Adding new user `nova' (UID 106) with group `nogroup' ...
Not creating home directory `/var/lib/nova'.
Setting up nova-scheduler (0.9.1~bzr331-0ubuntu2) ...
* Starting nova scheduler nova-scheduler
WARNING:root:Starting scheduler node
...done.
Processing triggers for libc-bin ...
ldconfig deferred processing now taking place
Processing triggers for python-support ...
Once you've done this, continue at Step 3 here: :doc:`../single.node.install`

View File

@@ -1,23 +0,0 @@
..
Copyright 2010-2011 United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Flags and Flagfiles
===================
* python-gflags
* flagfiles
* list of flags by component (see concepts list)

View File

@@ -1,392 +0,0 @@
Installing Nova on Multiple Servers
===================================
When you move beyond evaluating the technology and into building an actual
production environment, you will need to know how to configure your datacenter
and how to deploy components across your clusters. This guide should help you
through that process.
You can install multiple nodes to increase performance and availability of the OpenStack Compute installation.
This setup is based on an Ubuntu Lucid 10.04 installation with the latest updates. Much of this works around issues that still need to be resolved either in packaging or through bug fixes. It also needs to eventually be generalized, but the intent here is to get the multi-node configuration bootstrapped so folks can move forward.
For a starting architecture, these instructions describe installing a cloud controller node and a compute node. The cloud controller node contains the nova- services plus the database. The compute node installs all the nova- services but then refers to the database installation, which is hosted by the cloud controller node.
Requirements for a multi-node installation
------------------------------------------
* You need a real database, compatible with SQLAlchemy (MySQL, PostgreSQL). There's no specific reason to choose one over the other; it basically depends on which you know. MySQL is easier to do High Availability (HA) with, but people may already know PostgreSQL. We should document both configurations, though.
* For a recommended HA setup, consider a MySQL master/slave replication, with as many slaves as you like, and probably a heartbeat to kick one of the slaves into being a master if it dies.
* For performance optimization, split reads and writes to the database. MySQL proxy is the easiest way to make this work if running MySQL.
Assumptions
-----------
* Networking is configured between/through the physical machines on a single subnet.
* Installation and execution are both performed by the root user.
Scripted Installation
---------------------
A script is available to get your OpenStack cloud running quickly. You can copy the file to the server where you want to install OpenStack Compute services - typically you would install a compute node and a cloud controller node.
You must run these scripts with root permissions.
From a server you intend to use as a cloud controller node, use this command to get the cloud controller script. This script is a work-in-progress and the maintainer plans to keep it up to date, but it is offered "as-is." Feel free to collaborate on it in GitHub - https://github.com/dubsquared/OpenStack-NOVA-Installer-Script/.
::
wget --no-check-certificate https://github.com/dubsquared/OpenStack-NOVA-Installer-Script/raw/master/nova-CC-install-v1.1.sh
Ensure you can execute the script by modifying the permissions on the script file.
::
sudo chmod 755 nova-CC-install-v1.1.sh
Run the script::
sudo ./nova-CC-install-v1.1.sh
Next, from a server you intend to use as a compute node (doesn't contain the database), install the nova services. You can use the nova-NODE-installer.sh script from the above github-hosted project for the compute node installation.
Copy the nova.conf from the cloud controller node to the compute node.
Restart related services::
service libvirt-bin restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart
You can go to the `Configuration section`_ for next steps.
Manual Installation - Step-by-Step
----------------------------------
The following sections show you how to install Nova manually with a cloud controller node and a separate compute node. The cloud controller node contains the database plus all nova- services, and the compute node runs nova- services only.
Cloud Controller Installation
`````````````````````````````
On the cloud controller node, you install nova services and the related helper applications, and then configure with the nova.conf file. You will then copy the nova.conf file to the compute node, which you install as a second node in the `Compute Installation`_.
Step 1 - Use apt-get to get the latest code
-------------------------------------------
1. Set up the Nova PPA from https://launchpad.net/~nova-core/+archive/trunk. The python-software-properties package is a prerequisite for setting up the nova package repo:
::
sudo apt-get install python-software-properties
sudo add-apt-repository ppa:nova-core/trunk
2. Run update.
::
sudo apt-get update
3. Install the required Python packages, Nova packages, and helper apps.
::
sudo apt-get install python-greenlet python-mysqldb python-nova nova-common nova-doc nova-api nova-network nova-objectstore nova-scheduler nova-compute euca2ools unzip
It is highly likely that there will be errors when the nova services come up since they are not yet configured. Don't worry, you're only at step 1!
Step 2 - Set up the configuration file (installed in /etc/nova)
----------------------------------------------------------------------
1. Nova development has consolidated all config files to nova.conf as of November 2010. There is a default set of options that are already configured in nova.conf:
::
--daemonize=1
--dhcpbridge_flagfile=/etc/nova/nova.conf
--dhcpbridge=/usr/bin/nova-dhcpbridge
--logdir=/var/log/nova
--state_path=/var/lib/nova
The following items ALSO need to be defined in /etc/nova/nova.conf. I've added some explanation of the variables as comments here, but note that comments CANNOT be in nova.conf itself. There seems to be an issue with nova-manage not processing the comments/whitespace correctly:
--sql_connection ### Location of Nova SQL DB
--s3_host ### This is where Nova is hosting the objectstore service, which will contain the VM images and buckets
--rabbit_host ### This is where the rabbit AMQP messaging service is hosted
--cc_host ### This is where the nova-api service lives
--verbose ### Optional but very helpful during initial setup
--ec2_url ### The location to interface nova-api
--network_manager ### Many options here, discussed below. This is how your controller will communicate with additional Nova nodes and VMs:
nova.network.manager.FlatManager # Simple, no-vlan networking type
nova.network.manager.FlatDHCPManager # Flat networking with DHCP
nova.network.manager.VlanManager # VLAN networking with DHCP; this is the DEFAULT if no network manager is defined in nova.conf
--fixed_range=<network/prefix> ### This will be the IP network that ALL the projects for future VM guests will reside on. E.g. 192.168.0.0/12
--network_size=<# of addrs> ### This is the total number of IP Addrs to use for VM guests, of all projects. E.g. 5000
The following code can be cut and paste, and edited to your setup:
Note: CC_ADDR=<the external IP address of your cloud controller>
Detailed explanation of the following example is available above.
::
--sql_connection=mysql://root:nova@<CC_ADDR>/nova
--s3_host=<CC_ADDR>
--rabbit_host=<CC_ADDR>
--cc_host=<CC_ADDR>
--verbose
--ec2_url=http://<CC_ADDR>:8773/services/Cloud
--network_manager=nova.network.manager.VlanManager
--fixed_range=<network/prefix>
--network_size=<# of addrs>
2. Create a “nova” group, and set permissions::
addgroup nova
The Nova config file should have its owner set to root:nova and its mode set to 0644, since it contains your MySQL server's root password. ::
chown -R root:nova /etc/nova
chmod 644 /etc/nova/nova.conf
Step 3 - Setup the SQL DB (MySQL for this setup)
------------------------------------------------
1. First you 'preseed' to bypass all the installation prompts::
bash
MYSQL_PASS=nova
cat <<MYSQL_PRESEED | debconf-set-selections
mysql-server-5.1 mysql-server/root_password password $MYSQL_PASS
mysql-server-5.1 mysql-server/root_password_again password $MYSQL_PASS
mysql-server-5.1 mysql-server/start_on_boot boolean true
MYSQL_PRESEED
2. Install MySQL::
apt-get install -y mysql-server
3. Edit /etc/mysql/my.cnf to change bind-address from localhost to any::
sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf
service mysql restart
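To verify that MySQL is now listening on all interfaces rather than only localhost, you can check for the listener on port 3306 (an optional sanity check)::
netstat -tlnp | grep 3306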
4. MySQL DB configuration:
Create NOVA database::
mysql -uroot -p$MYSQL_PASS -e 'CREATE DATABASE nova;'
Update the DB to include user 'root'@'%' with super user privileges::
mysql -uroot -p$MYSQL_PASS -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;"
Set the MySQL root password::
mysql -uroot -p$MYSQL_PASS -e "SET PASSWORD FOR 'root'@'%' = PASSWORD('$MYSQL_PASS');"
Compute Node Installation
`````````````````````````
Repeat steps 1 and 2 from the Cloud Controller Installation section above, then configure the network for your Compute instances on the Compute node. Copy the nova.conf file from the Cloud Controller node to this node.
Network Configuration
---------------------
If you use FlatManager as your network manager (as opposed to VlanManager that is shown in the nova.conf example above), there are some additional networking changes you'll have to make to ensure connectivity between your nodes and VMs. If you chose VlanManager or FlatDHCP, you may skip this section, as it's set up for you automatically.
Nova defaults to a bridge device named 'br100'. This needs to be created and somehow integrated into YOUR network. To keep things as simple as possible, have all the VM guests on the same network as the VM hosts (the compute nodes). To do so, set the compute node's external IP address to be on the bridge and add eth0 to that bridge. To do this, edit your network interfaces config to look like the following::
< begin /etc/network/interfaces >
# The loopback network interface
auto lo
iface lo inet loopback
# Networking for NOVA
auto br100
iface br100 inet dhcp
bridge_ports eth0
bridge_stp off
bridge_maxwait 0
bridge_fd 0
< end /etc/network/interfaces >
Next, restart networking to apply the changes::
sudo /etc/init.d/networking restart
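If you have the bridge-utils package installed, you can confirm that br100 exists and has eth0 attached (an optional check)::
brctl show br100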
Configuration
`````````````
On the Compute node, you should continue with these configuration steps.
Step 1 - Set up the Nova environment
------------------------------------
These are the commands you run to update the database if needed, and then set up a user and project::
/usr/bin/python /usr/bin/nova-manage db sync
/usr/bin/python /usr/bin/nova-manage user admin <user_name>
/usr/bin/python /usr/bin/nova-manage project create <project_name> <user_name>
/usr/bin/python /usr/bin/nova-manage network create <project-network> <number-of-networks-in-project> <IPs in project>
Here is an example of what this looks like with real data::
/usr/bin/python /usr/bin/nova-manage db sync
/usr/bin/python /usr/bin/nova-manage user admin dub
/usr/bin/python /usr/bin/nova-manage project create dubproject dub
/usr/bin/python /usr/bin/nova-manage network create 192.168.0.0/24 1 255
(I chose a /24 since that falls inside the /12 range I set in fixed_range in nova.conf. Currently, there can only be one network, and I am using the maximum number of IPs available in a /24. You can choose any valid amount that you would like.)
Note: The nova-manage service assumes that the first IP address is your network (like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the broadcast is the very last IP in the range you defined (192.168.0.255). If this is not the case you will need to manually edit the 'networks' table in the SQL database.
On running the "nova-manage network create" command, entries are made in the 'networks' and 'fixed_ips' tables. However, one of the networks listed in the 'networks' table needs to be marked as a bridge in order for the code to know that a bridge exists. The network is marked as bridged automatically based on the type of network manager selected. You only need to mark the network as a bridge manually if you chose FlatManager as your network type. More information can be found at the end of this document discussing setting up the bridge device.
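If you do need to inspect or edit those rows by hand, a query along these lines is a reasonable starting point (the column names reflect the schema of this era and may differ in your version)::
mysql -uroot -p$MYSQL_PASS nova -e 'SELECT id, cidr, bridge FROM networks;'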
Step 2 - Create Nova certificates
-----------------------------------
1. Generate the certs as a zip file. These are the certs you will use to launch instances, bundle images, and perform all the other assorted API functions.
::
mkdir -p /root/creds
/usr/bin/python /usr/bin/nova-manage project zipfile $NOVA_PROJECT $NOVA_PROJECT_USER /root/creds/novacreds.zip
2. Unzip them in your home directory, and add them to your environment.
::
unzip /root/creds/novacreds.zip -d /root/creds/
cat /root/creds/novarc >> ~/.bashrc
source ~/.bashrc
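Before restarting services, you can smoke-test the credentials with any read-only euca2ools call, for example::
euca-describe-availability-zones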
Step 3 - Restart all relevant services
--------------------------------------
Restart all six services in total, just to cover the entire spectrum::
service libvirt-bin restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart
Step 4 - Closing steps, and cleaning up
---------------------------------------
One of the most commonly missed configuration steps is allowing proper access to your VMs. Use the 'euca-authorize' command to enable access. Below, you will find the commands to allow 'ping' and 'ssh' to your VMs::
euca-authorize -P icmp -t -1:-1 default
euca-authorize -P tcp -p 22 default
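You can confirm the rules were added by listing the default security group, which should now show the icmp and tcp/22 entries::
euca-describe-groups default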
Another common issue is being unable to ping or SSH to your instances after issuing the 'euca-authorize' commands. Something to look at is the number of 'dnsmasq' processes that are running. If you have a running instance, check to see that TWO 'dnsmasq' processes are running. If not, perform the following::
killall dnsmasq
service nova-network restart
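To count the dnsmasq processes without matching your own grep command, something like this works::
ps -ef | grep [d]nsmasq
With a running instance you should see two lines of output.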
To avoid issues with KVM and permissions with Nova, run the following commands to ensure we have VMs that are running optimally::
chgrp kvm /dev/kvm
chmod g+rwx /dev/kvm
If you want to use the 10.04 Ubuntu Enterprise Cloud images that are readily available at http://uec-images.ubuntu.com/releases/10.04/release/, you may run into delays with booting. Any server that does not have nova-api running on it needs this iptables entry so that UEC images can get metadata info. On compute nodes, configure the iptables with this next step::
# iptables -t nat -A PREROUTING -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination $NOVA_API_IP:8773
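To confirm the rule is in place, list the NAT PREROUTING chain (this assumes $NOVA_API_IP was set to your API server's address when you added the rule)::
iptables -t nat -L PREROUTING -n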
Testing the Installation
````````````````````````
You can confirm that your compute node is talking to your cloud controller. From the cloud controller, run this database query::
mysql -u$MYSQL_USER -p$MYSQL_PASS nova -e 'select * from services;'
In return, you should see something similar to this::
+---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+
| created_at | updated_at | deleted_at | deleted | id | host | binary | topic | report_count | disabled | availability_zone |
+---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+
| 2011-01-28 22:52:46 | 2011-02-03 06:55:48 | NULL | 0 | 1 | osdemo02 | nova-network | network | 46064 | 0 | nova |
| 2011-01-28 22:52:48 | 2011-02-03 06:55:57 | NULL | 0 | 2 | osdemo02 | nova-compute | compute | 46056 | 0 | nova |
| 2011-01-28 22:52:52 | 2011-02-03 06:55:50 | NULL | 0 | 3 | osdemo02 | nova-scheduler | scheduler | 46065 | 0 | nova |
| 2011-01-29 23:49:29 | 2011-02-03 06:54:26 | NULL | 0 | 4 | osdemo01 | nova-compute | compute | 37050 | 0 | nova |
| 2011-01-30 23:42:24 | 2011-02-03 06:55:44 | NULL | 0 | 9 | osdemo04 | nova-compute | compute | 28484 | 0 | nova |
| 2011-01-30 21:27:28 | 2011-02-03 06:54:23 | NULL | 0 | 8 | osdemo05 | nova-compute | compute | 29284 | 0 | nova |
+---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+
You can see that osdemo0{1,2,4,5} are all running 'nova-compute'. When you start spinning up instances, they will be allocated to any node that is running nova-compute from this list.
You can then use `euca2ools` to test some items::
euca-describe-images
euca-describe-instances
If you have issues with the API key, you may need to re-source your creds file::
. /root/creds/novarc
If you don't get any immediate errors, you're successfully making calls to your cloud!
Spinning up a VM for Testing
````````````````````````````
(This excerpt is from Thierry Carrez's blog, with reference to http://wiki.openstack.org/GettingImages.)
The image that you will use here will be a ttylinux image, so this is a limited function server. You will be able to ping and SSH to this instance, but it is in no way a full production VM.
UPDATE: Due to `bug 661159 <https://bugs.launchpad.net/nova/+bug/661159>`_, we can't use images without ramdisks yet, so we can't use the classic Ubuntu cloud images from http://uec-images.ubuntu.com/releases/ yet. For the sake of this tutorial, we'll use the `ttylinux images from Scott Moser instead <http://smoser.brickies.net/ubuntu/ttylinux-uec/>`_.
Download the image, and publish to your bucket:
::
image="ttylinux-uec-amd64-12.1_2.6.35-22_1.tar.gz"
wget http://smoser.brickies.net/ubuntu/ttylinux-uec/$image
uec-publish-tarball $image mybucket
This will output three references: an "emi", an "eri", and an "eki" (the image, ramdisk, and kernel, respectively). The emi is the one we use to launch instances, so take note of it.
Create a keypair to SSH to the server:
::
euca-add-keypair mykey > mykey.priv
chmod 0600 mykey.priv
Boot your instance:
::
euca-run-instances $emi -k mykey -t m1.tiny
($emi is replaced with the output from the previous command)
Checking status, and confirming communication:
Once you have booted the instance, you can check its status with the `euca-describe-instances` command. Here you can view the instance ID, IP, and current status of the VM.
::
euca-describe-instances
Once in a "running" state, you can use your SSH key to connect:
::
ssh -i mykey.priv root@$ipaddress
When you are ready to terminate the instance, you may do so with the `euca-terminate-instances` command:
::
euca-terminate-instances $instance-id
You can determine the instance-id with `euca-describe-instances`; the format is "i-" followed by a series of letters and numbers, e.g. i-a4g9d.
For more information on creating your own custom (production-ready) instance images, please visit http://wiki.openstack.org/GettingImages.
Enjoy your new private cloud, and play responsibly!


@@ -1,362 +0,0 @@
Installing Nova on a Single Host
================================
Nova can be run on a single machine, and it is recommended that new users practice managing this type of installation before graduating to multi-node systems.
The fastest way to get a test cloud running is through our :doc:`../quickstart`. But for more detail on installing the system read this doc.
Steps 1 and 2: Get the latest Nova code and system software
--------------------------------------------------------------
Depending on your system, the method for accomplishing this varies:
.. toctree::
:maxdepth: 1
distros/ubuntu.10.04
distros/ubuntu.10.10
distros/others
Step 3: Build and install Nova services
---------------------------------------
Switch to the base nova source directory.
Then type or copy/paste in the following lines to compile the Python code for OpenStack Compute.
::
sudo python setup.py build
sudo python setup.py install
When the installation is complete, you'll see the following lines:
::
Installing nova-network script to /usr/local/bin
Installing nova-volume script to /usr/local/bin
Installing nova-objectstore script to /usr/local/bin
Installing nova-manage script to /usr/local/bin
Installing nova-scheduler script to /usr/local/bin
Installing nova-dhcpbridge script to /usr/local/bin
Installing nova-compute script to /usr/local/bin
Installing nova-instancemonitor script to /usr/local/bin
Installing nova-api script to /usr/local/bin
Installing nova-import-canonical-imagestore script to /usr/local/bin
Installed /usr/local/lib/python2.6/dist-packages/nova-2010.1-py2.6.egg
Processing dependencies for nova==2010.1
Finished processing dependencies for nova==2010.1
Step 4: Create the Nova Database
--------------------------------
Type or copy/paste in the following line to create your nova db::
sudo nova-manage db sync
Step 5: Create a Nova administrator
-----------------------------------
Type or copy/paste in the following line to create a user named "anne."::
sudo nova-manage user admin anne
You see an access key and a secret key export, such as these made-up ones::
export EC2_ACCESS_KEY=4e6498a2-blah-blah-blah-17d1333t97fd
export EC2_SECRET_KEY=0a520304-blah-blah-blah-340sp34k05bbe9a7
Step 6: Create the network
--------------------------
Type or copy/paste in the following line to create a network prior to creating a project.
::
sudo nova-manage network create 10.0.0.0/8 1 64
For this command, the first argument is your network in CIDR notation, such as 10.0.0.0/8 here or 192.168.1.0/24. The value 1 is the total number of networks you want made, and 64 is the total number of IPs in all networks.
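To make the sizing concrete (a worked example, assuming the usual power-of-two sizing): 64 addresses per network corresponds to a /26, since 2^(32-26) = 64, so the command above should carve a single 10.0.0.0/26 network out of the 10.0.0.0/8 range.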
After running this command, entries are made in the 'networks' and 'fixed_ips' table in the database.
Step 7: Create a project with the user you created
--------------------------------------------------
Type or copy/paste in the following line to create a project named IRT (for Ice Road Truckers, of course) with the newly-created user named anne.
::
sudo nova-manage project create IRT anne
You should see output like this::
Generating RSA private key, 1024 bit long modulus
.....++++++
..++++++
e is 65537 (0x10001)
Using configuration from ./openssl.cnf
Check that the request matches the signature
Signature ok
The Subject's Distinguished Name is as follows
countryName :PRINTABLE:'US'
stateOrProvinceName :PRINTABLE:'California'
localityName :PRINTABLE:'MountainView'
organizationName :PRINTABLE:'AnsoLabs'
organizationalUnitName:PRINTABLE:'NovaDev'
commonName :PRINTABLE:'anne-2010-10-12T21:12:35Z'
Certificate is to be certified until Oct 12 21:12:35 2011 GMT (365 days)
Write out database with 1 new entries
Data Base Updated
Step 8: Unzip the nova.zip
--------------------------
You should have a nova.zip file in your current working directory. Unzip it with this command:
::
unzip nova.zip
You'll see these files extract.
::
Archive: nova.zip
extracting: novarc
extracting: pk.pem
extracting: cert.pem
extracting: nova-vpn.conf
extracting: cacert.pem
Step 9: Source the rc file
--------------------------
Type or copy/paste the following to source the novarc file in your current working directory.
::
. novarc
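To confirm that your shell picked up the credentials, you can grep the environment for the EC2 variables::
env | grep EC2_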
Step 10: Pat yourself on the back :)
-----------------------------------
Congratulations! Your cloud is up and running: you've created an admin user, created a network, retrieved the user's credentials, and put them in your environment.
Now you need an image.
Step 11: Get an image
----------------------
To make things easier, we've provided a small image on the Rackspace CDN. Use this command to get it on your server.
::
wget http://c2477062.cdn.cloudfiles.rackspacecloud.com/images.tgz
::
--2010-10-12 21:40:55-- http://c2477062.cdn.cloudfiles.rackspacecloud.com/images.tgz
Resolving cblah2.cdn.cloudfiles.rackspacecloud.com... 208.111.196.6, 208.111.196.7
Connecting to cblah2.cdn.cloudfiles.rackspacecloud.com|208.111.196.6|:80... connected.
HTTP request sent, awaiting response... 200 OK
Length: 58520278 (56M) [application/x-gzip]
Saving to: `images.tgz'
100%[======================================>] 58,520,278 14.1M/s in 3.9s
2010-10-12 21:40:59 (14.1 MB/s) - `images.tgz' saved [58520278/58520278]
Step 12: Decompress the image file
----------------------------------
Use this command to extract the image files::
tar xvzf images.tgz
You get a directory listing like so::
images
|-- aki-lucid
| |-- image
| `-- info.json
|-- ami-tiny
| |-- image
| `-- info.json
`-- ari-lucid
|-- image
`-- info.json
Step 13: Send commands to upload sample image to the cloud
----------------------------------------------------------
Type or copy/paste the following commands to create a manifest for the kernel.::
euca-bundle-image -i images/aki-lucid/image -p kernel --kernel true
You should see this in response::
Checking image
Tarring image
Encrypting image
Splitting image...
Part: kernel.part.0
Generating manifest /tmp/kernel.manifest.xml
Type or copy/paste the following commands to create a manifest for the ramdisk.::
euca-bundle-image -i images/ari-lucid/image -p ramdisk --ramdisk true
You should see this in response::
Checking image
Tarring image
Encrypting image
Splitting image...
Part: ramdisk.part.0
Generating manifest /tmp/ramdisk.manifest.xml
Type or copy/paste the following commands to upload the kernel bundle.::
euca-upload-bundle -m /tmp/kernel.manifest.xml -b mybucket
You should see this in response::
Checking bucket: mybucket
Creating bucket: mybucket
Uploading manifest file
Uploading part: kernel.part.0
Uploaded image as mybucket/kernel.manifest.xml
Type or copy/paste the following commands to upload the ramdisk bundle.::
euca-upload-bundle -m /tmp/ramdisk.manifest.xml -b mybucket
You should see this in response::
Checking bucket: mybucket
Uploading manifest file
Uploading part: ramdisk.part.0
Uploaded image as mybucket/ramdisk.manifest.xml
Type or copy/paste the following commands to register the kernel and get its ID.::
euca-register mybucket/kernel.manifest.xml
You should see this in response::
IMAGE ami-fcbj2non
Type or copy/paste the following commands to register the ramdisk and get its ID.::
euca-register mybucket/ramdisk.manifest.xml
You should see this in response::
IMAGE ami-orukptrc
Type or copy/paste the following commands to create a manifest for the machine image associated with the ramdisk and kernel IDs that you got from the previous commands.::
euca-bundle-image -i images/ami-tiny/image -p machine --kernel ami-fcbj2non --ramdisk ami-orukptrc
You should see this in response::
Checking image
Tarring image
Encrypting image
Splitting image...
Part: machine.part.0
Part: machine.part.1
Part: machine.part.2
Part: machine.part.3
Part: machine.part.4
Generating manifest /tmp/machine.manifest.xml
Type or copy/paste the following commands to upload the machine image bundle.::
euca-upload-bundle -m /tmp/machine.manifest.xml -b mybucket
You should see this in response::
Checking bucket: mybucket
Uploading manifest file
Uploading part: machine.part.0
Uploading part: machine.part.1
Uploading part: machine.part.2
Uploading part: machine.part.3
Uploading part: machine.part.4
Uploaded image as mybucket/machine.manifest.xml
Type or copy/paste the following commands to register the machine image and get its ID.::
euca-register mybucket/machine.manifest.xml
You should see this in response::
IMAGE ami-g06qbntt
Type or copy/paste the following commands to register a SSH keypair for use in starting and accessing the instances.::
euca-add-keypair mykey > mykey.priv
chmod 600 mykey.priv
Type or copy/paste the following commands to run an instance using the keypair and IDs that we previously created.::
euca-run-instances ami-g06qbntt --kernel ami-fcbj2non --ramdisk ami-orukptrc -k mykey
You should see this in response::
RESERVATION r-0at28z12 IRT
INSTANCE i-1b0bh8n ami-g06qbntt 10.0.0.3 10.0.0.3 scheduling mykey (IRT, None) m1.small 2010-10-18 19:02:10.443599
Type or copy/paste the following commands to watch as the scheduler launches, and completes booting your instance.::
euca-describe-instances
You should see this in response::
RESERVATION r-0at28z12 IRT
INSTANCE i-1b0bh8n ami-g06qbntt 10.0.0.3 10.0.0.3 launching mykey (IRT, cloud02) m1.small 2010-10-18 19:02:10.443599
Type or copy/paste the following commands to see when loading is completed and the instance is running.::
euca-describe-instances
You should see this in response::
RESERVATION r-0at28z12 IRT
INSTANCE i-1b0bh8n ami-g06qbntt 10.0.0.3 10.0.0.3 running mykey (IRT, cloud02) 0 m1.small 2010-10-18 19:02:10.443599
Type or copy/paste the following commands to check that the virtual machine is running.::
virsh list
You should see this in response::
Id Name State
----------------------------------
1 2842445831 running
Type or copy/paste the following commands to ssh to the instance using your private key.::
ssh -i mykey.priv root@10.0.0.3
Troubleshooting Installation
----------------------------
If you see an "error loading the config file './openssl.cnf'" message, you can fix it by copying the openssl.cnf file to the location where Nova expects it and rebooting, then trying the command again.
::
cp /etc/ssl/openssl.cnf ~
sudo reboot


@@ -0,0 +1,138 @@
.. toctree::
:maxdepth: 1
nova..adminclient.rst
nova..api.direct.rst
nova..api.ec2.admin.rst
nova..api.ec2.apirequest.rst
nova..api.ec2.cloud.rst
nova..api.ec2.metadatarequesthandler.rst
nova..api.openstack.auth.rst
nova..api.openstack.backup_schedules.rst
nova..api.openstack.common.rst
nova..api.openstack.consoles.rst
nova..api.openstack.faults.rst
nova..api.openstack.flavors.rst
nova..api.openstack.images.rst
nova..api.openstack.servers.rst
nova..api.openstack.shared_ip_groups.rst
nova..api.openstack.zones.rst
nova..auth.dbdriver.rst
nova..auth.fakeldap.rst
nova..auth.ldapdriver.rst
nova..auth.manager.rst
nova..auth.signer.rst
nova..cloudpipe.pipelib.rst
nova..compute.api.rst
nova..compute.instance_types.rst
nova..compute.manager.rst
nova..compute.monitor.rst
nova..compute.power_state.rst
nova..console.api.rst
nova..console.fake.rst
nova..console.manager.rst
nova..console.xvp.rst
nova..context.rst
nova..crypto.rst
nova..db.api.rst
nova..db.base.rst
nova..db.migration.rst
nova..db.sqlalchemy.api.rst
nova..db.sqlalchemy.migrate_repo.manage.rst
nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst
nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst
nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst
nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst
nova..db.sqlalchemy.migration.rst
nova..db.sqlalchemy.models.rst
nova..db.sqlalchemy.session.rst
nova..exception.rst
nova..fakememcache.rst
nova..fakerabbit.rst
nova..flags.rst
nova..image.glance.rst
nova..image.local.rst
nova..image.s3.rst
nova..image.service.rst
nova..log.rst
nova..manager.rst
nova..network.api.rst
nova..network.linux_net.rst
nova..network.manager.rst
nova..objectstore.bucket.rst
nova..objectstore.handler.rst
nova..objectstore.image.rst
nova..objectstore.stored.rst
nova..quota.rst
nova..rpc.rst
nova..scheduler.chance.rst
nova..scheduler.driver.rst
nova..scheduler.manager.rst
nova..scheduler.simple.rst
nova..scheduler.zone.rst
nova..service.rst
nova..test.rst
nova..tests.api.openstack.fakes.rst
nova..tests.api.openstack.test_adminapi.rst
nova..tests.api.openstack.test_api.rst
nova..tests.api.openstack.test_auth.rst
nova..tests.api.openstack.test_common.rst
nova..tests.api.openstack.test_faults.rst
nova..tests.api.openstack.test_flavors.rst
nova..tests.api.openstack.test_images.rst
nova..tests.api.openstack.test_ratelimiting.rst
nova..tests.api.openstack.test_servers.rst
nova..tests.api.openstack.test_shared_ip_groups.rst
nova..tests.api.openstack.test_zones.rst
nova..tests.api.test_wsgi.rst
nova..tests.db.fakes.rst
nova..tests.declare_flags.rst
nova..tests.fake_flags.rst
nova..tests.glance.stubs.rst
nova..tests.hyperv_unittest.rst
nova..tests.objectstore_unittest.rst
nova..tests.real_flags.rst
nova..tests.runtime_flags.rst
nova..tests.test_access.rst
nova..tests.test_api.rst
nova..tests.test_auth.rst
nova..tests.test_cloud.rst
nova..tests.test_compute.rst
nova..tests.test_console.rst
nova..tests.test_direct.rst
nova..tests.test_flags.rst
nova..tests.test_localization.rst
nova..tests.test_log.rst
nova..tests.test_middleware.rst
nova..tests.test_misc.rst
nova..tests.test_network.rst
nova..tests.test_quota.rst
nova..tests.test_rpc.rst
nova..tests.test_scheduler.rst
nova..tests.test_service.rst
nova..tests.test_twistd.rst
nova..tests.test_virt.rst
nova..tests.test_volume.rst
nova..tests.test_xenapi.rst
nova..tests.xenapi.stubs.rst
nova..twistd.rst
nova..utils.rst
nova..version.rst
nova..virt.connection.rst
nova..virt.disk.rst
nova..virt.fake.rst
nova..virt.hyperv.rst
nova..virt.images.rst
nova..virt.libvirt_conn.rst
nova..virt.xenapi.fake.rst
nova..virt.xenapi.network_utils.rst
nova..virt.xenapi.vm_utils.rst
nova..virt.xenapi.vmops.rst
nova..virt.xenapi.volume_utils.rst
nova..virt.xenapi.volumeops.rst
nova..virt.xenapi_conn.rst
nova..volume.api.rst
nova..volume.driver.rst
nova..volume.manager.rst
nova..volume.san.rst
nova..wsgi.rst


@@ -0,0 +1,6 @@
The :mod:`nova..adminclient` Module
==============================================================================
.. automodule:: nova..adminclient
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..api.direct` Module
==============================================================================
.. automodule:: nova..api.direct
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..api.ec2.admin` Module
==============================================================================
.. automodule:: nova..api.ec2.admin
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..api.ec2.apirequest` Module
==============================================================================
.. automodule:: nova..api.ec2.apirequest
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..api.ec2.cloud` Module
==============================================================================
.. automodule:: nova..api.ec2.cloud
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..api.ec2.metadatarequesthandler` Module
==============================================================================
.. automodule:: nova..api.ec2.metadatarequesthandler
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..api.openstack.auth` Module
==============================================================================
.. automodule:: nova..api.openstack.auth
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..api.openstack.backup_schedules` Module
==============================================================================
.. automodule:: nova..api.openstack.backup_schedules
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..api.openstack.common` Module
==============================================================================
.. automodule:: nova..api.openstack.common
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..api.openstack.consoles` Module
==============================================================================
.. automodule:: nova..api.openstack.consoles
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..api.openstack.faults` Module
==============================================================================
.. automodule:: nova..api.openstack.faults
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..api.openstack.flavors` Module
==============================================================================
.. automodule:: nova..api.openstack.flavors
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..api.openstack.images` Module
==============================================================================
.. automodule:: nova..api.openstack.images
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..api.openstack.servers` Module
==============================================================================
.. automodule:: nova..api.openstack.servers
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..api.openstack.shared_ip_groups` Module
==============================================================================
.. automodule:: nova..api.openstack.shared_ip_groups
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..api.openstack.zones` Module
==============================================================================
.. automodule:: nova..api.openstack.zones
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..auth.dbdriver` Module
==============================================================================
.. automodule:: nova..auth.dbdriver
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..auth.fakeldap` Module
==============================================================================
.. automodule:: nova..auth.fakeldap
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..auth.ldapdriver` Module
==============================================================================
.. automodule:: nova..auth.ldapdriver
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..auth.manager` Module
==============================================================================
.. automodule:: nova..auth.manager
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..auth.signer` Module
==============================================================================
.. automodule:: nova..auth.signer
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..cloudpipe.pipelib` Module
==============================================================================
.. automodule:: nova..cloudpipe.pipelib
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..compute.api` Module
==============================================================================
.. automodule:: nova..compute.api
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..compute.instance_types` Module
==============================================================================
.. automodule:: nova..compute.instance_types
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..compute.manager` Module
==============================================================================
.. automodule:: nova..compute.manager
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..compute.monitor` Module
==============================================================================
.. automodule:: nova..compute.monitor
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..compute.power_state` Module
==============================================================================
.. automodule:: nova..compute.power_state
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..console.api` Module
==============================================================================
.. automodule:: nova..console.api
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..console.fake` Module
==============================================================================
.. automodule:: nova..console.fake
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..console.manager` Module
==============================================================================
.. automodule:: nova..console.manager
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..console.xvp` Module
==============================================================================
.. automodule:: nova..console.xvp
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..context` Module
==============================================================================
.. automodule:: nova..context
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..crypto` Module
==============================================================================
.. automodule:: nova..crypto
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..db.api` Module
==============================================================================
.. automodule:: nova..db.api
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..db.base` Module
==============================================================================
.. automodule:: nova..db.base
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..db.migration` Module
==============================================================================
.. automodule:: nova..db.migration
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..db.sqlalchemy.api` Module
==============================================================================
.. automodule:: nova..db.sqlalchemy.api
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..db.sqlalchemy.migrate_repo.manage` Module
==============================================================================
.. automodule:: nova..db.sqlalchemy.migrate_repo.manage
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..db.sqlalchemy.migrate_repo.versions.001_austin` Module
==============================================================================
.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.001_austin
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..db.sqlalchemy.migrate_repo.versions.002_bexar` Module
==============================================================================
.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.002_bexar
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks` Module
==============================================================================
.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks
:members:
:undoc-members:
:show-inheritance:


@@ -0,0 +1,6 @@
The :mod:`nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables` Module
==============================================================================
.. automodule:: nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables
:members:
:undoc-members:
:show-inheritance:

Some files were not shown because too many files have changed in this diff.