Add sample puppet scripts.

This commit is contained in:
Todd Willey 2010-11-12 14:07:46 -05:00
parent 0f5eddf051
commit 028ce51f9f
31 changed files with 2315 additions and 0 deletions

View File

@ -0,0 +1 @@
ENABLED=true

View File

@ -0,0 +1 @@
ENABLED=true

View File

@ -0,0 +1,5 @@
-----------------------------------------------
Welcome to your OpenStack installation!
-----------------------------------------------

View File

@ -0,0 +1,170 @@
# Master configuration file for the QEMU driver.
# All settings described here are optional - if omitted, sensible
# defaults are used.
# VNC is configured to listen on 127.0.0.1 by default.
# To make it listen on all public interfaces, uncomment
# this next option.
#
# NB, strong recommendation to enable TLS + x509 certificate
# verification when allowing public access
#
# vnc_listen = "0.0.0.0"
# Enable use of TLS encryption on the VNC server. This requires
# a VNC client which supports the VeNCrypt protocol extension.
# Examples include vinagre, virt-viewer, virt-manager and vencrypt
# itself. UltraVNC, RealVNC, TightVNC do not support this
#
# It is necessary to setup CA and issue a server certificate
# before enabling this.
#
# vnc_tls = 1
# Use of TLS requires that x509 certificates be issued. The
# default is to keep them in /etc/pki/libvirt-vnc. This directory
# must contain
#
# ca-cert.pem - the CA master certificate
# server-cert.pem - the server certificate signed with ca-cert.pem
# server-key.pem - the server private key
#
# This option allows the certificate directory to be changed
#
# vnc_tls_x509_cert_dir = "/etc/pki/libvirt-vnc"
# The default TLS configuration only uses certificates for the server
# allowing the client to verify the server's identity and establish
# an encrypted channel.
#
# It is possible to use x509 certificates for authentication too, by
# issuing a x509 certificate to every client who needs to connect.
#
# Enabling this option will reject any client who does not have a
# certificate signed by the CA in /etc/pki/libvirt-vnc/ca-cert.pem
#
# vnc_tls_x509_verify = 1
# The default VNC password. Only 8 letters are significant for
# VNC passwords. This parameter is only used if the per-domain
# XML config does not already provide a password. To allow
# access without passwords, leave this commented out. An empty
# string will still enable passwords, but be rejected by QEMU
# effectively preventing any use of VNC. Obviously change this
# example here before you set this
#
# vnc_password = "XYZ12345"
# Enable use of SASL encryption on the VNC server. This requires
# a VNC client which supports the SASL protocol extension.
# Examples include vinagre, virt-viewer and virt-manager
# itself. UltraVNC, RealVNC, TightVNC do not support this
#
# It is necessary to configure /etc/sasl2/qemu.conf to choose
# the desired SASL plugin (eg, GSSPI for Kerberos)
#
# vnc_sasl = 1
# The default SASL configuration file is located in /etc/sasl2/
# When running libvirtd unprivileged, it may be desirable to
# override the configs in this location. Set this parameter to
# point to the directory, and create a qemu.conf in that location
#
# vnc_sasl_dir = "/some/directory/sasl2"
# The default security driver is SELinux. If SELinux is disabled
# on the host, then the security driver will automatically disable
# itself. If you wish to disable QEMU SELinux security driver while
# leaving SELinux enabled for the host in general, then set this
# to 'none' instead
#
# security_driver = "selinux"
# The user ID for QEMU processes run by the system instance
user = "root"
# The group ID for QEMU processes run by the system instance
group = "root"
# Whether libvirt should dynamically change file ownership
# to match the configured user/group above. Defaults to 1.
# Set to 0 to disable file ownership changes.
#dynamic_ownership = 1
# What cgroup controllers to make use of with QEMU guests
#
# - 'cpu' - use for scheduler tunables
# - 'devices' - use for device whitelisting
#
# NB, even if configured here, they won't be used unless
# the administrator has mounted cgroups. eg
#
# mkdir /dev/cgroup
# mount -t cgroup -o devices,cpu none /dev/cgroup
#
# They can be mounted anywhere, and different controllers
# can be mounted in different locations. libvirt will detect
# where they are located.
#
# cgroup_controllers = [ "cpu", "devices" ]
# This is the basic set of devices allowed / required by
# all virtual machines.
#
# As well as this, any configured block backed disks,
# all sound device, and all PTY devices are allowed.
#
# This will only need setting if newer QEMU suddenly
# wants some device we don't already know about.
#
#cgroup_device_acl = [
# "/dev/null", "/dev/full", "/dev/zero",
# "/dev/random", "/dev/urandom",
# "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
# "/dev/rtc", "/dev/hpet", "/dev/net/tun",
#]
# The default format for Qemu/KVM guest save images is raw; that is, the
# memory from the domain is dumped out directly to a file. If you have
# guests with a large amount of memory, however, this can take up quite
# a bit of space. If you would like to compress the images while they
# are being saved to disk, you can also set "lzop", "gzip", "bzip2", or "xz"
# for save_image_format. Note that this means you slow down the process of
# saving a domain in order to save disk space; the list above is in descending
# order by performance and ascending order by compression ratio.
#
# save_image_format = "raw"
# If provided by the host and a hugetlbfs mount point is configured,
# a guest may request huge page backing. When this mount point is
# unspecified here, determination of a host mount point in /proc/mounts
# will be attempted. Specifying an explicit mount overrides detection
# of the same in /proc/mounts. Setting the mount point to "" will
# disable guest hugepage backing.
#
# NB, within this mount point, guests will create memory backing files
# in a location of $MOUNTPOINT/libvirt/qemu
# hugetlbfs_mount = "/dev/hugepages"
# mac_filter enables MAC addressed based filtering on bridge ports.
# This currently requires ebtables to be installed.
#
# mac_filter = 1
# By default, PCI devices below non-ACS switch are not allowed to be assigned
# to guests. By setting relaxed_acs_check to 1 such devices will be allowed to
# be assigned to guests.
#
# relaxed_acs_check = 1

View File

@ -0,0 +1,463 @@
# This is an example configuration file for the LVM2 system.
# It contains the default settings that would be used if there was no
# /etc/lvm/lvm.conf file.
#
# Refer to 'man lvm.conf' for further information including the file layout.
#
# To put this file in a different directory and override /etc/lvm set
# the environment variable LVM_SYSTEM_DIR before running the tools.
# This section allows you to configure which block devices should
# be used by the LVM system.
devices {
# Where do you want your volume groups to appear ?
dir = "/dev"
# An array of directories that contain the device nodes you wish
# to use with LVM2.
scan = [ "/dev" ]
# If several entries in the scanned directories correspond to the
# same block device and the tools need to display a name for device,
# all the pathnames are matched against each item in the following
# list of regular expressions in turn and the first match is used.
preferred_names = [ ]
# Try to avoid using undescriptive /dev/dm-N names, if present.
# preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ]
# A filter that tells LVM2 to only use a restricted set of devices.
# The filter consists of an array of regular expressions. These
# expressions can be delimited by a character of your choice, and
# prefixed with either an 'a' (for accept) or 'r' (for reject).
# The first expression found to match a device name determines if
# the device will be accepted or rejected (ignored). Devices that
# don't match any patterns are accepted.
# Be careful if there are symbolic links or multiple filesystem
# entries for the same device as each name is checked separately against
# the list of patterns. The effect is that if any name matches any 'a'
# pattern, the device is accepted; otherwise if any name matches any 'r'
# pattern it is rejected; otherwise it is accepted.
# Don't have more than one filter line active at once: only one gets used.
# Run vgscan after you change this parameter to ensure that
# the cache file gets regenerated (see below).
# If it doesn't do what you expect, check the output of 'vgscan -vvvv'.
# By default we accept every block device:
filter = [ "r|/dev/etherd/.*|", "r|/dev/block/.*|", "a/.*/" ]
# Exclude the cdrom drive
# filter = [ "r|/dev/cdrom|" ]
# When testing I like to work with just loopback devices:
# filter = [ "a/loop/", "r/.*/" ]
# Or maybe all loops and ide drives except hdc:
# filter =[ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ]
# Use anchors if you want to be really specific
# filter = [ "a|^/dev/hda8$|", "r/.*/" ]
# The results of the filtering are cached on disk to avoid
# rescanning dud devices (which can take a very long time).
# By default this cache is stored in the /etc/lvm/cache directory
# in a file called '.cache'.
# It is safe to delete the contents: the tools regenerate it.
# (The old setting 'cache' is still respected if neither of
# these new ones is present.)
cache_dir = "/etc/lvm/cache"
cache_file_prefix = ""
# You can turn off writing this cache file by setting this to 0.
write_cache_state = 1
# Advanced settings.
# List of pairs of additional acceptable block device types found
# in /proc/devices with maximum (non-zero) number of partitions.
# types = [ "fd", 16 ]
# If sysfs is mounted (2.6 kernels) restrict device scanning to
# the block devices it believes are valid.
# 1 enables; 0 disables.
sysfs_scan = 1
# By default, LVM2 will ignore devices used as components of
# software RAID (md) devices by looking for md superblocks.
# 1 enables; 0 disables.
md_component_detection = 1
# By default, if a PV is placed directly upon an md device, LVM2
# will align its data blocks with the md device's stripe-width.
# 1 enables; 0 disables.
md_chunk_alignment = 1
# By default, the start of a PV's data area will be a multiple of
# the 'minimum_io_size' or 'optimal_io_size' exposed in sysfs.
# - minimum_io_size - the smallest request the device can perform
# w/o incurring a read-modify-write penalty (e.g. MD's chunk size)
# - optimal_io_size - the device's preferred unit of receiving I/O
# (e.g. MD's stripe width)
# minimum_io_size is used if optimal_io_size is undefined (0).
# If md_chunk_alignment is enabled, that detects the optimal_io_size.
# This setting takes precedence over md_chunk_alignment.
# 1 enables; 0 disables.
data_alignment_detection = 1
# Alignment (in KB) of start of data area when creating a new PV.
# If a PV is placed directly upon an md device and md_chunk_alignment or
# data_alignment_detection is enabled this parameter is ignored.
# Set to 0 for the default alignment of 64KB or page size, if larger.
data_alignment = 0
# By default, the start of the PV's aligned data area will be shifted by
# the 'alignment_offset' exposed in sysfs. This offset is often 0 but
# may be non-zero; e.g.: certain 4KB sector drives that compensate for
# windows partitioning will have an alignment_offset of 3584 bytes
# (sector 7 is the lowest aligned logical block, the 4KB sectors start
# at LBA -1, and consequently sector 63 is aligned on a 4KB boundary).
# 1 enables; 0 disables.
data_alignment_offset_detection = 1
# If, while scanning the system for PVs, LVM2 encounters a device-mapper
# device that has its I/O suspended, it waits for it to become accessible.
# Set this to 1 to skip such devices. This should only be needed
# in recovery situations.
ignore_suspended_devices = 0
}
# This section that allows you to configure the nature of the
# information that LVM2 reports.
log {
# Controls the messages sent to stdout or stderr.
# There are three levels of verbosity, 3 being the most verbose.
verbose = 0
# Should we send log messages through syslog?
# 1 is yes; 0 is no.
syslog = 1
# Should we log error and debug messages to a file?
# By default there is no log file.
#file = "/var/log/lvm2.log"
# Should we overwrite the log file each time the program is run?
# By default we append.
overwrite = 0
# What level of log messages should we send to the log file and/or syslog?
# There are 6 syslog-like log levels currently in use - 2 to 7 inclusive.
# 7 is the most verbose (LOG_DEBUG).
level = 0
# Format of output messages
# Whether or not (1 or 0) to indent messages according to their severity
indent = 1
# Whether or not (1 or 0) to display the command name on each line output
command_names = 0
# A prefix to use before the message text (but after the command name,
# if selected). Default is two spaces, so you can see/grep the severity
# of each message.
prefix = " "
# To make the messages look similar to the original LVM tools use:
# indent = 0
# command_names = 1
# prefix = " -- "
# Set this if you want log messages during activation.
# Don't use this in low memory situations (can deadlock).
# activation = 0
}
# Configuration of metadata backups and archiving. In LVM2 when we
# talk about a 'backup' we mean making a copy of the metadata for the
# *current* system. The 'archive' contains old metadata configurations.
# Backups are stored in a human readable text format.
backup {
# Should we maintain a backup of the current metadata configuration ?
# Use 1 for Yes; 0 for No.
# Think very hard before turning this off!
backup = 1
# Where shall we keep it ?
# Remember to back up this directory regularly!
backup_dir = "/etc/lvm/backup"
# Should we maintain an archive of old metadata configurations.
# Use 1 for Yes; 0 for No.
# On by default. Think very hard before turning this off.
archive = 1
# Where should archived files go ?
# Remember to back up this directory regularly!
archive_dir = "/etc/lvm/archive"
# What is the minimum number of archive files you wish to keep ?
retain_min = 10
# What is the minimum time you wish to keep an archive file for ?
retain_days = 30
}
# Settings for the running LVM2 in shell (readline) mode.
shell {
# Number of lines of history to store in ~/.lvm_history
history_size = 100
}
# Miscellaneous global LVM2 settings
global {
# The file creation mask for any files and directories created.
# Interpreted as octal if the first digit is zero.
umask = 077
# Allow other users to read the files
#umask = 022
# Enabling test mode means that no changes to the on disk metadata
# will be made. Equivalent to having the -t option on every
# command. Defaults to off.
test = 0
# Default value for --units argument
units = "h"
# Since version 2.02.54, the tools distinguish between powers of
# 1024 bytes (e.g. KiB, MiB, GiB) and powers of 1000 bytes (e.g.
# KB, MB, GB).
# If you have scripts that depend on the old behaviour, set this to 0
# temporarily until you update them.
si_unit_consistency = 1
# Whether or not to communicate with the kernel device-mapper.
# Set to 0 if you want to use the tools to manipulate LVM metadata
# without activating any logical volumes.
# If the device-mapper kernel driver is not present in your kernel
# setting this to 0 should suppress the error messages.
activation = 1
# If we can't communicate with device-mapper, should we try running
# the LVM1 tools?
# This option only applies to 2.4 kernels and is provided to help you
# switch between device-mapper kernels and LVM1 kernels.
# The LVM1 tools need to be installed with .lvm1 suffixes
# e.g. vgscan.lvm1 and they will stop working after you start using
# the new lvm2 on-disk metadata format.
# The default value is set when the tools are built.
# fallback_to_lvm1 = 0
# The default metadata format that commands should use - "lvm1" or "lvm2".
# The command line override is -M1 or -M2.
# Defaults to "lvm2".
# format = "lvm2"
# Location of proc filesystem
proc = "/proc"
# Type of locking to use. Defaults to local file-based locking (1).
# Turn locking off by setting to 0 (dangerous: risks metadata corruption
# if LVM2 commands get run concurrently).
# Type 2 uses the external shared library locking_library.
# Type 3 uses built-in clustered locking.
# Type 4 uses read-only locking which forbids any operations that might
# change metadata.
locking_type = 1
# Set to 0 to fail when a lock request cannot be satisfied immediately.
wait_for_locks = 1
# If using external locking (type 2) and initialisation fails,
# with this set to 1 an attempt will be made to use the built-in
# clustered locking.
# If you are using a customised locking_library you should set this to 0.
fallback_to_clustered_locking = 1
# If an attempt to initialise type 2 or type 3 locking failed, perhaps
# because cluster components such as clvmd are not running, with this set
# to 1 an attempt will be made to use local file-based locking (type 1).
# If this succeeds, only commands against local volume groups will proceed.
# Volume Groups marked as clustered will be ignored.
fallback_to_local_locking = 1
# Local non-LV directory that holds file-based locks while commands are
# in progress. A directory like /tmp that may get wiped on reboot is OK.
locking_dir = "/var/lock/lvm"
# Whenever there are competing read-only and read-write access requests for
# a volume group's metadata, instead of always granting the read-only
# requests immediately, delay them to allow the read-write requests to be
# serviced. Without this setting, write access may be stalled by a high
# volume of read-only requests.
# NB. This option only affects locking_type = 1 viz. local file-based
# locking.
prioritise_write_locks = 1
# Other entries can go here to allow you to load shared libraries
# e.g. if support for LVM1 metadata was compiled as a shared library use
# format_libraries = "liblvm2format1.so"
# Full pathnames can be given.
# Search this directory first for shared libraries.
# library_dir = "/lib/lvm2"
# The external locking library to load if locking_type is set to 2.
# locking_library = "liblvm2clusterlock.so"
}
activation {
# Set to 0 to disable udev synchronisation (if compiled into the binaries).
# Processes will not wait for notification from udev.
# They will continue irrespective of any possible udev processing
# in the background. You should only use this if udev is not running
# or has rules that ignore the devices LVM2 creates.
# The command line argument --nodevsync takes precedence over this setting.
# If set to 1 when udev is not running, and there are LVM2 processes
# waiting for udev, run 'dmsetup udevcomplete_all' manually to wake them up.
udev_sync = 1
# How to fill in missing stripes if activating an incomplete volume.
# Using "error" will make inaccessible parts of the device return
# I/O errors on access. You can instead use a device path, in which
# case, that device will be used in place of missing stripes.
# But note that using anything other than "error" with mirrored
# or snapshotted volumes is likely to result in data corruption.
missing_stripe_filler = "error"
# How much stack (in KB) to reserve for use while devices suspended
reserved_stack = 256
# How much memory (in KB) to reserve for use while devices suspended
reserved_memory = 8192
# Nice value used while devices suspended
process_priority = -18
# If volume_list is defined, each LV is only activated if there is a
# match against the list.
# "vgname" and "vgname/lvname" are matched exactly.
# "@tag" matches any tag set in the LV or VG.
# "@*" matches if any tag defined on the host is also set in the LV or VG
#
# volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
# Size (in KB) of each copy operation when mirroring
mirror_region_size = 512
# Setting to use when there is no readahead value stored in the metadata.
#
# "none" - Disable readahead.
# "auto" - Use default value chosen by kernel.
readahead = "auto"
# 'mirror_image_fault_policy' and 'mirror_log_fault_policy' define
# how a device failure affecting a mirror is handled.
# A mirror is composed of mirror images (copies) and a log.
# A disk log ensures that a mirror does not need to be re-synced
# (all copies made the same) every time a machine reboots or crashes.
#
# In the event of a failure, the specified policy will be used to determine
# what happens. This applies to automatic repairs (when the mirror is being
# monitored by dmeventd) and to manual lvconvert --repair when
# --use-policies is given.
#
# "remove" - Simply remove the faulty device and run without it. If
# the log device fails, the mirror would convert to using
# an in-memory log. This means the mirror will not
# remember its sync status across crashes/reboots and
# the entire mirror will be re-synced. If a
# mirror image fails, the mirror will convert to a
# non-mirrored device if there is only one remaining good
# copy.
#
# "allocate" - Remove the faulty device and try to allocate space on
# a new device to be a replacement for the failed device.
# Using this policy for the log is fast and maintains the
# ability to remember sync state through crashes/reboots.
# Using this policy for a mirror device is slow, as it
# requires the mirror to resynchronize the devices, but it
# will preserve the mirror characteristic of the device.
# This policy acts like "remove" if no suitable device and
# space can be allocated for the replacement.
#
# "allocate_anywhere" - Not yet implemented. Useful to place the log device
# temporarily on same physical volume as one of the mirror
# images. This policy is not recommended for mirror devices
# since it would break the redundant nature of the mirror. This
# policy acts like "remove" if no suitable device and space can
# be allocated for the replacement.
mirror_log_fault_policy = "allocate"
mirror_device_fault_policy = "remove"
}
####################
# Advanced section #
####################
# Metadata settings
#
# metadata {
# Default number of copies of metadata to hold on each PV. 0, 1 or 2.
# You might want to override it from the command line with 0
# when running pvcreate on new PVs which are to be added to large VGs.
# pvmetadatacopies = 1
# Approximate default size of on-disk metadata areas in sectors.
# You should increase this if you have large volume groups or
# you want to retain a large on-disk history of your metadata changes.
# pvmetadatasize = 255
# List of directories holding live copies of text format metadata.
# These directories must not be on logical volumes!
# It's possible to use LVM2 with a couple of directories here,
# preferably on different (non-LV) filesystems, and with no other
# on-disk metadata (pvmetadatacopies = 0). Or this can be in
# addition to on-disk metadata areas.
# The feature was originally added to simplify testing and is not
# supported under low memory situations - the machine could lock up.
#
# Never edit any files in these directories by hand unless you
# are absolutely sure you know what you are doing! Use
# the supplied toolset to make changes (e.g. vgcfgrestore).
# dirs = [ "/etc/lvm/metadata", "/mnt/disk2/lvm/metadata2" ]
#}
# Event daemon
#
dmeventd {
# mirror_library is the library used when monitoring a mirror device.
#
# "libdevmapper-event-lvm2mirror.so" attempts to recover from
# failures. It removes failed devices from a volume group and
# reconfigures a mirror as necessary. If no mirror library is
# provided, mirrors are not monitored through dmeventd.
mirror_library = "libdevmapper-event-lvm2mirror.so"
# snapshot_library is the library used when monitoring a snapshot device.
#
# "libdevmapper-event-lvm2snapshot.so" monitors the filling of
# snapshots and emits a warning through syslog, when the use of
# snapshot exceeds 80%. The warning is repeated when 85%, 90% and
# 95% of the snapshot are filled.
snapshot_library = "libdevmapper-event-lvm2snapshot.so"
}

View File

@ -0,0 +1,28 @@
--ec2_url=http://192.168.255.1:8773/services/Cloud
--rabbit_host=192.168.255.1
--redis_host=192.168.255.1
--s3_host=192.168.255.1
--vpn_ip=192.168.255.1
--datastore_path=/var/lib/nova/keeper
--networks_path=/var/lib/nova/networks
--instances_path=/var/lib/nova/instances
--buckets_path=/var/lib/nova/objectstore/buckets
--images_path=/var/lib/nova/objectstore/images
--ca_path=/var/lib/nova/CA
--keys_path=/var/lib/nova/keys
--vlan_start=2000
--vlan_end=3000
--private_range=192.168.0.0/16
--public_range=10.0.0.0/24
--volume_group=vgdata
--storage_dev=/dev/sdc
--bridge_dev=eth2
--aoe_eth_dev=eth2
--public_interface=vlan0
--default_kernel=aki-DEFAULT
--default_ramdisk=ari-DEFAULT
--vpn_image_id=ami-cloudpipe
--daemonize
--verbose
--syslog
--prefix=nova

View File

@ -0,0 +1,3 @@
[Boto]
debug = 0
num_retries = 1

View File

@ -0,0 +1,35 @@
#!/bin/bash
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# This gets zipped and run on the cloudpipe-managed OpenVPN server.
#
# Usage: <script> NAME SUBJ
#   NAME - project name; key material is generated under projects/NAME
#   SUBJ - x509 subject string passed verbatim to `openssl req -subj`
NAME=$1
SUBJ=$2

# Quote expansions so project names containing spaces or glob
# characters cannot split or mangle the path.
mkdir -p "projects/$NAME"
# Bail out if the directory cannot be entered; otherwise the openssl
# commands below would write keys into the wrong directory.
cd "projects/$NAME" || exit 1

# generate a server private key
openssl genrsa -out server.key 2048
# generate a server CSR from that key, non-interactively
openssl req -new -key server.key -out server.csr -batch -subj "$SUBJ"

# If we are not already running as the nova user, hand ownership of the
# generated files to nova so the service can read them.
if [ "`id -u`" != "`grep nova /etc/passwd | cut -d':' -f3`" ]; then
    sudo chown -R nova:nogroup .
fi

View File

@ -0,0 +1,35 @@
<domain type='%(type)s'>
<name>%(name)s</name>
<os>
<type>hvm</type>
<kernel>%(basepath)s/kernel</kernel>
<initrd>%(basepath)s/ramdisk</initrd>
<cmdline>root=/dev/vda1 console=ttyS0</cmdline>
</os>
<features>
<acpi/>
</features>
<memory>%(memory_kb)s</memory>
<vcpu>%(vcpus)s</vcpu>
<devices>
<disk type='file'>
<source file='%(basepath)s/disk'/>
<target dev='vda' bus='virtio'/>
</disk>
<interface type='bridge'>
<source bridge='%(bridge_name)s'/>
<mac address='%(mac_address)s'/>
<!-- <model type='virtio'/> CANT RUN virtio network right now -->
<!--
<filterref filter="nova-instance-%(name)s">
<parameter name="IP" value="%(ip_address)s" />
<parameter name="DHCPSERVER" value="%(dhcp_server)s" />
</filterref>
-->
</interface>
<serial type="file">
<source path='%(basepath)s/console.log'/>
<target port='1'/>
</serial>
</devices>
</domain>

View File

@ -0,0 +1,137 @@
#
# The MySQL database server configuration file.
#
# You can copy this to one of:
# - "/etc/mysql/my.cnf" to set global options,
# - "~/.my.cnf" to set user-specific options.
#
# One can use all long options that the program supports.
# Run program with --help to get a list of available options and with
# --print-defaults to see which it would actually understand and use.
#
# For explanations see
# http://dev.mysql.com/doc/mysql/en/server-system-variables.html
# This will be passed to all mysql clients
# It has been reported that passwords should be enclosed with ticks/quotes
# especially if they contain "#" chars...
# Remember to edit /etc/mysql/debian.cnf when changing the socket location.
[client]
port = 3306
socket = /var/run/mysqld/mysqld.sock
# Here are entries for some specific programs
# The following values assume you have at least 32M ram
# This was formerly known as [safe_mysqld]. Both versions are currently parsed.
[mysqld_safe]
socket = /var/run/mysqld/mysqld.sock
nice = 0
[mysqld]
#
# * Basic Settings
#
#
# * IMPORTANT
# If you make changes to these settings and your system uses apparmor, you may
# also need to also adjust /etc/apparmor.d/usr.sbin.mysqld.
#
user = mysql
socket = /var/run/mysqld/mysqld.sock
port = 3306
basedir = /usr
datadir = /var/lib/mysql
tmpdir = /tmp
skip-external-locking
#
# Instead of skip-networking the default is now to listen only on
# localhost which is more compatible and is not less secure.
# bind-address = 127.0.0.1
#
# * Fine Tuning
#
innodb_buffer_pool_size = 12G
#innodb_log_file_size = 256M
innodb_log_buffer_size=4M
innodb_flush_log_at_trx_commit=2
innodb_thread_concurrency=8
innodb_flush_method=O_DIRECT
key_buffer = 128M
max_allowed_packet = 256M
thread_stack = 8196K
thread_cache_size = 32
# This replaces the startup script and checks MyISAM tables if needed
# the first time they are touched
myisam-recover = BACKUP
max_connections = 1000
table_cache = 1024
#thread_concurrency = 10
#
# * Query Cache Configuration
#
query_cache_limit = 32M
query_cache_size = 256M
#
# * Logging and Replication
#
# Both locations get rotated by the cronjob.
# Be aware that this log type is a performance killer.
# As of 5.1 you can enable the log at runtime!
#general_log_file = /var/log/mysql/mysql.log
#general_log = 1
log_error = /var/log/mysql/error.log
# Here you can see queries with especially long duration
log_slow_queries = /var/log/mysql/mysql-slow.log
long_query_time = 2
#log-queries-not-using-indexes
#
# The following can be used as easy to replay backup logs or for replication.
# note: if you are setting up a replication slave, see README.Debian about
# other settings you may need to change.
server-id = 1
log_bin = /var/log/mysql/mysql-bin.log
expire_logs_days = 10
max_binlog_size = 50M
#binlog_do_db = include_database_name
#binlog_ignore_db = include_database_name
#
# * InnoDB
#
sync_binlog=1
# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/.
# Read the manual for more InnoDB related options. There are many!
#
# * Security Features
#
# Read the manual, too, if you want chroot!
# chroot = /var/lib/mysql/
#
# For generating SSL certificates I recommend the OpenSSL GUI "tinyca".
#
# ssl-ca=/etc/mysql/cacert.pem
# ssl-cert=/etc/mysql/server-cert.pem
# ssl-key=/etc/mysql/server-key.pem
[mysqldump]
quick
quote-names
max_allowed_packet = 256M
[mysql]
#no-auto-rehash	# faster start of mysql but no tab completion
[isamchk]
key_buffer = 128M
#
# * IMPORTANT: Additional settings that can override those from this file!
# The files must end with '.cnf', otherwise they'll be ignored.
#
!includedir /etc/mysql/conf.d/

View File

@ -0,0 +1,185 @@
#! /bin/sh
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(vish): This script sets up some reasonable defaults for iptables and
# creates nova-specific chains. If you use this script you should
# run nova-network and nova-compute with --use_nova_chains=True
# NOTE(vish): If you run public nova-api on a different port, make sure to
# change the port here
# Load site-specific overrides, if an overrides file is present.
if [ -f /etc/default/nova-iptables ]; then
    . /etc/default/nova-iptables
fi

# Port the public nova-api listens on; override in the defaults file.
API_PORT=${API_PORT:-"8773"}

if [ -z "$IP" ]; then
    # NOTE(vish): IP address is what address the services ALLOW on.
    #             This just grabs the first ip in the ifconfig list, so
    #             if more than one eth device is set up this will fail
    #             and the ip of the instance should be passed in
    #             explicitly.
    IP=$(ifconfig | grep -m 1 'inet addr:' | cut -d: -f2 | awk '{print $1}')
fi

if [ -z "$PRIVATE_RANGE" ]; then
    # NOTE(vish): PRIVATE_RANGE: range that is ALLOWED to access DHCP
    PRIVATE_RANGE="192.168.0.0/12"
fi

if [ -z "$MGMT_IP" ]; then
    # NOTE(vish): Management IP is the ip over which to allow ssh
    #             traffic. It will also allow traffic to nova-api.
    MGMT_IP="$IP"
fi

if [ -z "$DMZ_IP" ]; then
    # NOTE(vish): DMZ IP is the ip over which to allow api &
    #             objectstore access
    DMZ_IP="$IP"
fi
# Reset the firewall to a permissive state: open all built-in chain
# policies, flush every rule (filter and nat), and delete the custom
# "services" chain created by load_nova_iptables.
clear_nova_iptables() {
# Policies must be opened before flushing, or in-flight traffic drops.
iptables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -P OUTPUT ACCEPT
iptables -F
iptables -t nat -F
# The services chain must be emptied (-F) before it can be deleted (-X).
iptables -F services
iptables -X services
# HACK: re-adding fail2ban rules :(
# fail2ban's chain was destroyed by the flush above; recreate an empty
# one so sshd protection keeps working until fail2ban repopulates it.
iptables -N fail2ban-ssh
iptables -A INPUT -p tcp -m multiport --dports 22 -j fail2ban-ssh
iptables -A fail2ban-ssh -j RETURN
}
# Install the nova firewall: default-deny INPUT/FORWARD, a shared
# "services" chain, and one ACCEPT rule per enabled service flag
# (GANGLIA, OBJECTSTORE, API, ... or ALL) from /etc/default/nova-iptables.
# Rule order matters: state checks first, then service rules, then the
# terminal REJECTs.
load_nova_iptables() {
iptables -P INPUT DROP
iptables -A INPUT -m state --state INVALID -j DROP
iptables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
# NOTE(ja): allow localhost for everything
iptables -A INPUT -d 127.0.0.1/32 -j ACCEPT
# NOTE(ja): 22 only allowed MGMT_IP before, but we widened it to any
#           address, since ssh should be listening only on internal
#           before we re-add this rule we will need to add
#           flexibility for RSYNC between omega/stingray
iptables -A INPUT -m tcp -p tcp --dport 22 -j ACCEPT
# NTP and ICMP are always allowed.
iptables -A INPUT -m udp -p udp --dport 123 -j ACCEPT
iptables -A INPUT -p icmp -j ACCEPT
# Per-service ACCEPT rules live in their own chain; anything that falls
# through it hits the REJECT rules below.
iptables -N services
iptables -A INPUT -j services
iptables -A INPUT -p tcp -j REJECT --reject-with tcp-reset
iptables -A INPUT -j REJECT --reject-with icmp-port-unreachable
iptables -P FORWARD DROP
iptables -A FORWARD -m state --state INVALID -j DROP
iptables -A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT
# Clamp MSS to path MTU so forwarded guest traffic survives tunnels.
iptables -A FORWARD -p tcp -m tcp --tcp-flags SYN,RST SYN -j TCPMSS --clamp-mss-to-pmtu
# NOTE(vish): DROP on output is too restrictive for now.  We need to add
#             in a bunch of more specific output rules to use it.
# iptables -P OUTPUT DROP
iptables -A OUTPUT -m state --state INVALID -j DROP
iptables -A OUTPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
# Ganglia gmond (tcp poll + udp multicast) on 8649.
if [ -n "$GANGLIA" ] || [ -n "$ALL" ]; then
    iptables -A services -m tcp -p tcp -d $IP --dport 8649 -j ACCEPT
    iptables -A services -m udp -p udp -d $IP --dport 8649 -j ACCEPT
fi
# if [ -n "$WEB" ] || [ -n "$ALL" ]; then
#     # NOTE(vish): This opens up ports for web access, allowing web-based
#     #             dashboards to work.
#     iptables -A services -m tcp -p tcp -d $IP --dport 80 -j ACCEPT
#     iptables -A services -m tcp -p tcp -d $IP --dport 443 -j ACCEPT
# fi
if [ -n "$OBJECTSTORE" ] || [ -n "$ALL" ]; then
    # infrastructure
    iptables -A services -m tcp -p tcp -d $IP --dport 3333 -j ACCEPT
    # clients
    iptables -A services -m tcp -p tcp -d $DMZ_IP --dport 3333 -j ACCEPT
fi
# nova-api: accept on the internal IP plus DMZ/MGMT addresses when distinct.
if [ -n "$API" ] || [ -n "$ALL" ]; then
    iptables -A services -m tcp -p tcp -d $IP --dport $API_PORT -j ACCEPT
    if [ "$IP" != "$DMZ_IP" ]; then
        iptables -A services -m tcp -p tcp -d $DMZ_IP --dport $API_PORT -j ACCEPT
    fi
    if [ "$IP" != "$MGMT_IP" ] && [ "$DMZ_IP" != "$MGMT_IP" ]; then
        iptables -A services -m tcp -p tcp -d $MGMT_IP --dport $API_PORT -j ACCEPT
    fi
fi
if [ -n "$REDIS" ] || [ -n "$ALL" ]; then
    iptables -A services -m tcp -p tcp -d $IP --dport 6379 -j ACCEPT
fi
if [ -n "$MYSQL" ] || [ -n "$ALL" ]; then
    iptables -A services -m tcp -p tcp -d $IP --dport 3306 -j ACCEPT
fi
# RabbitMQ: epmd (4369), AMQP (5672) and the fixed inter-node port.
if [ -n "$RABBITMQ" ] || [ -n "$ALL" ]; then
    iptables -A services -m tcp -p tcp -d $IP --dport 4369 -j ACCEPT
    iptables -A services -m tcp -p tcp -d $IP --dport 5672 -j ACCEPT
    iptables -A services -m tcp -p tcp -d $IP --dport 53284 -j ACCEPT
fi
if [ -n "$DNSMASQ" ] || [ -n "$ALL" ]; then
    # NOTE(vish): this could theoretically be setup per network
    #             for each host, but it seems like overkill
    iptables -A services -m tcp -p tcp -s $PRIVATE_RANGE --dport 53 -j ACCEPT
    iptables -A services -m udp -p udp -s $PRIVATE_RANGE --dport 53 -j ACCEPT
    iptables -A services -m udp -p udp --dport 67 -j ACCEPT
fi
if [ -n "$LDAP" ] || [ -n "$ALL" ]; then
    iptables -A services -m tcp -p tcp -d $IP --dport 389 -j ACCEPT
fi
# iSCSI target: internal IP plus the 127.0.0.0/16 aliases used by nova-volume.
if [ -n "$ISCSI" ] || [ -n "$ALL" ]; then
    iptables -A services -m tcp -p tcp -d $IP --dport 3260 -j ACCEPT
    iptables -A services -m tcp -p tcp -d 127.0.0.0/16 --dport 3260 -j ACCEPT
fi
}
# Init-style entry point: dispatch on the action argument.
case "$1" in
start)
    echo "Starting nova-iptables: "
    load_nova_iptables
    ;;
stop)
    echo "Clearing nova-iptables: "
    clear_nova_iptables
    ;;
restart)
    echo "Restarting nova-iptables: "
    clear_nova_iptables
    load_nova_iptables
    ;;
*)
    # BUG FIX: $NAME was never defined anywhere in this script, so the
    # usage line printed "Usage:  {start|stop|restart}".  Use $0 (the
    # invoked script path) instead.
    echo "Usage: $0 {start|stop|restart}" >&2
    exit 1
    ;;
esac

exit 0

View File

@ -0,0 +1,19 @@
#!/bin/sh
# FILE: /etc/udev/scripts/iscsidev.sh
# udev helper: given a SCSI bus id ("host:channel:target:lun") in $1,
# print the final component of the iSCSI target name, which udev uses
# to build a stable /dev/iscsi/* symlink.
BUS=${1}
# Host number is everything before the first ':' of the bus id.
HOST=${BUS%%:*}
[ -e /sys/class/iscsi_host ] || exit 1
# NOTE: the glob stays unexpanded in the assignment and is expanded by
# the unquoted ${file} in the cat below.
file="/sys/class/iscsi_host/host${HOST}/device/session*/iscsi_session*/session*/targetname"
target_name=$(cat ${file})
# No target name found: this is not an open-iscsi session, so emit
# nothing and signal udev to skip it.
if [ -z "${target_name}" ]; then
exit 1
fi
# Emit the trailing component of the target name (text after last ':').
echo "${target_name##*:}"

View File

@ -0,0 +1,6 @@
#!/bin/bash
# Initialize nova's data services: (re)build the LDAP directory via
# slap.sh, then recreate the MySQL "nova" database and its grant.
#
# BUG FIX: abort on the first failing command.  Previously the
# /root/installed marker was created unconditionally, so a failed run
# (e.g. slap.sh or mysql erroring) was still treated as complete by
# puppet's "unless => test -f /root/installed" guard and never retried.
set -e
/root/slap.sh
# IF EXISTS keeps the first run (no database yet) from aborting under set -e.
mysql -e "DROP DATABASE IF EXISTS nova"
mysql -e "CREATE DATABASE nova"
mysql -e "GRANT ALL on nova.* to nova@'%' identified by 'TODO:CHANGEME:CMON'"
# Marker consumed by puppet; only reached when everything above succeeded.
touch /root/installed

View File

@ -0,0 +1,261 @@
#!/usr/bin/env bash
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# LDAP INSTALL SCRIPT - SHOULD BE IDEMPOTENT, but it SCRUBS all USERS
apt-get install -y slapd ldap-utils python-ldap
cat >/etc/ldap/schema/openssh-lpk_openldap.schema <<LPK_SCHEMA_EOF
#
# LDAP Public Key Patch schema for use with openssh-ldappubkey
# Author: Eric AUGE <eau@phear.org>
#
# Based on the proposal of : Mark Ruijter
#
# octetString SYNTAX
attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey'
DESC 'MANDATORY: OpenSSH Public key'
EQUALITY octetStringMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )
# printableString SYNTAX yes|no
objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY
DESC 'MANDATORY: OpenSSH LPK objectclass'
MAY ( sshPublicKey $ uid )
)
LPK_SCHEMA_EOF
cat >/etc/ldap/schema/nova.schema <<NOVA_SCHEMA_EOF
#
# Person object for Nova
# inetorgperson with extra attributes
# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
#
#
# using internet experimental oid arc as per BP64 3.1
objectidentifier novaSchema 1.3.6.1.3.1.666.666
objectidentifier novaAttrs novaSchema:3
objectidentifier novaOCs novaSchema:4
attributetype (
novaAttrs:1
NAME 'accessKey'
DESC 'Key for accessing data'
EQUALITY caseIgnoreMatch
SUBSTR caseIgnoreSubstringsMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
SINGLE-VALUE
)
attributetype (
novaAttrs:2
NAME 'secretKey'
DESC 'Secret key'
EQUALITY caseIgnoreMatch
SUBSTR caseIgnoreSubstringsMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
SINGLE-VALUE
)
attributetype (
novaAttrs:3
NAME 'keyFingerprint'
DESC 'Fingerprint of private key'
EQUALITY caseIgnoreMatch
SUBSTR caseIgnoreSubstringsMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
SINGLE-VALUE
)
attributetype (
novaAttrs:4
NAME 'isAdmin'
DESC 'Is user an administrator?'
EQUALITY booleanMatch
SYNTAX 1.3.6.1.4.1.1466.115.121.1.7
SINGLE-VALUE
)
attributetype (
novaAttrs:5
NAME 'projectManager'
DESC 'Project Managers of a project'
SYNTAX 1.3.6.1.4.1.1466.115.121.1.12
)
objectClass (
novaOCs:1
NAME 'novaUser'
DESC 'access and secret keys'
AUXILIARY
MUST ( uid )
MAY ( accessKey $ secretKey $ isAdmin )
)
objectClass (
novaOCs:2
NAME 'novaKeyPair'
DESC 'Key pair for User'
SUP top
STRUCTURAL
MUST ( cn $ sshPublicKey $ keyFingerprint )
)
objectClass (
novaOCs:3
NAME 'novaProject'
DESC 'Container for project'
SUP groupOfNames
STRUCTURAL
MUST ( cn $ projectManager )
)
NOVA_SCHEMA_EOF
mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig
cat >/etc/ldap/slapd.conf <<SLAPD_CONF_EOF
# slapd.conf - Configuration file for LDAP SLAPD
##########
# Basics #
##########
include /etc/ldap/schema/core.schema
include /etc/ldap/schema/cosine.schema
include /etc/ldap/schema/inetorgperson.schema
include /etc/ldap/schema/openssh-lpk_openldap.schema
include /etc/ldap/schema/nova.schema
pidfile /var/run/slapd/slapd.pid
argsfile /var/run/slapd/slapd.args
loglevel none
modulepath /usr/lib/ldap
# modulepath /usr/local/libexec/openldap
moduleload back_hdb
##########################
# Database Configuration #
##########################
database hdb
suffix "dc=example,dc=com"
rootdn "cn=Manager,dc=example,dc=com"
rootpw changeme
directory /var/lib/ldap
# directory /usr/local/var/openldap-data
index objectClass,cn eq
########
# ACLs #
########
access to attrs=userPassword
by anonymous auth
by self write
by * none
access to *
by self write
by * none
SLAPD_CONF_EOF
mv /etc/ldap/ldap.conf /etc/ldap/ldap.conf.orig
cat >/etc/ldap/ldap.conf <<LDAP_CONF_EOF
# LDAP Client Settings
URI ldap://localhost
BASE dc=example,dc=com
BINDDN cn=Manager,dc=example,dc=com
SIZELIMIT 0
TIMELIMIT 0
LDAP_CONF_EOF
cat >/etc/ldap/base.ldif <<BASE_LDIF_EOF
# This is the root of the directory tree
dn: dc=example,dc=com
description: Example.Com, your trusted non-existent corporation.
dc: example
o: Example.Com
objectClass: top
objectClass: dcObject
objectClass: organization
# Subtree for users
dn: ou=Users,dc=example,dc=com
ou: Users
description: Users
objectClass: organizationalUnit
# Subtree for groups
dn: ou=Groups,dc=example,dc=com
ou: Groups
description: Groups
objectClass: organizationalUnit
# Subtree for system accounts
dn: ou=System,dc=example,dc=com
ou: System
description: Special accounts used by software applications.
objectClass: organizationalUnit
# Special Account for Authentication:
dn: uid=authenticate,ou=System,dc=example,dc=com
uid: authenticate
ou: System
description: Special account for authenticating users
userPassword: {MD5}TODO-000000000000000000000000000==
objectClass: account
objectClass: simpleSecurityObject
# create the sysadmin entry
dn: cn=developers,ou=Groups,dc=example,dc=com
objectclass: groupOfNames
cn: developers
description: IT admin group
member: uid=admin,ou=Users,dc=example,dc=com
dn: cn=sysadmins,ou=Groups,dc=example,dc=com
objectclass: groupOfNames
cn: sysadmins
description: IT admin group
member: uid=admin,ou=Users,dc=example,dc=com
dn: cn=netadmins,ou=Groups,dc=example,dc=com
objectclass: groupOfNames
cn: netadmins
description: Network admin group
member: uid=admin,ou=Users,dc=example,dc=com
dn: cn=cloudadmins,ou=Groups,dc=example,dc=com
objectclass: groupOfNames
cn: cloudadmins
description: Cloud admin group
member: uid=admin,ou=Users,dc=example,dc=com
dn: cn=itsec,ou=Groups,dc=example,dc=com
objectclass: groupOfNames
cn: itsec
description: IT security users group
member: uid=admin,ou=Users,dc=example,dc=com
BASE_LDIF_EOF
# Rebuild the directory from scratch (this SCRUBS all existing users):
# stop slapd, wipe the database and the dynamic cn=config tree, then
# regenerate config from slapd.conf, load the base LDIF offline, fix
# ownership, and bring slapd back up.
/etc/init.d/slapd stop
rm -rf /var/lib/ldap/*
rm -rf /etc/ldap/slapd.d/*
# Convert the flat slapd.conf into the slapd.d/ dynamic config format.
slaptest -f /etc/ldap/slapd.conf -F /etc/ldap/slapd.d
cp /usr/share/slapd/DB_CONFIG /var/lib/ldap/DB_CONFIG
# slapadd writes directly to the (stopped) database.
slapadd -v -l /etc/ldap/base.ldif
chown -R openldap:openldap /etc/ldap/slapd.d
chown -R openldap:openldap /var/lib/ldap
/etc/init.d/slapd start

View File

@ -0,0 +1,8 @@
# fileserver.conf
[files]
path /srv/cloud/puppet/files
allow 10.0.0.0/24
[plugins]

View File

@ -0,0 +1 @@
# Refresh the APT package index.  Other manifests reference this as
# Exec["update-apt"] (via require) to get fresh metadata before installs.
exec { "update-apt":
    command => "/usr/bin/apt-get update",
}

View File

@ -0,0 +1,14 @@
# Manage the login banner files from the puppet fileserver.
class issue {
    file { "/etc/issue":
        owner => "root", group => "root", mode => 444,
        source => "puppet://${puppet_server}/files/etc/issue",
    }
    # NOTE(review): issue.net (shown to network logins) is sourced from the
    # same files/etc/issue as the console banner -- presumably intentional,
    # but confirm a separate issue.net file wasn't meant here.
    file { "/etc/issue.net":
        owner => "root", group => "root", mode => 444,
        source => "puppet://${puppet_server}/files/etc/issue",
    }
}

View File

@ -0,0 +1,34 @@
# via http://projects.puppetlabs.com/projects/puppet/wiki/Kernel_Modules_Patterns
# Ensure a kernel module is loaded now ($ensure => present) or unloaded
# ($ensure => absent), and persist the choice across reboots in the
# distro-appropriate file (/etc/modules on Ubuntu, /etc/rc.modules on
# RedHat).  The module name is taken from the resource title ($name).
define kern_module ($ensure) {
$modulesfile = $operatingsystem ? { ubuntu => "/etc/modules", redhat => "/etc/rc.modules" }
# RedHat's rc.modules must exist and be executable to run at boot.
case $operatingsystem {
redhat: { file { "/etc/rc.modules": ensure => file, mode => 755 } }
}
case $ensure {
present: {
# Append the persistence entry once (guarded by the exact-line grep).
exec { "insert_module_${name}":
command => $operatingsystem ? {
ubuntu => "/bin/echo '${name}' >> '${modulesfile}'",
redhat => "/bin/echo '/sbin/modprobe ${name}' >> '${modulesfile}' "
},
unless => "/bin/grep -qFx '${name}' '${modulesfile}'"
}
# Load immediately unless /proc/modules shows it already loaded.
exec { "/sbin/modprobe ${name}": unless => "/bin/grep -q '^${name} ' '/proc/modules'" }
}
absent: {
# Unload first, then strip the matching persistence line in place.
exec { "/sbin/modprobe -r ${name}": onlyif => "/bin/grep -q '^${name} ' '/proc/modules'" }
exec { "remove_module_${name}":
command => $operatingsystem ? {
ubuntu => "/usr/bin/perl -ni -e 'print unless /^\\Q${name}\\E\$/' '${modulesfile}'",
redhat => "/usr/bin/perl -ni -e 'print unless /^\\Q/sbin/modprobe ${name}\\E\$/' '${modulesfile}'"
},
onlyif => $operatingsystem ? {
ubuntu => "/bin/grep -qFx '${name}' '${modulesfile}'",
redhat => "/bin/grep -q '^/sbin/modprobe ${name}' '${modulesfile}'"
}
}
}
default: { err ( "unknown ensure value ${ensure}" ) }
}
}

View File

@ -0,0 +1,6 @@
# Create /dev/loop${num} (block device, major 7, minor $num) owned by
# root:disk with mode 0660.  Used to raise the count of loopback
# devices available to nova-compute beyond the kernel default.
define loopback($num) {
# The shell command doubles as the resource title; "creates" makes the
# exec idempotent once the device node exists.
exec { "mknod -m 0660 /dev/loop${num} b 7 ${num}; chown root:disk /dev/loop${num}":
creates => "/dev/loop${num}",
path => ["/usr/bin", "/usr/sbin", "/bin"]
}
}

View File

@ -0,0 +1,8 @@
# Distribute the LVM configuration from the puppet fileserver.
# NOTE(review): this class collides with "lvmconf" below -- both declare
# File["/etc/lvm/lvm.conf"] but with different sources
# (files/etc/lvm.conf here vs files/etc/lvm/lvm.conf) and modes
# (444 vs 644).  Including both on one node is a duplicate-resource
# error; site.pp only uses lvmconf.  Confirm whether this older class
# can be retired.
class lvm {
file { "/etc/lvm/lvm.conf":
owner => "root",
group => "root",
mode => 444,
source => "puppet://${puppet_server}/files/etc/lvm.conf",
}
}

View File

@ -0,0 +1,8 @@
# Distribute the cluster-wide LVM configuration file.  This is the
# variant actually pulled in by templates.pp's novanode class.
class lvmconf {
    file { "/etc/lvm/lvm.conf":
        ensure => present,
        owner  => "root",
        group  => "root",
        mode   => 644,
        source => "puppet://${puppet_server}/files/etc/lvm/lvm.conf",
    }
}

View File

@ -0,0 +1,464 @@
import "kern_module"
import "apt"
import "loopback"
#$head_node_ip = "undef"
#$rabbit_ip = "undef"
#$vpn_ip = "undef"
#$public_interface = "undef"
#$vlan_start = "5000"
#$vlan_end = "6000"
#$private_range = "10.0.0.0/16"
#$public_range = "192.168.177.0/24"
# Install the nova-iptables init script and its /etc/default config.
# $services (array) plus the optional ip/range overrides are consumed by
# the nova-iptables.erb template, which emits one SERVICE=1 flag per
# entry and the IP / PRIVATE_RANGE / MGMT_IP / DMZ_IP variables the
# init script reads.
define nova_iptables($services, $ip="", $private_range="", $mgmt_ip="", $dmz_ip="") {
file { "/etc/init.d/nova-iptables":
owner => "root", mode => 755,
source => "puppet://${puppet_server}/files/production/nova-iptables",
}
file { "/etc/default/nova-iptables":
owner => "root", mode => 644,
content => template("nova-iptables.erb")
}
}
define nova_conf_pointer($name) {
file { "/etc/nova/nova-${name}.conf":
owner => "nova", mode => 400,
content => "--flagfile=/etc/nova/nova.conf"
}
}
# Render /etc/nova/nova.conf, readable only by the nova user.
# template() with two arguments concatenates both rendered results:
# the shared flags first, then the per-cluster overrides selected by
# $cluster_name (set in site.pp).
class novaconf {
file { "/etc/nova/nova.conf":
owner => "nova", mode => 400,
content => template("production/nova-common.conf.erb", "production/nova-${cluster_name}.conf.erb")
}
# nova-manage gets its own flagfile pointer like the daemons do.
nova_conf_pointer{'manage': name => 'manage'}
}
# Data-tier node: RabbitMQ, MySQL, and the one-shot scripts that build
# the LDAP directory and the nova database.
class novadata {
package { "rabbitmq-server": ensure => present }
file { "/etc/rabbitmq/rabbitmq.conf":
owner => "root", mode => 644,
# Pin the node name so the broker identity survives hostname changes.
content => "NODENAME=rabbit@localhost",
}
service { "rabbitmq-server":
ensure => running,
enable => true,
hasstatus => true,
require => [
File["/etc/rabbitmq/rabbitmq.conf"],
Package["rabbitmq-server"]
]
}
package { "mysql-server": ensure => present }
file { "/etc/mysql/my.cnf":
owner => "root", mode => 644,
source => "puppet://${puppet_server}/files/production/my.cnf",
}
service { "mysql":
ensure => running,
enable => true,
hasstatus => true,
require => [
File["/etc/mysql/my.cnf"],
Package["mysql-server"]
]
}
file { "/root/slap.sh":
owner => "root", mode => 755,
source => "puppet://${puppet_server}/files/production/slap.sh",
}
file { "/root/setup_data.sh":
owner => "root", mode => 755,
source => "puppet://${puppet_server}/files/production/setup_data.sh",
}
# setup compute data
# One-shot bootstrap, guarded by the /root/installed marker the script
# creates; runs only after mysql is up and both scripts are in place.
exec { "setup_data":
command => "/root/setup_data.sh",
path => "/usr/bin:/bin",
unless => "test -f /root/installed",
require => [
Service["mysql"],
File["/root/slap.sh"],
File["/root/setup_data.sh"]
]
}
}
# Install and run nova-scheduler at the pinned package $version.
define nscheduler($version) {
package { "nova-scheduler": ensure => $version, require => Exec["update-apt"] }
nova_conf_pointer{'scheduler': name => 'scheduler'}
# Move the init symlink from the package default to S50 so the service
# starts after its dependencies; both guards keep this idempotent.
exec { "update-rc.d -f nova-scheduler remove; update-rc.d nova-scheduler defaults 50":
path => "/usr/bin:/usr/sbin:/bin",
onlyif => "test -f /etc/init.d/nova-scheduler",
unless => "test -f /etc/rc2.d/S50nova-scheduler"
}
# Restarts whenever the package or either config file changes.
service { "nova-scheduler":
ensure => running,
hasstatus => true,
subscribe => [
Package["nova-scheduler"],
File["/etc/nova/nova.conf"],
File["/etc/nova/nova-scheduler.conf"]
]
}
}
# API-tier node: $api_servers nova-api workers supervised by monit on
# consecutive ports starting at $api_base_port, fronted by haproxy, plus
# the nova-netsync sidecar and a ganglia metric cron for haproxy.
define napi($version, $api_servers, $api_base_port) {
file { "/etc/boto.cfg":
owner => "root", mode => 644,
source => "puppet://${puppet_server}/files/production/boto.cfg",
}
file { "/var/lib/nova/CA/genvpn.sh":
owner => "nova", mode => 755,
source => "puppet://${puppet_server}/files/production/genvpn.sh",
}
package { "python-greenlet": ensure => present }
package { "nova-api": ensure => $version, require => [Exec["update-apt"], Package["python-greenlet"]] }
nova_conf_pointer{'api': name => 'api'}
# Move the init symlink to S50; guards keep this idempotent.
exec { "update-rc.d -f nova-api remove; update-rc.d nova-api defaults 50":
path => "/usr/bin:/usr/sbin:/bin",
onlyif => "test -f /etc/init.d/nova-api",
unless => "test -f /etc/rc2.d/S50nova-api"
}
# netsync has no init script, so start/stop/pattern drive it directly.
service { "nova-netsync":
start => "/usr/bin/nova-netsync --pidfile=/var/run/nova/nova-netsync.pid --lockfile=/var/run/nova/nova-netsync.pid.lock start",
stop => "/usr/bin/nova-netsync --pidfile=/var/run/nova/nova-netsync.pid --lockfile=/var/run/nova/nova-netsync.pid.lock stop",
ensure => running,
hasstatus => false,
pattern => "nova-netsync",
require => Service["nova-api"],
subscribe => File["/etc/nova/nova.conf"]
}
# The api workers are supervised by monit (group nova_api), so puppet
# only delegates start/stop/restart to monit and triggers on changes.
service { "nova-api":
start => "monit start all -g nova_api",
stop => "monit stop all -g nova_api",
restart => "monit restart all -g nova_api",
# ensure => running,
# hasstatus => true,
require => Service["monit"],
subscribe => [
# BUG FIX: this subscribe list referenced Package["nova-objectstore"]
# and /etc/nova/nova-objectstore.conf (copy-paste from nobjectstore),
# so nova-api never restarted on its own package/config changes.
Package["nova-api"],
File["/etc/boto.cfg"],
File["/etc/nova/nova.conf"],
File["/etc/nova/nova-api.conf"]
]
}
# the haproxy & monit's template use $api_servers and $api_base_port
package { "haproxy": ensure => present }
file { "/etc/default/haproxy":
owner => "root", mode => 644,
content => "ENABLED=1",
require => Package['haproxy']
}
file { "/etc/haproxy/haproxy.cfg":
owner => "root", mode => 644,
content => template("/srv/cloud/puppet/templates/haproxy.cfg.erb"),
require => Package['haproxy']
}
service { "haproxy":
ensure => true,
enable => true,
hasstatus => true,
subscribe => [
Package["haproxy"],
File["/etc/default/haproxy"],
File["/etc/haproxy/haproxy.cfg"],
]
}
# socat is needed by the gmetric script to query haproxy's stats socket.
package { "socat": ensure => present }
file { "/usr/local/bin/gmetric_haproxy.sh":
owner => "root", mode => 755,
source => "puppet://${puppet_server}/files/production/ganglia/gmetric_scripts/gmetric_haproxy.sh",
}
cron { "gmetric_haproxy":
command => "/usr/local/bin/gmetric_haproxy.sh",
user => root,
minute => "*/3",
}
package { "monit": ensure => present }
file { "/etc/default/monit":
owner => "root", mode => 644,
content => "startup=1",
require => Package['monit']
}
file { "/etc/monit/monitrc":
owner => "root", mode => 600,
content => template("/srv/cloud/puppet/templates/monitrc-nova-api.erb"),
require => Package['monit']
}
service { "monit":
ensure => true,
pattern => "sbin/monit",
subscribe => [
Package["monit"],
File["/etc/default/monit"],
File["/etc/monit/monitrc"],
]
}
}
# Install and run nova-network at the pinned package $version.
define nnetwork($version) {
# kill the default network added by the package
# NOTE(review): ncompute declares an identically-titled exec; declaring
# nnetwork and ncompute on the same node would be a duplicate-resource
# error.  Confirm they are always on separate nodes (site.pp keeps them
# apart today).
exec { "kill-libvirt-default-net":
command => "virsh net-destroy default; rm /etc/libvirt/qemu/networks/autostart/default.xml",
path => "/usr/bin:/bin",
onlyif => "test -f /etc/libvirt/qemu/networks/autostart/default.xml"
}
# EVIL HACK: custom binary because dnsmasq 2.52 segfaulted accessing dereferenced object
file { "/usr/sbin/dnsmasq":
owner => "root", group => "root",
source => "puppet://${puppet_server}/files/production/dnsmasq",
}
package { "nova-network": ensure => $version, require => Exec["update-apt"] }
# dhcpbridge is invoked by dnsmasq and needs its own flagfile pointer.
nova_conf_pointer{'dhcpbridge': name => 'dhcpbridge'}
nova_conf_pointer{'network': name => "network" }
# Move the init symlink to S50; guards keep this idempotent.
exec { "update-rc.d -f nova-network remove; update-rc.d nova-network defaults 50":
path => "/usr/bin:/usr/sbin:/bin",
onlyif => "test -f /etc/init.d/nova-network",
unless => "test -f /etc/rc2.d/S50nova-network"
}
service { "nova-network":
ensure => running,
hasstatus => true,
subscribe => [
Package["nova-network"],
File["/etc/nova/nova.conf"],
File["/etc/nova/nova-network.conf"]
]
}
}
# Install and run nova-objectstore at the pinned package $version.
define nobjectstore($version) {
package { "nova-objectstore": ensure => $version, require => Exec["update-apt"] }
nova_conf_pointer{'objectstore': name => 'objectstore'}
# Move the init symlink to S50; guards keep this idempotent.
exec { "update-rc.d -f nova-objectstore remove; update-rc.d nova-objectstore defaults 50":
path => "/usr/bin:/usr/sbin:/bin",
onlyif => "test -f /etc/init.d/nova-objectstore",
unless => "test -f /etc/rc2.d/S50nova-objectstore"
}
# Restarts whenever the package or either config file changes.
service { "nova-objectstore":
ensure => running,
hasstatus => true,
subscribe => [
Package["nova-objectstore"],
File["/etc/nova/nova.conf"],
File["/etc/nova/nova-objectstore.conf"]
]
}
}
# Compute node: KVM/libvirt host running nova-compute at the pinned
# package $version, with ganglia metrics and 40 loopback devices for
# image handling.
define ncompute($version) {
include ganglia-python
include ganglia-compute
# kill the default network added by the package
# NOTE(review): nnetwork declares an identically-titled exec; see the
# note there about keeping these defines on separate nodes.
exec { "kill-libvirt-default-net":
command => "virsh net-destroy default; rm /etc/libvirt/qemu/networks/autostart/default.xml",
path => "/usr/bin:/bin",
onlyif => "test -f /etc/libvirt/qemu/networks/autostart/default.xml"
}
# LIBVIRT has to be restarted when ebtables / gawk is installed
service { "libvirt-bin":
ensure => running,
pattern => "sbin/libvirtd",
subscribe => [
Package["ebtables"],
Kern_module["kvm_intel"]
],
require => [
Package["libvirt-bin"],
Package["ebtables"],
Package["gawk"],
Kern_module["kvm_intel"],
File["/dev/kvm"]
]
}
# libvirt is pinned to a specific PPA build (matched by python-libvirt below).
package { "libvirt-bin": ensure => "0.8.3-1ubuntu14~ppalucid2" }
package { "ebtables": ensure => present }
package { "gawk": ensure => present }
# ensure proper permissions on /dev/kvm
file { "/dev/kvm":
owner => "root",
group => "kvm",
mode => 660
}
# require hardware virt
# NOTE(review): kvm_intel assumes Intel VT-x hosts; AMD hosts would
# need kvm_amd instead.
kern_module { "kvm_intel":
ensure => present,
}
# increase loopback devices
file { "/etc/modprobe.d/loop.conf":
owner => "root", mode => 644,
content => "options loop max_loop=40"
}
nova_conf_pointer{'compute': name => 'compute'}
# One loopback device node per max_loop slot (this puppet dialect has
# no iteration, hence the explicit list).
loopback{loop0: num => 0}
loopback{loop1: num => 1}
loopback{loop2: num => 2}
loopback{loop3: num => 3}
loopback{loop4: num => 4}
loopback{loop5: num => 5}
loopback{loop6: num => 6}
loopback{loop7: num => 7}
loopback{loop8: num => 8}
loopback{loop9: num => 9}
loopback{loop10: num => 10}
loopback{loop11: num => 11}
loopback{loop12: num => 12}
loopback{loop13: num => 13}
loopback{loop14: num => 14}
loopback{loop15: num => 15}
loopback{loop16: num => 16}
loopback{loop17: num => 17}
loopback{loop18: num => 18}
loopback{loop19: num => 19}
loopback{loop20: num => 20}
loopback{loop21: num => 21}
loopback{loop22: num => 22}
loopback{loop23: num => 23}
loopback{loop24: num => 24}
loopback{loop25: num => 25}
loopback{loop26: num => 26}
loopback{loop27: num => 27}
loopback{loop28: num => 28}
loopback{loop29: num => 29}
loopback{loop30: num => 30}
loopback{loop31: num => 31}
loopback{loop32: num => 32}
loopback{loop33: num => 33}
loopback{loop34: num => 34}
loopback{loop35: num => 35}
loopback{loop36: num => 36}
loopback{loop37: num => 37}
loopback{loop38: num => 38}
loopback{loop39: num => 39}
package { "python-libvirt": ensure => "0.8.3-1ubuntu14~ppalucid2" }
package { "nova-compute":
ensure => "$version",
require => Package["python-libvirt"]
}
#file { "/usr/share/nova/libvirt.qemu.xml.template":
#    owner => "nova", mode => 400,
#    source => "puppet://${puppet_server}/files/production/libvirt.qemu.xml.template",
#}
# fix runlevels: using enable => true adds it as 20, which is too early
exec { "update-rc.d -f nova-compute remove":
path => "/usr/bin:/usr/sbin:/bin",
onlyif => "test -f /etc/rc2.d/S??nova-compute"
}
service { "nova-compute":
ensure => running,
hasstatus => true,
subscribe => [
Package["nova-compute"],
File["/etc/nova/nova.conf"],
File["/etc/nova/nova-compute.conf"],
#File["/usr/share/nova/libvirt.qemu.xml.template"],
Service["libvirt-bin"],
Kern_module["kvm_intel"]
]
}
}
# Volume node: nova-volume at the pinned package $version plus the
# iSCSI target daemon and the udev rule that creates stable
# /dev/iscsi/* symlinks via nova-iscsi-dev.sh.
define nvolume($version) {
package { "nova-volume": ensure => $version, require => Exec["update-apt"] }
nova_conf_pointer{'volume': name => 'volume'}
# fix runlevels: using enable => true adds it as 20, which is too early
exec { "update-rc.d -f nova-volume remove":
path => "/usr/bin:/usr/sbin:/bin",
onlyif => "test -f /etc/rc2.d/S??nova-volume"
}
# Debian-style enable flag; without it the iscsitarget init script is a no-op.
file { "/etc/default/iscsitarget":
owner => "root", mode => 644,
content => "ISCSITARGET_ENABLE=true"
}
package { "iscsitarget": ensure => present }
file { "/dev/iscsi": ensure => directory } # FIXME(vish): owner / mode?
file { "/usr/sbin/nova-iscsi-dev.sh":
owner => "root", mode => 755,
source => "puppet://${puppet_server}/files/production/nova-iscsi-dev.sh"
}
# Name new SCSI devices via the helper and symlink them under /dev/iscsi.
file { "/etc/udev/rules.d/55-openiscsi.rules":
owner => "root", mode => 644,
content => 'KERNEL=="sd*", BUS=="scsi", PROGRAM="/usr/sbin/nova-iscsi-dev.sh %b",SYMLINK+="iscsi/%c%n"'
}
service { "iscsitarget":
ensure => running,
enable => true,
hasstatus => true,
require => [
File["/etc/default/iscsitarget"],
Package["iscsitarget"]
]
}
service { "nova-volume":
ensure => running,
hasstatus => true,
subscribe => [
Package["nova-volume"],
File["/etc/nova/nova.conf"],
File["/etc/nova/nova-volume.conf"]
]
}
}
# Placeholder for log spooling / sentry forwarding.  Everything is
# intentionally commented out because nova-logspool and
# nova-spoolsentry have not shipped in a release yet.
class novaspool {
# This isn't in release yet
#cron { logspool:
#    command => "/usr/bin/nova-logspool /var/log/nova.log /var/lib/nova/spool",
#    user => "nova"
#}
#cron { spoolsentry:
#    command => "/usr/bin/nova-spoolsentry ${sentry_url} ${sentry_key} /var/lib/nova/spool",
#    user => "nova"
#}
}

View File

@ -0,0 +1,7 @@
# Minimal swift proxy node: memcached (token/auth caching) plus the
# proxy package itself.
class swift {
package { "memcached": ensure => present }
# NOTE(review): no ensure => running here, so puppet installs memcached
# but does not manage its service state -- confirm this is intended.
service { "memcached": require => Package['memcached'] }
package { "swift-proxy": ensure => present }
}

View File

@ -0,0 +1,120 @@
# site.pp
import "templates"
import "classes/*"
node novabase inherits default {
# $puppet_server = "192.168.0.10"
$cluster_name = "openstack001"
$ganglia_udp_send_channel = "openstack001.example.com"
$syslog = "192.168.0.10"
# THIS STUFF ISN'T IN RELEASE YET
#$sentry_url = "http://192.168.0.19/sentry/store/"
#$sentry_key = "TODO:SENTRYPASS"
$local_network = "192.168.0.0/16"
$vpn_ip = "192.168.0.2"
$public_interface = "eth0"
include novanode
# include nova-common
include opsmetrics
# non-nova stuff such as nova-dash inherit from novanode
# novaspool needs a better home
# include novaspool
}
# Builder
node "nova000.example.com" inherits novabase {
$syslog = "server"
include ntp
include syslog-server
}
# Non-Nova nodes
node
"blog.example.com",
"wiki.example.com"
inherits novabase {
include ganglia-python
include ganglia-apache
include ganglia-mysql
}
node "nova001.example.com"
inherits novabase {
include novabase
nova_iptables { nova:
services => [
"ganglia",
"mysql",
"rabbitmq",
"ldap",
"api",
"objectstore",
"nrpe",
],
ip => "192.168.0.10",
}
nobjectstore { nova: version => "0.9.0" }
nscheduler { nova: version => "0.9.0" }
napi { nova:
version => "0.9.0",
api_servers => 10,
api_base_port => 8000
}
}
node "nova002.example.com"
inherits novabase {
include novaconf
nova_iptables { nova:
services => [
"ganglia",
"dnsmasq",
"nrpe"
],
ip => "192.168.4.2",
private_range => "192.168.0.0/16",
}
nnetwork { nova: version => "0.9.0" }
}
# Compute/volume fleet: every node runs nova-compute and nova-volume
# from the same pinned release.
# BUG FIX: the list previously ended with a trailing comma after
# "nova019.example.com", which is a syntax error before "inherits" in
# the puppet node-list grammar.
node
    "nova003.example.com",
    "nova004.example.com",
    "nova005.example.com",
    "nova006.example.com",
    "nova007.example.com",
    "nova008.example.com",
    "nova009.example.com",
    "nova010.example.com",
    "nova011.example.com",
    "nova012.example.com",
    "nova013.example.com",
    "nova014.example.com",
    "nova015.example.com",
    "nova016.example.com",
    "nova017.example.com",
    "nova018.example.com",
    "nova019.example.com"
inherits novabase {
include novaconf
ncompute { nova: version => "0.9.0" }
nvolume { nova: version => "0.9.0" }
}
#node
# "nova020.example.com"
# "nova021.example.com"
#inherits novanode {
# include novaconf
#ncompute { nova: version => "0.9.0" }
#}

View File

@ -0,0 +1,21 @@
# templates.pp
import "classes/*"
class baseclass {
# include dns-client # FIXME: missing resolv.conf.erb??
include issue
}
node default {
$nova_site = "undef"
$nova_ns1 = "undef"
$nova_ns2 = "undef"
# include baseclass
}
# novanode handles the system-level requirements for Nova/Swift nodes
class novanode {
include baseclass
include lvmconf
}

View File

@ -0,0 +1,11 @@
[main]
logdir=/var/log/puppet
vardir=/var/lib/puppet
ssldir=/var/lib/puppet/ssl
rundir=/var/run/puppet
factpath=$vardir/lib/facter
pluginsync=false
[puppetmasterd]
templatedir=/var/lib/nova/contrib/puppet/templates
autosign=true

View File

@ -0,0 +1,39 @@
# this config needs haproxy-1.1.28 or haproxy-1.2.1
global
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
#log loghost local0 info
maxconn 4096
#chroot /usr/share/haproxy
stats socket /var/run/haproxy.sock
user haproxy
group haproxy
daemon
#debug
#quiet
defaults
log global
mode http
option httplog
option dontlognull
retries 3
option redispatch
stats enable
stats uri /haproxy
maxconn 2000
contimeout 5000
clitimeout 50000
srvtimeout 50000
listen nova-api 0.0.0.0:8773
option httpchk GET / HTTP/1.0\r\nHost:\ example.com
option forwardfor
reqidel ^X-Forwarded-For:.*
balance roundrobin
<% api_servers.to_i.times do |offset| %><% port = api_base_port.to_i + offset -%>
server api_<%= port %> 127.0.0.1:<%= port %> maxconn 1 check
<% end -%>
option httpclose # disable keep-alive

View File

@ -0,0 +1,138 @@
###############################################################################
## Monit control file
###############################################################################
##
## Comments begin with a '#' and extend through the end of the line. Keywords
## are case insensitive. All path's MUST BE FULLY QUALIFIED, starting with '/'.
##
## Below you will find examples of some frequently used statements. For
## information about the control file, a complete list of statements and
## options please have a look in the monit manual.
##
##
###############################################################################
## Global section
###############################################################################
##
## Start monit in the background (run as a daemon):
#
set daemon 60 # check services at 1-minute intervals
with start delay 30 # optional: delay the first check by half a minute
# (by default check immediately after monit start)
## Set syslog logging with the 'daemon' facility. If the FACILITY option is
## omitted, monit will use 'user' facility by default. If you want to log to
## a stand alone log file instead, specify the path to a log file
#
set logfile syslog facility log_daemon
#
#
### Set the location of monit id file which saves the unique id specific for
### given monit. The id is generated and stored on first monit start.
### By default the file is placed in $HOME/.monit.id.
#
# set idfile /var/.monit.id
#
### Set the location of monit state file which saves the monitoring state
### on each cycle. By default the file is placed in $HOME/.monit.state. If
### state file is stored on persistent filesystem, monit will recover the
### monitoring state across reboots. If it is on temporary filesystem, the
### state will be lost on reboot.
#
# set statefile /var/.monit.state
#
## Set the list of mail servers for alert delivery. Multiple servers may be
## specified using comma separator. By default monit uses port 25 - this
## is possible to override with the PORT option.
#
# set mailserver mail.bar.baz, # primary mailserver
# backup.bar.baz port 10025, # backup mailserver on port 10025
# localhost # fallback relay
#
#
## By default monit will drop alert events if no mail servers are available.
## If you want to keep the alerts for a later delivery retry, you can use the
## EVENTQUEUE statement. The base directory where undelivered alerts will be
## stored is specified by the BASEDIR option. You can limit the maximal queue
## size using the SLOTS option (if omitted, the queue is limited by space
## available in the back end filesystem).
#
# set eventqueue
# basedir /var/monit # set the base directory where events will be stored
# slots 100 # optionally limit the queue size
#
#
## Send status and events to M/Monit (Monit central management: for more
## information about M/Monit see http://www.tildeslash.com/mmonit).
#
# set mmonit http://monit:monit@192.168.1.10:8080/collector
#
#
## Monit by default uses the following alert mail format:
##
## --8<--
## From: monit@$HOST # sender
## Subject: monit alert -- $EVENT $SERVICE # subject
##
## $EVENT Service $SERVICE #
## #
## Date: $DATE #
## Action: $ACTION #
## Host: $HOST # body
## Description: $DESCRIPTION #
## #
## Your faithful employee, #
## monit #
## --8<--
##
## You can override this message format or parts of it, such as subject
## or sender using the MAIL-FORMAT statement. Macros such as $DATE, etc.
## are expanded at runtime. For example, to override the sender:
#
# set mail-format { from: monit@foo.bar }
#
#
## You can set alert recipients here whom will receive alerts if/when a
## service defined in this file has errors. Alerts may be restricted on
## events by using a filter as in the second example below.
#
# set alert sysadm@foo.bar # receive all alerts
# set alert manager@foo.bar only on { timeout } # receive just service-
# # timeout alert
#
#
## Monit has an embedded web server which can be used to view status of
## services monitored, the current configuration, actual services parameters
## and manage services from a web interface.
#
set httpd port 2812 and
use address localhost # only accept connection from localhost
allow localhost # allow localhost to connect to the server and
# allow admin:monit # require user 'admin' with password 'monit'
# allow @monit # allow users of group 'monit' to connect (rw)
# allow @users readonly # allow users of group 'users' to connect readonly
#
#
###############################################################################
## Services
###############################################################################
<% api_servers.to_i.times do |offset| %><% port = api_base_port.to_i + offset %>
check process nova_api_<%= port %> with pidfile /var/run/nova/nova-api-<%= port %>.pid
group nova_api
start program = "/usr/bin/nova-api --flagfile=/etc/nova/nova.conf --pidfile=/var/run/nova/nova-api-<%= port %>.pid --api_listen_port=<%= port %> --lockfile=/var/run/nova/nova-api-<%= port %>.pid.lock start"
as uid nova
stop program = "/usr/bin/nova-api --flagfile=/etc/nova/nova.conf --pidfile=/var/run/nova/nova-api-<%= port %>.pid --api_listen_port=<%= port %> --lockfile=/var/run/nova/nova-api-<%= port %>.pid.lock stop"
as uid nova
if failed port <%= port %> protocol http
with timeout 15 seconds
for 4 cycles
then restart
if totalmem > 300 Mb then restart
if cpu is greater than 60% for 2 cycles then alert
if cpu > 80% for 3 cycles then restart
if 3 restarts within 5 cycles then timeout
<% end %>

View File

@ -0,0 +1,10 @@
<% services.each do |service| -%>
<%= service.upcase %>=1
<% end -%>
<% if ip && ip != "" %>IP="<%=ip%>"<% end %>
<% if private_range && private_range != "" %>PRIVATE_RANGE="<%=private_range%>"<% end %>
<% if mgmt_ip && mgmt_ip != "" %>MGMT_IP="<%=mgmt_ip%>"<% end %>
<% if dmz_ip && dmz_ip != "" %>DMZ_IP="<%=dmz_ip%>"<% end %>
# warning: this file is auto-generated by puppet

View File

@ -0,0 +1,56 @@
# global
--dmz_net=192.168.0.0
--dmz_mask=255.255.0.0
--dmz_cidr=192.168.0.0/16
--ldap_user_dn=cn=Administrators,dc=example,dc=com
--ldap_user_unit=Users
--ldap_user_subtree=ou=Users,dc=example,dc=com
--ldap_project_subtree=ou=Groups,dc=example,dc=com
--role_project_subtree=ou=Groups,dc=example,dc=com
--ldap_cloudadmin=cn=NovaAdmins,ou=Groups,dc=example,dc=com
--ldap_itsec=cn=NovaSecurity,ou=Groups,dc=example,dc=com
--ldap_sysadmin=cn=Administrators,ou=Groups,dc=example,dc=com
--ldap_netadmin=cn=Administrators,ou=Groups,dc=example,dc=com
--ldap_developer=cn=developers,ou=Groups,dc=example,dc=com
--verbose
--daemonize
--syslog
--networks_path=/var/lib/nova/networks
--instances_path=/var/lib/nova/instances
--buckets_path=/var/lib/nova/objectstore/buckets
--images_path=/var/lib/nova/objectstore/images
--scheduler_driver=nova.scheduler.simple.SimpleScheduler
--libvirt_xml_template=/usr/share/nova/libvirt.qemu.xml.template
--credentials_template=/usr/share/nova/novarc.template
--boot_script_template=/usr/share/nova/bootscript.template
--vpn_client_template=/usr/share/nova/client.ovpn.template
--max_cores=40
--max_gigabytes=2000
--ca_path=/var/lib/nova/CA
--keys_path=/var/lib/nova/keys
--vpn_start=11000
--volume_group=vgdata
--volume_manager=nova.volume.manager.ISCSIManager
--volume_driver=nova.volume.driver.ISCSIDriver
--default_kernel=aki-DEFAULT
--default_ramdisk=ari-DEFAULT
--dhcpbridge=/usr/bin/nova-dhcpbridge
--vpn_image_id=ami-cloudpipe
--dhcpbridge_flagfile=/etc/nova/nova.conf
--credential_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=NOVA/CN=%s-%s
--auth_driver=nova.auth.ldapdriver.LdapDriver
--quota_cores=17
--quota_floating_ips=5
--quota_instances=6
--quota_volumes=10
--quota_gigabytes=100
--use_nova_chains=True
--input_chain=services
--FAKE_subdomain=ec2
--use_project_ca=True
--fixed_ip_disassociate_timeout=300
--api_max_requests=1
--api_listen_ip=127.0.0.1
--user_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=Nova/CN=%s-%s-%s
--project_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=Nova/CN=project-ca-%s-%s
--vpn_cert_subject=/C=US/ST=Texas/L=Bexar/O=NovaDev/OU=Nova/CN=project-vpn-%s-%s

View File

@ -0,0 +1,21 @@
--fixed_range=192.168.0.0/16
--iscsi_ip_prefix=192.168.4
--floating_range=10.0.0.0/24
--rabbit_host=192.168.0.10
--s3_host=192.168.0.10
--cc_host=192.168.0.10
--cc_dmz=192.168.24.10
--s3_dmz=192.168.24.10
--ec2_url=http://192.168.0.1:8773/services/Cloud
--vpn_ip=192.168.0.2
--ldap_url=ldap://192.168.0.10
--sql_connection=mysql://nova:TODO-MYPASS@192.168.0.10/nova
--other_sql_connection=mysql://nova:TODO-MYPASS@192.168.0.10/nova
--routing_source_ip=192.168.0.2
--bridge_dev=eth1
--public_interface=eth0
--vlan_start=3100
--num_networks=700
--rabbit_userid=TODO:RABBIT
--rabbit_password=TODO:CHANGEME
--ldap_password=TODO:CHANGEME