Merge pull request #33 from bodepd/swift_example_manifest

Add example site.pp
This commit is contained in:
Dan Bode
2012-06-12 19:35:30 -07:00
5 changed files with 266 additions and 292 deletions

View File

@@ -1,7 +1,6 @@
#
# This example file is almost the
# same as what I have been using
# to build swift in my environment (which is based on vagrant)
# can be used to build out a sample swift all in one environment
#
$swift_local_net_ip='127.0.0.1'

View File

@@ -1,186 +0,0 @@
#
# Example file for building out a multi-node environment
#
# This example creates nodes of the following roles:
# swift_storage - nodes that host storage servers
# swift_proxy - nodes that serve as a swift proxy
# swift_ringbuilder - nodes that are responsible for
# rebalancing the rings
#
# This example assumes a few things:
# * the multi-node scenario requires a puppetmaster
# * it assumes that networking is correctly configured
#
# These nodes need to be brought up in a certain order
#
# 1. storage nodes
# 2. ringbuilder
# 3. run the storage nodes again (to synchronize the ring db)
# TODO - the code for this has not been written yet...
# 4. run the proxy
# 5. test that everything works!!
#
# This example file is what I used for testing
# in vagrant
#
#
# simple shared salt
$swift_shared_secret='changeme'
# ip address on which the storage nodes communicate
# NOTE(review): comment originally said eth1, but the fact used below is
# $ipaddress_eth0 -- confirm which interface carries the storage network
$swift_local_net_ip = $ipaddress_eth0
# echo command output from every Exec resource to aid debugging
Exec { logoutput => true }
#
# specifies that nodes with the cert names of
# swift_storage_1,2, and 3 will be assigned the
# role of swift_storage_nodes with in the respective
# zones of 1,2,3
#
# storage node assigned to zone 1
node 'swift_storage_1' {
$swift_zone = 1
include role_swift_storage
}
# storage node assigned to zone 2
node 'swift_storage_2' {
$swift_zone = 2
include role_swift_storage
}
# storage node assigned to zone 3
node 'swift_storage_3' {
$swift_zone = 3
include role_swift_storage
}
#
# Specifies that a node with certname of swift_proxy
# will be assigned the role of swift proxy.
# In my testing environment, the proxy node also serves
# as the ringbuilder
#
node 'swift_proxy' {
# TODO this should not be recommended
# the ringbuilder role must be applied before the proxy role,
# since the proxy requires the rebalanced ring databases
class { 'role_swift_ringbuilder': }
class { 'role_swift_proxy':
require => Class['role_swift_ringbuilder'],
}
}
# standalone ringbuilder node
# NOTE(review): certname here is 'swift_ringbuilding' while the header
# comments call the role swift_ringbuilder -- confirm the intended certname
node 'swift_ringbuilding' {
include role_swift_ringbuilder
}
#
# classes that are used for role assignment
#
# base role shared by every swift node: installs an ssh server and
# the swift package/common config, keyed by the cluster-wide secret
class role_swift {
class { 'ssh::server::install': }
class { 'swift':
# not sure how I want to deal with this shared secret
# (must be identical on every node in the cluster)
swift_hash_suffix => $swift_shared_secret,
package_ensure => latest,
}
}
# role for the node that builds and rebalances the ring databases
class role_swift_ringbuilder inherits role_swift {
# collect the ring device resources exported by the storage nodes
# so they can be added to the ring databases before rebalancing
Ring_object_device <<| |>>
Ring_container_device <<| |>>
Ring_account_device <<| |>>
# build/rebalance the account, container and object rings
class { 'swift::ringbuilder':
part_power => '18',
replicas => '3',
min_part_hours => 1,
require => Class['swift'],
}
# serve the resulting ring files to the other nodes
class { 'swift::ringserver':
local_net_ip => $swift_local_net_ip,
}
# exports rsync gets that can be used to sync the ring files
@@swift::ringsync { ['account', 'object', 'container']:
ring_server => $swift_local_net_ip
}
}
# role for the proxy node: memcached plus a swift proxy with a
# minimal healthcheck/cache/tempauth pipeline
class role_swift_proxy inherits role_swift {
# curl is only required so that I can run tests
package { 'curl': ensure => present }
# backs the 'cache' middleware in the pipeline below
class { 'memcached':
listen_ip => '127.0.0.1',
}
# TODO should I enable swauth in the default config?
# (original comment said "swath" -- presumably swauth)
class { 'swift::proxy':
proxy_local_net_ip => $swift_local_net_ip,
pipeline => ['healthcheck', 'cache', 'tempauth', 'proxy-server'],
account_autocreate => true,
require => Class['swift::ringbuilder'],
}
# declare the classes backing each middleware named in the pipeline
class { ['swift::proxy::healthcheck', 'swift::proxy::cache', 'swift::proxy::tempauth']: }
}
# role for storage nodes: creates storage devices, runs the
# account/container/object servers, and exports its devices so the
# ringbuilder role can add them to the rings.
# Expects $swift_zone to be set by the calling node definition.
class role_swift_storage inherits role_swift {
# create xfs partitions on a loopback device and mount them
swift::storage::loopback { '1':
base_dir => '/srv/loopback-device',
mnt_base_dir => '/srv/node',
require => Class['swift'],
}
# install all swift storage servers together
class { 'swift::storage::all':
storage_local_net_ip => $swift_local_net_ip,
}
# TODO I need to wrap these in a define so that
# mcollective can collect that define
# these exported resources write ring config
# resources into the database so that they can be
# consumed by the ringbuilder role
# object server endpoint (port 6000, device 1)
@@ring_object_device { "${swift_local_net_ip}:6000/1":
zone => $swift_zone,
weight => 1,
}
# container server endpoint (port 6001, device 1)
@@ring_container_device { "${swift_local_net_ip}:6001/1":
zone => $swift_zone,
weight => 1,
}
# TODO should device be changed to volume
# account server endpoint (port 6002, device 1)
@@ring_account_device { "${swift_local_net_ip}:6002/1":
zone => $swift_zone,
weight => 1,
}
# sync ring databases if they have been exported
# do this before checking the status of the storage services
Swift::Ringsync<<||>> -> Class['swift::storage::all']
}

View File

@@ -1,36 +0,0 @@
#
# This file contains an example set of configuration that can be applied
# to nodes before swift is installed on them.
#
# This file is used to set up the basic environment that is used for
# testing swift deployments.
#
# use the trunk repo for swift packages
# pull swift packages from the trunk repository
class { 'swift::repo::trunk':}
#
# install the class apt and use 10.0.2.2:3128 as a proxy
# (10.0.2.2 is the host address seen from a vagrant/virtualbox guest)
#
class { 'apt':
proxy_host => '10.0.2.2',
proxy_port => '3128',
disable_keys => true,
}
#
# use puppetlab's official apt repo to install the latest
# released version of puppet
#
apt::source { 'puppet':
location => 'http://apt.puppetlabs.com/ubuntu',
release => 'natty',
key => '4BD6EC30',
}
#
# ensure that the latest version of puppet is installed
# (the repo above must exist first)
#
package { 'puppet':
ensure => latest,
require => Apt::Source['puppet'],
}

View File

@@ -1,68 +0,0 @@
# Example proxy using more middlewares:
# - Keystone: keystone + authtoken
# - Amazon S3 compatibility: swift3 + s3token
# - Rate limiting: ratelimit
# - Catch errors: catch_errors
#
# A keystone service user is required for swift, with the admin role on the
# services tenant.
# The swift service and endpoint must also be created in keystone.
#
# keystone credentials and endpoint used by the auth middlewares below
$keystone_swift_user = 'swift'
$keystone_swift_pass = 'ChangeMe'
$keystone_services_tenant = 'services'
$keystone_host = '127.0.0.1'
$keystone_auth_port = 35357
$keystone_auth_protocol = 'http'
# proxy with the full middleware pipeline; order matters
# (catch_errors first, proxy-server last)
class { 'swift::proxy':
proxy_local_net_ip => $swift_local_net_ip,
pipeline => [
'catch_errors',
'healthcheck',
'cache',
'ratelimit',
'swift3',
's3token',
'authtoken',
'keystone',
'proxy-server'
],
account_autocreate => true,
require => Class['swift::ringbuilder'],
}
# middlewares that need no extra parameters
class { [
'swift::proxy::catch_errors',
'swift::proxy::healthcheck',
'swift::proxy::cache',
'swift::proxy::swift3',
]: }
# rate limiting; account_ratelimit 0 disables per-account limits
class { 'swift::proxy::ratelimit':
clock_accuracy => 1000,
max_sleep_time_seconds => 60,
log_sleep_time_seconds => 0,
rate_buffer_seconds => 5,
account_ratelimit => 0
}
# validates S3 tokens against keystone
class { 'swift::proxy::s3token':
auth_host => $keystone_host,
auth_port => $keystone_auth_port,
auth_protocol => $keystone_auth_protocol,
}
# roles allowed to operate on swift accounts
class { 'swift::proxy::keystone':
operator_roles => ['admin', 'SwiftOperator'],
}
# keystone auth_token middleware: swift's own service credentials
class { 'swift::proxy::authtoken':
admin_user => $keystone_swift_user,
admin_tenant_name => $keystone_services_tenant,
admin_password => $keystone_swift_pass,
auth_host => $keystone_host,
auth_port => $keystone_auth_port,
auth_protocol => $keystone_auth_protocol,
}

265
examples/site.pp Normal file
View File

@@ -0,0 +1,265 @@
#
# Example file for building out a multi-node environment
#
# This example creates nodes of the following roles:
# swift_storage - nodes that host storage servers
# swift_proxy - nodes that serve as a swift proxy
# swift_ringbuilder - nodes that are responsible for
# rebalancing the rings
#
# This example assumes a few things:
# * the multi-node scenario requires a puppetmaster
# * it assumes that networking is correctly configured
#
# These nodes need to be brought up in a certain order
#
# 1. storage nodes
# 2. ringbuilder
# 3. run the storage nodes again (to synchronize the ring db)
# 4. run the proxy
# 5. test that everything works!!
# this site manifest serves as an example of how to
# deploy various swift environments
# credentials and deployment settings shared by the node definitions below
$admin_email = 'dan@example_company.com'
$keystone_db_password = 'keystone_db_password'
$keystone_admin_token = 'keystone_token'
$admin_password = 'admin_password'
$swift_user_password = 'swift_pass'
# swift specific configurations
# shared salt used as the cluster-wide swift hash suffix
$swift_shared_secret = 'changeme'
# ip address on which swift services communicate
# NOTE(review): assumes eth0 carries the storage network -- confirm
$swift_local_net_ip = $ipaddress_eth0
# public address of the proxy, registered as the keystone endpoint
$swift_proxy_address = '192.168.101.11'
$controller_node_public = '192.168.101.11'
# enables verbose/debug logging in keystone
$verbose = true
# This node can be used to deploy a keystone service.
# This service only contains the credentials for authenticating
# swift
node keystone {
# set up mysql server
class { 'mysql::server':
config_hash => {
# the priv grant fails on precise if I set a root password
# TODO I should make sure that this works
# 'root_password' => $mysql_root_password,
'bind_address' => '0.0.0.0'
}
}
# set up all openstack databases, users, grants
class { 'keystone::db::mysql':
password => $keystone_db_password,
}
# install and configure the keystone service
class { 'keystone':
admin_token => $keystone_admin_token,
# we are binding keystone on all interfaces
# the end user may want to be more restrictive
bind_host => '0.0.0.0',
log_verbose => $verbose,
log_debug => $verbose,
catalog_type => 'sql',
}
# set up the keystone config for mysql
class { 'keystone::config::mysql':
password => $keystone_db_password,
}
# set up keystone admin users
class { 'keystone::roles::admin':
email => $admin_email,
password => $admin_password,
}
# configure the keystone service user and endpoint for swift
class { 'swift::keystone::auth':
password => $swift_user_password,
address => $swift_proxy_address,
}
}
# configurations that need to be applied to all swift nodes
# base node inherited by every swift node: installs an ssh server and
# the swift package/common config, keyed by the cluster-wide secret
node swift_base {
class { 'ssh::server::install': }
class { 'swift':
# use the shared secret defined at the top of this manifest.
# BUG FIX: this previously passed the literal string
# 'swift_shared_secret', silently ignoring the
# $swift_shared_secret variable (which was otherwise unused)
swift_hash_suffix => $swift_shared_secret,
package_ensure => latest,
}
}
# The following specifies 3 swift storage nodes
# storage node assigned to zone 1
node 'swift_storage_1' inherits swift_base {
$swift_zone = 1
include role_swift_storage
}
# storage node assigned to zone 2
node 'swift_storage_2' inherits swift_base {
$swift_zone = 2
include role_swift_storage
}
# storage node assigned to zone 3
node 'swift_storage_3' inherits swift_base {
$swift_zone = 3
include role_swift_storage
}
#
# The example below is used to model swift storage nodes that
# manage 2 endpoints.
#
# The endpoints are actually just loopback devices. For real deployments
# they would need to be replaced with something that create and mounts xfs
# partitions
#
# role for storage nodes managing two devices each: creates the
# devices, runs the account/container/object servers, and exports
# each device so the proxy node can add it to the rings.
# Expects $swift_zone to be set by the calling node definition.
class role_swift_storage {
# create xfs partitions on a loopback device and mount them
swift::storage::loopback { ['1', '2']:
base_dir => '/srv/loopback-device',
mnt_base_dir => '/srv/node',
require => Class['swift'],
}
# install all swift storage servers together
class { 'swift::storage::all':
storage_local_net_ip => $swift_local_net_ip,
}
# specify endpoints per device to be added to the ring specification
# object server endpoints (port 6000), one per device
@@ring_object_device { "${swift_local_net_ip}:6000/1":
zone => $swift_zone,
weight => 1,
}
@@ring_object_device { "${swift_local_net_ip}:6000/2":
zone => $swift_zone,
weight => 1,
}
# container server endpoints (port 6001), one per device
@@ring_container_device { "${swift_local_net_ip}:6001/1":
zone => $swift_zone,
weight => 1,
}
@@ring_container_device { "${swift_local_net_ip}:6001/2":
zone => $swift_zone,
weight => 1,
}
# TODO should device be changed to volume
# account server endpoints (port 6002), one per device
@@ring_account_device { "${swift_local_net_ip}:6002/1":
zone => $swift_zone,
weight => 1,
}
@@ring_account_device { "${swift_local_net_ip}:6002/2":
zone => $swift_zone,
weight => 1,
}
# collect resources for synchronizing the ring databases
Swift::Ringsync<<||>>
}
node /swift_proxy/ inherits swift_base {
# curl is only required so that I can run tests
package { 'curl': ensure => present }
class { 'memcached':
listen_ip => '127.0.0.1',
}
# specify swift proxy and all of its middlewares
class { 'swift::proxy':
proxy_local_net_ip => $swift_local_net_ip,
pipeline => [
'catch_errors',
'healthcheck',
'cache',
'ratelimit',
'swift3',
's3token',
'authtoken',
'keystone',
'proxy-server'
],
account_autocreate => true,
# TODO where is the ringbuilder class?
require => Class['swift::ringbuilder'],
}
# configure all of the middlewares
class { [
'swift::proxy::catch_errors',
'swift::proxy::healthcheck',
'swift::proxy::cache',
'swift::proxy::swift3',
]: }
class { 'swift::proxy::ratelimit':
clock_accuracy => 1000,
max_sleep_time_seconds => 60,
log_sleep_time_seconds => 0,
rate_buffer_seconds => 5,
account_ratelimit => 0
}
class { 'swift::proxy::s3token':
# assume that the controller host is the swift api server
auth_host => $controller_node_public,
auth_port => '35357',
}
class { 'swift::proxy::keystone':
operator_roles => ['admin', 'SwiftOperator'],
}
class { 'swift::proxy::authtoken':
admin_user => 'swift',
admin_tenant_name => 'services',
admin_password => $swift_user_password,
# assume that the controller host is the swift api server
auth_host => $controller_node_public,
}
# collect all of the resources that are needed
# to balance the ring
Ring_object_device <<| |>>
Ring_container_device <<| |>>
Ring_account_device <<| |>>
# create the ring
class { 'swift::ringbuilder':
# the part power should be determined by assuming 100 partitions per drive
part_power => '18',
replicas => '3',
min_part_hours => 1,
require => Class['swift'],
}
# sets up an rsync db that can be used to sync the ring DB
class { 'swift::ringserver':
local_net_ip => $swift_local_net_ip,
}
# exports rsync gets that can be used to sync the ring files
@@swift::ringsync { ['account', 'object', 'container']:
ring_server => $swift_local_net_ip
}
# deploy a script that can be used for testing
file { '/tmp/swift_keystone_test.rb':
source => 'puppet:///modules/swift/swift_keystone_test.rb'
}
}