Dan Bode 56cb7b955d refactor of keystone examples
This commit refactors the keystone examples file so
that it is easier to consume from the
openstack_dev_env for testing.
    
Instead of keeping swift examples in this top level
testing project:
    
  https://github.com/puppetlabs/puppetlabs-openstack_dev_env
    
instead, it imports the example manifest from this
module:

  # manifests/site.pp
  import '/etc/puppet/modules-0/swift/examples/site.pp'
    
This is intended to solve two problems:
1. previously, the example code was ported to
this project, which caused the example code that
ships with each module to get out of date (since
testing is only performed on the dev_env)
2. prevents me from having to maintain multiple
examples.
    
There are a few things that had to be refactored for
this to work:
        
1. example needed to be updated to use hiera
    
so that the dev_env can override variables and
control the config of swift through the dev env.

2. keystone role changed to swift-keystone

example needed to be updated
2013-01-11 15:25:47 -08:00

264 lines
7.2 KiB
Puppet

#
# Example file for building out a multi-node environment
#
# This example creates nodes of the following roles:
# swift_storage - nodes that host storage servers
# swift_proxy - nodes that serve as a swift proxy
# swift_ringbuilder - nodes that are responsible for
# rebalancing the rings
#
# This example assumes a few things:
# * the multi-node scenario requires a puppetmaster
# * it assumes that networking is correctly configured
#
# These nodes need to be brought up in a certain order
#
# 1. storage nodes
# 2. ringbuilder
# 3. run the storage nodes again (to synchronize the ring db)
# 4. run the proxy
# 5. test that everything works!!
# this site manifest serves as an example of how to
# deploy various swift environments
#
# Every value below is resolved through hiera so that the
# openstack_dev_env project can override it; the second argument
# to hiera() is the default used when no hiera value is found.

# keystone password for the swift service user and keystone admin
$swift_admin_password = hiera('admin_password', 'admin_password')
# swift specific configurations
# shared secret used as the swift hash suffix on every node
$swift_shared_secret = hiera('swift_shared_secret', 'changeme')
# address swift services bind to on each node (facter eth0 address by default)
$swift_local_net_ip = hiera('swift_local_net_ip', $ipaddress_eth0)
# address of the node running the keystone service
$swift_keystone_node = hiera('swift_keystone_node', '172.16.0.25')
# address of the node running the swift proxy
$swift_proxy_node = hiera('swift_proxy_node', '172.16.0.21')
# ring zone assigned to this host's exported devices
$swift_zone = hiera('swift_zone', 1)
# configurations that need to be applied to all swift nodes
$swift_keystone_db_password = hiera('keystone_db_password', 'keystone_db_password')
$keystone_admin_token = hiera('admin_token', 'service_token')
$swift_keystone_admin_email = hiera('admin_email', 'keystone@localhost')
$swift_keystone_admin_password = hiera('admin_password', 'ChangeMe')
$swift_verbose = hiera('verbose', 'True')
# This node can be used to deploy a keystone service.
# This service only contains the credentials for authenticating
# swift
node swift-keystone {
  # set up mysql server
  class { 'mysql::server':
    config_hash => {
      # the priv grant fails on precise if I set a root password
      # TODO I should make sure that this works
      # 'root_password' => $mysql_root_password,
      'bind_address' => '0.0.0.0'
    }
  }
  # keep log_config unset so keystone falls back to its default
  # logging configuration
  keystone_config {
    'DEFAULT/log_config': ensure => absent,
  }
  # set up all openstack databases, users, grants
  class { 'keystone::db::mysql':
    password => $swift_keystone_db_password,
  }
  class { '::keystone':
    # NOTE: the original passed the undeclared top-scope variables
    # $verbose, $admin_token and $enabled (all undef); use the
    # variables actually declared at the top of this manifest.
    verbose        => $swift_verbose,
    debug          => $swift_verbose,
    catalog_type   => 'sql',
    admin_token    => $keystone_admin_token,
    enabled        => true,
    sql_connection => "mysql://keystone_admin:${swift_keystone_db_password}@127.0.0.1/keystone",
  }
  # Setup the Keystone Identity Endpoint
  class { 'keystone::endpoint': }
  # set up keystone admin users
  class { 'keystone::roles::admin':
    email    => $swift_keystone_admin_email,
    password => $swift_keystone_admin_password,
  }
  # configure the keystone service user and endpoint
  class { 'swift::keystone::auth':
    password => $swift_admin_password,
    address  => $swift_proxy_node,
  }
}
# Base profile shared by all swift nodes: installs an ssh server
# and the common swift configuration (hash suffix, packages).
node swift_base {
  class { 'ssh::server::install': }

  class { 'swift':
    # not sure how I want to deal with this shared secret
    package_ensure    => latest,
    swift_hash_suffix => $swift_shared_secret,
  }
}
#
# The example below is used to model swift storage nodes that
# manage 2 endpoints.
#
# The endpoints are actually just loopback devices. For real deployments
# they would need to be replaced with something that creates and mounts xfs
# partitions
#
node /swift-storage/ inherits swift_base {
  # create xfs partitions on a loopback device and mount them
  swift::storage::loopback { ['1', '2']:
    base_dir     => '/srv/loopback-device',
    mnt_base_dir => '/srv/node',
    require      => Class['swift'],
  }

  # install all swift storage servers together
  class { 'swift::storage::all':
    storage_local_net_ip => $swift_local_net_ip,
  }

  # export one ring entry per local device so that the ringbuilder
  # node can collect them; array titles replace the original
  # per-device resource blocks (identical resources are produced)
  @@ring_object_device { ["${swift_local_net_ip}:6000/1", "${swift_local_net_ip}:6000/2"]:
    zone   => $swift_zone,
    weight => 1,
  }
  @@ring_container_device { ["${swift_local_net_ip}:6001/1", "${swift_local_net_ip}:6001/2"]:
    zone   => $swift_zone,
    weight => 1,
  }
  # TODO should device be changed to volume
  @@ring_account_device { ["${swift_local_net_ip}:6002/1", "${swift_local_net_ip}:6002/2"]:
    zone   => $swift_zone,
    weight => 1,
  }

  # collect resources for synchronizing the ring databases
  Swift::Ringsync <<| |>>
}
# Proxy node: terminates the swift API, builds and balances the
# ring from the devices exported by the storage nodes, and serves
# the ring files back to those nodes over rsync.
node /swift-proxy/ inherits swift_base {
# curl is only required so that I can run tests
package { 'curl': ensure => present }
# the proxy 'cache' middleware below expects a local memcached
class { 'memcached':
listen_ip => '127.0.0.1',
}
# specify swift proxy and all of its middlewares
# NOTE(review): pipeline order matters; every middleware named here
# must have a matching swift::proxy::<name> class declared in this
# node (see the declarations below)
class { 'swift::proxy':
proxy_local_net_ip => $swift_local_net_ip,
pipeline => [
'catch_errors',
'healthcheck',
'cache',
'ratelimit',
'swift3',
's3token',
'authtoken',
'keystone',
'proxy-server'
],
account_autocreate => true,
# TODO where is the ringbuilder class?
require => Class['swift::ringbuilder'],
}
# configure all of the middlewares
# (these classes take no parameters, so they are declared as a batch)
class { [
'swift::proxy::catch_errors',
'swift::proxy::healthcheck',
'swift::proxy::cache',
'swift::proxy::swift3',
]: }
class { 'swift::proxy::ratelimit':
clock_accuracy => 1000,
max_sleep_time_seconds => 60,
log_sleep_time_seconds => 0,
rate_buffer_seconds => 5,
account_ratelimit => 0
}
# s3token validates S3-style credentials against keystone
class { 'swift::proxy::s3token':
# assume that the controller host is the swift api server
auth_host => $swift_keystone_node,
auth_port => '35357',
}
class { 'swift::proxy::keystone':
operator_roles => ['admin', 'SwiftOperator'],
}
# authtoken uses the swift service user created by
# swift::keystone::auth on the keystone node
class { 'swift::proxy::authtoken':
admin_user => 'swift',
admin_tenant_name => 'services',
admin_password => $swift_admin_password,
# assume that the controller host is the swift api server
auth_host => $swift_keystone_node,
}
# collect all of the resources that are needed
# to balance the ring (exported by the storage nodes)
Ring_object_device <<| |>>
Ring_container_device <<| |>>
Ring_account_device <<| |>>
# create the ring
class { 'swift::ringbuilder':
# the part power should be determined by assuming 100 partitions per drive
part_power => '18',
replicas => '3',
min_part_hours => 1,
require => Class['swift'],
}
# sets up an rsync db that can be used to sync the ring DB
class { 'swift::ringserver':
local_net_ip => $swift_local_net_ip,
}
# exports rsync gets that can be used to sync the ring files
# (collected by Swift::Ringsync on the storage nodes)
@@swift::ringsync { ['account', 'object', 'container']:
ring_server => $swift_local_net_ip
}
# deploy a script that can be used for testing
class { 'swift::test_file':
auth_server => $swift_keystone_node,
password => $swift_keystone_admin_password,
}
}