Add pacemaker provider for HA services

* Add pacemaker service provider and unit tests
* Add generic OCF handler script for RA scripts
* Add service class and docs for pacemaker primitives creation with
  default OCF type = pacemaker
* Add specs for pacemaker service define

Implements step 1 of blueprint pacemaker-provider-for-openstack

Change-Id: I5d98d8f9494bb7df4466022b3d49ac6392deb1a5
Co-Author: Dmitry Ilyin (idv1985 <dilyin@mirantis.com>)
Signed-off-by: Bogdan Dobrelya <bdobrelia@mirantis.com>
This commit is contained in:
Bogdan Dobrelya 2014-11-24 10:25:54 +01:00
parent 0fc013377b
commit d5b4999b8c
17 changed files with 2298 additions and 2 deletions

View File

@ -1,5 +1,6 @@
fixtures:
repositories:
'corosync': 'https://github.com/puppetlabs/puppetlabs-corosync'
'apt' : 'git://github.com/puppetlabs/puppetlabs-apt'
'stdlib' : 'git://github.com/puppetlabs/puppetlabs-stdlib'
symlinks:

1
.gitignore vendored
View File

@ -2,3 +2,4 @@ spec/fixtures/
pkg
Gemfile.lock
*.swp
.idea

View File

@ -0,0 +1,709 @@
require 'rexml/document'
class Puppet::Provider::Pacemaker_common < Puppet::Provider
@raw_cib = nil
@cib = nil
@primitives = nil
@primitives_structure = nil
RETRY_COUNT = 100
RETRY_STEP = 6
# get a raw CIB from cibadmin
# or from a debug file if raw_cib_file is set
# @return [String] cib xml
def raw_cib
@raw_cib = cibadmin '-Q'
if @raw_cib == '' or not @raw_cib
fail 'Could not dump CIB XML using "cibadmin -Q" command!'
end
@raw_cib
end
# create a new REXML CIB document
# @return [REXML::Document] at '/'
def cib
return @cib if @cib
@cib = REXML::Document.new(raw_cib)
end
# reset all saved variables to obtain new data
def cib_reset
# Puppet.debug 'Reset CIB memoization'
@raw_cib = nil
@cib = nil
@primitives = nil
@primitives_structure = nil
@nodes_structure = nil
end
# get status CIB section
# @return [REXML::Element] at /cib/status
def cib_section_status
REXML::XPath.match cib, '/cib/status'
end
# get lrm_rsc_ops section from lrm_resource section CIB section
# @param lrm_resource [REXML::Element]
# at /cib/status/node_state/lrm[@id="node-name"]/lrm_resources/lrm_resource[@id="resource-name"]/lrm_rsc_op
# @return [REXML::Element]
def cib_section_lrm_rsc_ops(lrm_resource)
REXML::XPath.match lrm_resource, 'lrm_rsc_op'
end
# get node_state CIB section
# @return [REXML::Element] at /cib/status/node_state
def cib_section_nodes_state
REXML::XPath.match cib_section_status, 'node_state'
end
# get primitives CIB section
# @return [REXML::Element] at /cib/configuration/resources/primitive
def cib_section_primitives
REXML::XPath.match cib, '//primitive'
end
# get lrm_rsc_ops section from lrm_resource section CIB section
# @param lrm [REXML::Element]
# at /cib/status/node_state/lrm[@id="node-name"]/lrm_resources/lrm_resource
# @return [REXML::Element]
def cib_section_lrm_resources(lrm)
REXML::XPath.match lrm, 'lrm_resources/lrm_resource'
end
# determine the status of a single operation
# @param op [Hash<String => String>]
# @return ['start','stop','master',nil]
def operation_status(op)
  # ignore operations that did not complete successfully (op-status != 0)
  return unless op['op-status'] == '0'

  case op['operation']
  when 'monitor'
    # a monitor's outcome is encoded in its rc-code:
    # 0 -> running, 7 -> stopped, 8 -> running as a master
    case op['rc-code']
    when '0' then 'start'
    when '7' then 'stop'
    when '8' then 'master'
    else
      # not entirely correct, but count a failed monitor as 'stop'
      'stop'
    end
  when 'start', 'stop', 'promote'
    # these operations only set a status when they succeeded
    return unless %w(0 7 8).include? op['rc-code']
    # report 'master' instead of 'promote'
    op['operation'] == 'promote' ? 'master' : op['operation']
  end
end
# determine resource status by parsing last operations
# @param ops [Array<Hash>]
# @return ['start','stop','master',nil]
# nil means that status is unknown
def determine_primitive_status(ops)
  # the most recent meaningful operation wins: scan the ops in order
  # and keep the last non-nil status that was reported
  ops.reduce(nil) do |last_status, op|
    operation_status(op) || last_status
  end
end
# check if operations have same failed operations
# that should be cleaned up later
# @param ops [Array<Hash>]
# @return [TrueClass,FalseClass]
def failed_operations_found?(ops)
  ops.any? do |op|
    # skip incomplete operations
    next false unless op['op-status'] == '0'
    case op['operation']
    when 'start', 'stop', 'promote'
      # these operations fail on any non-zero return code
      op['rc-code'] != '0'
    when 'monitor'
      # a monitor fails unless it reports started/stopped/master
      !%w(0 7 8).include?(op['rc-code'])
    else
      # other operations are irrelevant
      false
    end
  end
end
# convert elements's attributes to hash
# @param element [REXML::Element]
# @return [Hash<String => String>]
def attributes_to_hash(element)
  # collect every XML attribute of the element as a "name" => "value"
  # pair with both sides coerced to plain strings
  pairs = element.attributes.map { |name, value| [name.to_s, value.to_s] }
  Hash[pairs]
end
# convert element's children to hash
# of their attributes using key and hash key
# @param element [REXML::Element]
# @param key <String>
# @return [Hash<String => String>]
def elements_to_hash(element, key, tag = nil)
  collected = {}
  children = element.get_elements tag
  return collected unless children
  children.each do |child|
    attrs = attributes_to_hash child
    # index by the value of the attribute named by 'key';
    # children without it are silently skipped
    id = attrs[key]
    collected[id] = attrs if id
  end
  collected
end
# decode lrm_resources section of CIB
# @param lrm_resources [REXML::Element]
# @return [Hash<String => Hash>]
def decode_lrm_resources(lrm_resources)
resources = {}
lrm_resources.each do |lrm_resource|
resource = attributes_to_hash lrm_resource
id = resource['id']
next unless id
lrm_rsc_ops = cib_section_lrm_rsc_ops lrm_resource
ops = decode_lrm_rsc_ops lrm_rsc_ops
resource.store 'ops', ops
resource.store 'status', determine_primitive_status(ops)
resource.store 'failed', failed_operations_found?(ops)
resources.store id, resource
end
resources
end
# decode lrm_rsc_ops section of the resource's CIB
# @param lrm_rsc_ops [REXML::Element]
# @return [Array<Hash>]
def decode_lrm_rsc_ops(lrm_rsc_ops)
  # decode each op element to an attribute hash, drop ops without a
  # call-id, and order the rest chronologically by numeric call-id
  decoded = lrm_rsc_ops.map { |lrm_rsc_op| attributes_to_hash lrm_rsc_op }
  decoded.select { |op| op['call-id'] }.sort_by { |op| op['call-id'].to_i }
end
# get nodes structure with resources and their statuses
# @return [Hash<String => Hash>]
def nodes
return @nodes_structure if @nodes_structure
@nodes_structure = {}
cib_section_nodes_state.each do |node_state|
node = attributes_to_hash node_state
id = node['id']
next unless id
lrm = node_state.elements['lrm']
lrm_resources = cib_section_lrm_resources lrm
resources = decode_lrm_resources lrm_resources
node.store 'primitives', resources
@nodes_structure.store id, node
end
@nodes_structure
end
# get primitives configuration structure with primitives and their attributes
# @return [Hash<String => Hash>]
def primitives
return @primitives_structure if @primitives_structure
@primitives_structure = {}
cib_section_primitives.each do |primitive|
primitive_structure = {}
id = primitive.attributes['id']
next unless id
primitive_structure.store 'name', id
primitive.attributes.each do |k, v|
primitive_structure.store k.to_s, v
end
if primitive.parent.name and primitive.parent.attributes['id']
parent_structure = {
'id' => primitive.parent.attributes['id'],
'type' => primitive.parent.name
}
primitive_structure.store 'name', parent_structure['id']
primitive_structure.store 'parent', parent_structure
end
instance_attributes = primitive.elements['instance_attributes']
if instance_attributes
instance_attributes_structure = elements_to_hash instance_attributes, 'name', 'nvpair'
primitive_structure.store 'instance_attributes', instance_attributes_structure
end
meta_attributes = primitive.elements['meta_attributes']
if meta_attributes
meta_attributes_structure = elements_to_hash meta_attributes, 'name', 'nvpair'
primitive_structure.store 'meta_attributes', meta_attributes_structure
end
operations = primitive.elements['operations']
if operations
operations_structure = elements_to_hash operations, 'id', 'op'
primitive_structure.store 'operations', operations_structure
end
@primitives_structure.store id, primitive_structure
end
@primitives_structure
end
# check if primitive is clone or multistate
# @param primitive [String] primitive id
# @return [TrueClass,FalseClass]
def primitive_is_complex?(primitive)
return unless primitive_exists? primitive
primitives[primitive].key? 'parent'
end
# check if primitive is clone
# @param primitive [String] primitive id
# @return [TrueClass,FalseClass]
def primitive_is_clone?(primitive)
is_complex = primitive_is_complex? primitive
return is_complex unless is_complex
primitives[primitive]['parent']['type'] == 'clone'
end
# check if primitive is multistate
# @param primitive [String] primitive id
# @return [TrueClass,FalseClass]
def primitive_is_multistate?(primitive)
is_complex = primitive_is_complex? primitive
return is_complex unless is_complex
primitives[primitive]['parent']['type'] == 'master'
end
# disable this primitive
# @param primitive [String]
def disable_primitive(primitive)
retry_command {
pcs 'resource', 'disable', primitive
}
end
alias :stop_primitive :disable_primitive
# enable this primitive
# @param primitive [String]
def enable_primitive(primitive)
retry_command {
pcs 'resource', 'enable', primitive
}
end
alias :start_primitive :enable_primitive
# ban this primitive
# @param primitive [String]
def ban_primitive(primitive, node = '')
retry_command {
pcs 'resource', 'ban', primitive, node
}
end
# move this primitive
# @param primitive [String]
def move_primitive(primitive, node = '')
retry_command {
pcs 'resource', 'move', primitive, node
}
end
# unban/unmove this primitive
# @param primitive [String]
def unban_primitive(primitive, node = '')
retry_command {
pcs 'resource', 'clear', primitive, node
}
end
alias :clear_primitive :unban_primitive
alias :unmove_primitive :unban_primitive
# cleanup this primitive
# @param primitive [String]
# cleanup this primitive's failcounts and operation history
# @param primitive [String]
# @param node [String] limit the cleanup to this node when given
def cleanup_primitive(primitive, node = '')
  opts = ['--cleanup', "--resource=#{primitive}"]
  # restrict the cleanup to a single node when one was given
  opts << "--node=#{node}" unless node.empty?
  retry_command {
    crm_resource opts
  }
end
# manage this primitive
# @param primitive [String]
def manage_primitive(primitive)
retry_command {
pcs 'resource', 'manage', primitive
}
end
# unmanage this primitive
# @param primitive [String]
def unmanage_primitive(primitive)
retry_command {
pcs 'resource', 'unmanage', primitive
}
end
# set quorum_policy of the cluster
# @param primitive [String]
def no_quorum_policy(primitive)
retry_command {
pcs 'property', 'set', "no-quorum-policy=#{primitive}"
}
end
# set maintenance_mode of the cluster
# @param primitive [TrueClass,FalseClass]
def maintenance_mode(primitive)
retry_command {
pcs 'property', 'set', "maintenance-mode=#{primitive}"
}
end
# add a location constraint
# @param primitive [String] the primitive's name
# @param node [String] the node's name
# @param score [Numeric,String] score value
def constraint_location_add(primitive, node, score = 100)
id = "#{primitive}_on_#{node}"
retry_command {
pcs 'constraint', 'location', 'add', id, primitive, node, score
}
end
# remove a location constraint
# @param primitive [String] the primitive's name
# @param node [String] the node's name
def constraint_location_remove(primitive, node)
id = "#{primitive}_on_#{node}"
retry_command {
pcs 'constraint', 'location', 'remove', id
}
end
# get a status of a primitive on the entire cluster
# or on a node if node name param given
# @param primitive [String]
# @param node [String]
# @return [String]
def primitive_status(primitive, node = nil)
  if node
    # status of the primitive on the given node only;
    # nil when the node or the primitive is unknown
    nodes.
    fetch(node, {}).
    fetch('primitives',{}).
    fetch(primitive, {}).
    fetch('status', nil)
  else
    # gather the primitive's status from every node in the cluster
    statuses = []
    nodes.each do |k,v|
      status = v.fetch('primitives',{}).
      fetch(primitive, {}).
      fetch('status', nil)
      statuses << status
    end
    # rank the per-node statuses: 'master' beats 'start' beats 'stop'
    status_values = {
      'stop' => 0,
      'start' => 1,
      'master' => 2,
    }
    statuses.max_by do |status|
      # NOTE(review): this `return` exits primitive_status itself with nil
      # as soon as ANY node reports an unknown (nil) status — presumably
      # "unknown anywhere means unknown overall", but worth confirming;
      # `next 0` would instead just ignore the unknown nodes
      return unless status
      status_values[status]
    end
  end
end
# generate report of primitive statuses by node
# mostly for debugging
# @return [Hash]
def primitives_status_by_node
report = {}
return unless nodes.is_a? Hash
nodes.each do |node_name, node_data|
primitives_of_node = node_data['primitives']
next unless primitives_of_node.is_a? Hash
primitives_of_node.each do |primitive, primitive_data|
primitive_status = primitive_data['status']
report[primitive] = {} unless report[primitive].is_a? Hash
report[primitive][node_name] = primitive_status
end
end
report
end
# form a cluster status report for debugging
# @return [String]
def get_cluster_debug_report
report = "\n"
primitives_status_by_node.each do |primitive, data|
primitive_name = primitive
primitive_name = primitives[primitive]['name'] if primitives[primitive]['name']
primitive_type = 'Simple'
primitive_type = 'Cloned' if primitive_is_clone? primitive
primitive_type = 'Multistate' if primitive_is_multistate? primitive
primitive_status = primitive_status primitive
report += "-> #{primitive_type} primitive '#{primitive_name}' global status: #{primitive_status}"
report += ' (UNMANAGE)' unless primitive_is_managed? primitive
report += "\n"
report += ' ' if data.any?
nodes = []
data.keys.sort.each do |node_name|
node_status = data.fetch node_name
node_block = "#{node_name}: #{node_status}"
node_block += ' (FAIL)' if primitive_has_failures? primitive, node_name
nodes << node_block
end
report += nodes.join ' | '
report += "\n"
end
report
end
# does this primitive have failed operations?
# @param primitive [String] primitive name
# @param node [String] on this node if given
# @return [TrueClass,FalseClass]
def primitive_has_failures?(primitive, node = nil)
return unless primitive_exists? primitive
if node
nodes.
fetch(node, {}).
fetch('primitives',{}).
fetch(primitive, {}).
fetch('failed', nil)
else
nodes.each do |k,v|
failed = v.fetch('primitives',{}).
fetch(primitive, {}).
fetch('failed', nil)
return true if failed
end
false
end
end
# determine if a primitive is running on the entire cluster
# or on a node if node name param given
# @param primitive [String] primitive id
# @param node [String] on this node if given
# @return [TrueClass,FalseClass]
def primitive_is_running?(primitive, node = nil)
return unless primitive_exists? primitive
status = primitive_status primitive, node
return status unless status
%w(start master).include? status
end
# check if primitive is running as a master
# either anywhere or on the give node
# @param primitive [String] primitive id
# @param node [String] on this node if given
# @return [TrueClass,FalseClass]
def primitive_has_master_running?(primitive, node = nil)
is_multistate = primitive_is_multistate? primitive
return is_multistate unless is_multistate
status = primitive_status primitive, node
return status unless status
status == 'master'
end
# return service status value expected by Puppet
# puppet wants :running or :stopped symbol
# @param primitive [String] primitive id
# @param node [String] on this node if given
# @return [:running,:stopped]
def get_primitive_puppet_status(primitive, node = nil)
if primitive_is_running? primitive, node
:running
else
:stopped
end
end
# return service enabled status value expected by Puppet
# puppet wants :true or :false symbols
# @param primitive [String]
# @return [:true,:false]
def get_primitive_puppet_enable(primitive)
if primitive_is_managed? primitive
:true
else
:false
end
end
# check if primitive exists in the configuration
# @param primitive primitive id or name
def primitive_exists?(primitive)
primitives.key? primitive
end
# determine if primitive is managed
# @param primitive [String] primitive id
# @return [TrueClass,FalseClass]
# TODO: will not work correctly if cluster is in management mode
def primitive_is_managed?(primitive)
return unless primitive_exists? primitive
is_managed = primitives.fetch(primitive).fetch('meta_attributes', {}).fetch('is-managed', {}).fetch('value', 'true')
is_managed == 'true'
end
# determine if primitive has target-state started
# @param primitive [String] primitive id
# @return [TrueClass,FalseClass]
# TODO: will not work correctly if target state is set globally to stopped
def primitive_is_started?(primitive)
return unless primitive_exists? primitive
target_role = primitives.fetch(primitive).fetch('meta_attributes', {}).fetch('target-role', {}).fetch('value', 'Started')
target_role == 'Started'
end
# check if pacemaker is online
# and we can work with it
# @return [TrueClass,FalseClass]
# check if pacemaker is online and we can work with it
# by trying to dump the CIB with cibadmin
# @return [TrueClass,FalseClass]
def is_online?
  # the original begin/rescue/else form carried a dead `true`
  # expression in the body; a bare rescue on the method is enough
  cibadmin '-Q'
  true
rescue Puppet::ExecutionFailure
  false
end
# retry the given command until it runs without errors
# or for RETRY_COUNT times with RETRY_STEP sec step
# print cluster status report on fail
# returns normal command output on success
# @return [String]
def retry_command
(0..RETRY_COUNT).each do
begin
out = yield
rescue Puppet::ExecutionFailure => e
Puppet.debug "Command failed: #{e.message}"
sleep RETRY_STEP
else
return out
end
end
Puppet.debug get_cluster_debug_report if is_online?
fail "Execution timeout after #{RETRY_COUNT * RETRY_STEP} seconds!"
end
# retry the given block until it returns true
# or for RETRY_COUNT times with RETRY_STEP sec step
# print cluster status report on fail
def retry_block_until_true
(0..RETRY_COUNT).each do
return if yield
sleep RETRY_STEP
end
Puppet.debug get_cluster_debug_report if is_online?
fail "Execution timeout after #{RETRY_COUNT * RETRY_STEP} seconds!"
end
# wait for pacemaker to become online
def wait_for_online
Puppet.debug "Waiting #{RETRY_COUNT * RETRY_STEP} seconds for Pacemaker to become online"
retry_block_until_true do
is_online?
end
Puppet.debug 'Pacemaker is online'
end
# cleanup a primitive and then wait until
# we can get its status again because
# cleanup blocks operations sections for a while
# @param primitive [String] primitive name
def cleanup_with_wait(primitive, node = '')
node_msgpart = node.empty? ? '' : " on node '#{node}'"
Puppet.debug "Cleanup primitive '#{primitive}'#{node_msgpart} and wait until cleanup finishes"
cleanup_primitive(primitive, node)
retry_block_until_true do
cib_reset
primitive_status(primitive) != nil
end
Puppet.debug "Primitive '#{primitive}' have been cleaned up#{node_msgpart} and is online again"
end
# wait for primitive to start
# if node is given then start on this node
# @param primitive [String] primitive id
# @param node [String] on this node if given
def wait_for_start(primitive, node = nil)
message = "Waiting #{RETRY_COUNT * RETRY_STEP} seconds for service '#{primitive}' to start"
message += " on node '#{node}'" if node
Puppet.debug message
retry_block_until_true do
cib_reset
primitive_is_running? primitive, node
end
Puppet.debug get_cluster_debug_report
message = "Service '#{primitive}' have started"
message += " on node '#{node}'" if node
Puppet.debug message
end
# wait for primitive to start as a master
# if node is given then start as a master on this node
# @param primitive [String] primitive id
# @param node [String] on this node if given
def wait_for_master(primitive, node = nil)
message = "Waiting #{RETRY_COUNT * RETRY_STEP} seconds for service '#{primitive}' to start master"
message += " on node '#{node}'" if node
Puppet.debug message
retry_block_until_true do
cib_reset
primitive_has_master_running? primitive, node
end
Puppet.debug get_cluster_debug_report
message = "Service '#{primitive}' have started master"
message += " on node '#{node}'" if node
Puppet.debug message
end
# wait for primitive to stop
# if node is given then start on this node
# @param primitive [String] primitive id
# @param node [String] on this node if given
def wait_for_stop(primitive, node = nil)
message = "Waiting #{RETRY_COUNT * RETRY_STEP} seconds for service '#{primitive}' to stop"
message += " on node '#{node}'" if node
Puppet.debug message
retry_block_until_true do
cib_reset
result = primitive_is_running? primitive, node
result.is_a? FalseClass
end
Puppet.debug get_cluster_debug_report
message = "Service '#{primitive}' was stopped"
message += " on node '#{node}'" if node
Puppet.debug message
end
end

View File

@ -0,0 +1,199 @@
require File.join File.dirname(__FILE__), '../pacemaker_common.rb'
Puppet::Type.type(:service).provide :pacemaker, :parent => Puppet::Provider::Pacemaker_common do
has_feature :enableable
has_feature :refreshable
commands :uname => 'uname'
commands :pcs => 'pcs'
commands :crm_resource => 'crm_resource'
commands :cibadmin => 'cibadmin'
# hostname of the current node
# @return [String]
def hostname
return @hostname if @hostname
@hostname = (uname '-n').chomp.strip
end
# original name passed from the type
# @return [String]
def title
@resource[:name]
end
# primitive name with 'p_' added if needed
# @return [String]
# primitive name with 'p_' added if needed
# @return [String]
def name
  return @name if @name
  @name =
    if primitive_exists? title
      Puppet.debug "Primitive with title '#{title}' was found in CIB"
      title
    else
      # fall back to the conventional 'p_' prefixed primitive name
      prefixed = "p_#{title}"
      fail "Primitive '#{title}' was not found in CIB!" unless primitive_exists? prefixed
      Puppet.debug "Using '#{prefixed}' name instead of '#{title}'"
      prefixed
    end
end
# full name of the primitive
# if resource is complex use group name
# @return [String]
def full_name
return @full_name if @full_name
if primitive_is_complex? name
full_name = primitives[name]['name']
Puppet.debug "Using full name '#{full_name}' for complex primitive '#{name}'"
@full_name = full_name
else
@full_name = name
end
end
# name of the basic service without 'p_' prefix
# used to disable the basic service
# @return [String]
# name of the basic service without the 'p_' prefix,
# used to disable the basic (non-pacemaker) service
# @return [String]
def basic_service_name
  return @basic_service_name if @basic_service_name
  @basic_service_name =
    if name.start_with? 'p_'
      stripped = name.gsub /^p_/, ''
      Puppet.debug "Using '#{stripped}' as the basic service name for primitive '#{name}'"
      stripped
    else
      name
    end
end
# called by Puppet to determine if the service
# is running on the local node
# @return [:running,:stopped]
def status
wait_for_online
Puppet.debug "Call: 'status' for Pacemaker service '#{name}' on node '#{hostname}'"
cib_reset
out = get_primitive_puppet_status name, hostname
Puppet.debug get_cluster_debug_report
Puppet.debug "Return: '#{out}' (#{out.class})"
out
end
# called by Puppet to start the service
def start
Puppet.debug "Call 'start' for Pacemaker service '#{name}' on node '#{hostname}'"
enable unless primitive_is_managed? name
disable_basic_service
constraint_location_add name, hostname
unban_primitive name, hostname
start_primitive name
cleanup_with_wait(name, hostname) if primitive_has_failures?(name, hostname)
if primitive_is_multistate? name
Puppet.debug "Choose master start for Pacemaker service '#{name}'"
wait_for_master name
else
Puppet.debug "Choose global start for Pacemaker service '#{name}'"
wait_for_start name
end
end
# called by Puppet to stop the service
def stop
Puppet.debug "Call 'stop' for Pacemaker service '#{name}' on node '#{hostname}'"
enable unless primitive_is_managed? name
cleanup_with_wait(name, hostname) if primitive_has_failures?(name, hostname)
if primitive_is_complex? name
Puppet.debug "Choose local stop for Pacemaker service '#{name}' on node '#{hostname}'"
ban_primitive name, hostname
wait_for_stop name, hostname
else
Puppet.debug "Choose global stop for Pacemaker service '#{name}'"
stop_primitive name
wait_for_stop name
end
end
# called by Puppet to restart the service
def restart
Puppet.debug "Call 'restart' for Pacemaker service '#{name}' on node '#{hostname}'"
unless primitive_is_running? name, hostname
Puppet.info "Pacemaker service '#{name}' is not running on node '#{hostname}'. Skipping restart!"
return
end
begin
stop
rescue
nil
ensure
start
end
end
# called by Puppet to enable the service
def enable
Puppet.debug "Call 'enable' for Pacemaker service '#{name}' on node '#{hostname}'"
manage_primitive name
end
# called by Puppet to disable the service
def disable
Puppet.debug "Call 'disable' for Pacemaker service '#{name}' on node '#{hostname}'"
unmanage_primitive name
end
alias :manual_start :disable
# called by Puppet to determine if the service is enabled
# @return [:true,:false]
def enabled?
Puppet.debug "Call 'enabled?' for Pacemaker service '#{name}' on node '#{hostname}'"
out = get_primitive_puppet_enable name
Puppet.debug "Return: '#{out}' (#{out.class})"
out
end
# create an extra provider instance to deal with the basic service
# the provider will be chosen to match the current system
# @return [Puppet::Type::Service::Provider]
def extra_provider(provider_name = nil)
return @extra_provider if @extra_provider
begin
param_hash = {}
param_hash.store :name, basic_service_name
param_hash.store :provider, provider_name if provider_name
type = Puppet::Type::Service.new param_hash
@extra_provider = type.provider
rescue => e
Puppet.warning "Could not get extra provider for Pacemaker primitive '#{name}': #{e.message}"
@extra_provider = nil
end
end
# disable and stop the basic service
def disable_basic_service
return unless extra_provider
begin
if extra_provider.enableable? and extra_provider.enabled? == :true
Puppet.info "Disable basic service '#{extra_provider.name}' using provider '#{extra_provider.class.name}'"
extra_provider.disable
else
Puppet.info "Basic service '#{extra_provider.name}' is disabled as reported by '#{extra_provider.class.name}' provider"
end
if extra_provider.status == :running
Puppet.info "Stop basic service '#{extra_provider.name}' using provider '#{extra_provider.class.name}'"
extra_provider.stop
else
Puppet.info "Basic service '#{extra_provider.name}' is stopped as reported by '#{extra_provider.class.name}' provider"
end
rescue => e
Puppet.warning "Could not disable basic service for Pacemaker primitive '#{name}' using '#{extra_provider.class.name}' provider: #{e.message}"
end
end
end

View File

@ -0,0 +1,192 @@
# == Class: openstack_extras::pacemaker::service
#
# Configures Pacemaker resource for a specified service and
# overrides its service provider to Pacemaker.
# Assumes the service already exists in the Puppet catalog.
# For example, the one, such as nova-api, heat-engine, neutron-agent-l3
# and so on, created by other core Puppet modules for Openstack.
#
# === Parameters
#
# [*ensure*]
# (optional) The state of the service provided by Pacemaker
# Defaults to present
#
# [*ocf_root_path*]
# (optional) The path for OCF scripts
# Defaults to /usr/lib/ocf
#
# [*primitive_class*]
# (optional) The class of Pacemaker resource (primitive)
# Defaults to ocf
#
# [*primitive_provider*]
# (optional) The provider of OCF scripts
# Defaults to pacemaker
#
# [*primitive_type*]
# (optional) The type of the primitive (OCF file name).
# Used with the other parameters as a full path to OCF script:
# primitive_class/primitive_provider/primitive_type
# resided at ocf_root_path/resource.d
# Defaults to false
#
# [*parameters*]
# (optional) The hash of parameters for a primitive
# Defaults to false
#
# [*operations*]
# (optional) The hash of operations for a primitive
# Defaults to false
#
# [*metadata*]
# (optional) The hash of metadata for a primitive
# Defaults to false
#
# [*ms_metadata*]
# (optional) The hash of ms_metadata for a primitive
# Defaults to false
#
# [*use_handler*]
# (optional) The handler (wrapper script) for OCF script
# Could be useful for debug and informational purposes.
# It sets some default values like OCF_ROOT in order to
# simplify debugging of OCF scripts
# Defaults to true
#
# [*handler_root_path*]
# (optional) The path for a handler script
# Defaults to /usr/local/bin
#
# [*ocf_script_template*]
# (optional) ERB template for OCF script for Pacemaker
# resource
# Defaults to false
#
# [*ocf_script_file*]
# (optional) OCF file for Pacemaker resource
# Defaults to false
#
# [*create_primitive*]
# (optional) Controls Pacemaker primitive creation
# Defaults to true
#
# === Examples
#
# Will create resource and ensure Pacemaker provider for
# 'some-api-service' with the given OCF script template and
# parameters:
#
# $metadata = {
# 'resource-stickiness' => '1'
# }
# $operations = {
# 'monitor' => {
# 'interval' => '20',
# 'timeout' => '30',
# },
# 'start' => {
# 'timeout' => '60',
# },
# 'stop' => {
# 'timeout' => '60',
# },
# }
# $ms_metadata = {
# 'interleave' => true,
# }
#
# openstack_extras::pacemaker::service { 'some-api-service' :
# primitive_type => 'some-api-service',
# metadata => $metadata,
# ms_metadata => $ms_metadata,
# operations => $operations,
# ocf_script_template => 'some_module/some_api_service.ocf.erb',
# }
#
define openstack_extras::pacemaker::service (
  $ensure              = 'present',
  $ocf_root_path       = '/usr/lib/ocf',
  $primitive_class     = 'ocf',
  $primitive_provider  = 'pacemaker',
  $primitive_type      = false,
  $parameters          = false,
  $operations          = false,
  $metadata            = false,
  $ms_metadata         = false,
  $use_handler         = true,
  $handler_root_path   = '/usr/local/bin',
  $ocf_script_template = false,
  $ocf_script_file     = false,
  $create_primitive    = true,
) {

  $service_name     = $title
  $primitive_name   = "p_${service_name}"
  $ocf_script_name  = "${service_name}-ocf-file"
  $ocf_handler_name = "ocf_handler_${service_name}"

  # full paths to the deployed OCF script and its handler wrapper
  # (was "${ocf_dir_path}/${primitive_provider}/${$primitive_type}" —
  # the stray '$' inside the interpolation braces has been dropped)
  $ocf_dir_path     = "${ocf_root_path}/resource.d"
  $ocf_script_path  = "${ocf_dir_path}/${primitive_provider}/${primitive_type}"
  $ocf_handler_path = "${handler_root_path}/${ocf_handler_name}"

  # re-point the already declared service resource to the pacemaker
  # provider, matching it either by title or by name
  Service<| title == $service_name |> {
    provider => 'pacemaker',
  }
  Service<| name == $service_name |> {
    provider => 'pacemaker',
  }

  if $create_primitive {
    cs_primitive { $primitive_name :
      ensure          => $ensure,
      primitive_class => $primitive_class,
      primitive_type  => $primitive_type,
      provided_by     => $primitive_provider,
      parameters      => $parameters,
      operations      => $operations,
      metadata        => $metadata,
      ms_metadata     => $ms_metadata,
    }
  }

  # deploy the OCF script either from an ERB template or a file source
  if $ocf_script_template or $ocf_script_file {
    file { $ocf_script_name :
      ensure => $ensure,
      path   => $ocf_script_path,
      mode   => '0755',
      owner  => 'root',
      group  => 'root',
    }
    if $ocf_script_template {
      File[$ocf_script_name] {
        content => template($ocf_script_template),
      }
    } elsif $ocf_script_file {
      File[$ocf_script_name] {
        source => "puppet:///modules/${ocf_script_file}",
      }
    }
  }

  # optional debug/handler wrapper, only meaningful for OCF primitives
  if ($primitive_class == 'ocf') and ($use_handler) {
    file { $ocf_handler_name :
      ensure  => present,
      path    => $ocf_handler_path,
      owner   => 'root',
      group   => 'root',
      mode    => '0700',
      content => template('openstack_extras/ocf_handler.erb'),
    }
  }

  # ordering: OCF script -> primitive -> service, handler -> service;
  # a changed OCF script also triggers a service refresh
  File<| title == $ocf_script_name |> ->
  Cs_primitive<| title == $primitive_name |>
  File<| title == $ocf_script_name |> ~> Service[$service_name]
  Cs_primitive<| title == $primitive_name |> -> Service[$service_name]
  File<| title == $ocf_handler_name |> -> Service[$service_name]
}

View File

@ -31,5 +31,6 @@
],
"description": "Puppet module to add useful utilities for OpenStack deployments",
"dependencies": [
{ "name": "puppetlabs/corosync", "version_requirement": ">=0.1.0" }
]
}

View File

@ -0,0 +1,139 @@
# Unit tests for the openstack_extras::pacemaker::service define, which
# wraps a system service with a Pacemaker primitive (Cs_primitive), an
# OCF resource-agent script and an optional OCF handler wrapper.
require 'spec_helper'

describe 'openstack_extras::pacemaker::service', :type => :define do
  # The define reparents an existing Service resource, so one must be
  # declared first (see spec/fixtures modules: class foo declares foo-api).
  let :pre_condition do
    "class { 'foo': }"
  end

  let (:title) { 'foo-api' }

  # Mirrors the define's parameter defaults so each context only merges
  # in the values it overrides.
  let :default_params do
    {
      :ensure => 'present',
      :ocf_root_path => '/usr/lib/ocf',
      :primitive_class => 'ocf',
      :primitive_provider => 'pacemaker',
      :primitive_type => false,
      :parameters => false,
      :operations => false,
      :metadata => false,
      :ms_metadata => false,
      :use_handler => true,
      :handler_root_path => '/usr/local/bin',
      :ocf_script_template => false,
      :ocf_script_file => false,
      :create_primitive => true
    }
  end

  context 'with defaults' do
    it 'should contain openstack_extras::pacemaker::service definition' do
      should contain_openstack_extras__pacemaker__service(title).with(default_params)
    end

    it 'should override existing service provider' do
      should contain_service('foo-api').with(
        {
          :provider => 'pacemaker'
        })
    end

    it 'should create a pacemaker primitive' do
      should contain_cs_primitive('p_foo-api').with(
        {
          'ensure' => default_params[:ensure],
          'primitive_class' => default_params[:primitive_class],
          'primitive_type' => default_params[:primitive_type],
          'provided_by' => default_params[:primitive_provider],
          'parameters' => default_params[:parameters],
          'operations' => default_params[:operations],
          'metadata' => default_params[:metadata],
          'ms_metadata' => default_params[:ms_metadata],
        })
    end
  end

  context 'with custom OCF file' do
    let :params do
      default_params.merge(
        {
          :ocf_script_file => 'foo/scripts/foo.ocf'
        }
      )
    end

    # Derived paths must match how the define composes them:
    # <ocf_root>/resource.d/<provider>/<type> for the RA script and
    # <handler_root>/ocf_handler_<title> for the wrapper.
    let (:ocf_dir_path) { "#{params[:ocf_root_path]}/resource.d" }
    let (:ocf_script_path) { "#{ocf_dir_path}/#{params[:primitive_provider]}/#{params[:primitive_type]}" }
    let (:ocf_handler_name) { "ocf_handler_#{title}" }
    let (:ocf_handler_path) { "#{params[:handler_root_path]}/#{ocf_handler_name}" }

    it 'should create an OCF file' do
      should contain_file("#{title}-ocf-file").with(
        {
          'ensure' => 'present',
          'path' => ocf_script_path,
          'mode' => '0755',
          'owner' => 'root',
          'group' => 'root',
          'source' => "puppet:///modules/#{params[:ocf_script_file]}"
        })
    end

    it 'should create a handler file' do
      should contain_file("#{ocf_handler_name}").with(
        {
          'ensure' => 'present',
          'path' => ocf_handler_path,
          'owner' => 'root',
          'group' => 'root',
          'mode' => '0700',
        }).with_content(/OCF_ROOT/)
    end
  end

  context 'with custom OCF path, provider, erb and w/o a wrapper' do
    let(:params) do
      default_params.merge(
        {
          :ocf_script_template => 'foo/foo.ocf.erb',
          :use_handler => false,
          :primitive_provider => 'some_provider',
          :ocf_root_path => '/usr/lib/some_path',
        })
    end

    let (:ocf_dir_path) { "#{params[:ocf_root_path]}/resource.d" }
    let (:ocf_script_path) {
      "#{ocf_dir_path}/#{params[:primitive_provider]}/#{params[:primitive_type]}"
    }

    it 'should create an OCF file from template' do
      should contain_file("#{title}-ocf-file").with(
        {
          'path' => ocf_script_path,
          'mode' => '0755',
          'owner' => 'root',
          'group' => 'root'
        }).with_content(/erb/)
    end

    it 'should not create a handler file' do
      # Fixed: the original interpolated params[:ocf_handler_name], a key
      # that is never set in params, producing an empty resource title and
      # a vacuously-true assertion. Use the real derived handler name.
      should_not contain_file("ocf_handler_#{title}")
    end

    it 'should create a pacemaker primitive' do
      should contain_cs_primitive('p_foo-api').with(
        {
          'ensure' => params[:ensure],
          'primitive_class' => params[:primitive_class],
          'primitive_type' => params[:primitive_type],
          'provided_by' => params[:primitive_provider],
          'parameters' => params[:parameters],
          'operations' => params[:operations],
          'metadata' => params[:metadata],
          'ms_metadata' => params[:ms_metadata],
        })
    end
  end
end

1
spec/fixtures/manifests/site.pp vendored Normal file
View File

@ -0,0 +1 @@

View File

View File

@ -0,0 +1,3 @@
# Test fixture: declares the 'foo-api' service so specs can exercise a
# define that reparents an already-declared Service resource.
class foo {
  service { 'foo-api':
  }
}

View File

@ -0,0 +1 @@
erb

View File

@ -1 +0,0 @@
require 'spec_helper'

View File

@ -1 +1 @@
require 'puppetlabs_spec_helper/module_spec_helper'
require 'puppetlabs_spec_helper/module_spec_helper'

View File

@ -0,0 +1,483 @@
<cib epoch="622" num_updates="11" admin_epoch="0" validate-with="pacemaker-1.2" crm_feature_set="3.0.7" have-quorum="1" dc-uuid="node-1" cib-last-written="Wed Nov 5 10:54:20 2014" update-origin="node-2" update-client="cibadmin">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.10-42f2063"/>
<nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="classic openais (with plugin)"/>
<nvpair id="cib-bootstrap-options-expected-quorum-votes" name="expected-quorum-votes" value="3"/>
<nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
<nvpair id="cib-bootstrap-options-start-failure-is-fatal" name="start-failure-is-fatal" value="false"/>
<nvpair id="cib-bootstrap-options-symmetric-cluster" name="symmetric-cluster" value="false"/>
<nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1415124915"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="node-1" uname="node-1">
<instance_attributes id="nodes-node-1">
<nvpair id="nodes-node-1-gtid" name="gtid" value="b65eb4b3-644a-11e4-afd3-9335a5b6ec3f:80680"/>
</instance_attributes>
</node>
<node id="node-2" uname="node-2">
<instance_attributes id="nodes-node-2">
<nvpair id="nodes-node-2-gtid" name="gtid" value="b65eb4b3-644a-11e4-afd3-9335a5b6ec3f:80645"/>
</instance_attributes>
</node>
<node id="node-3" uname="node-3"/>
</nodes>
<resources>
<primitive class="ocf" id="vip__public" provider="pacemaker" type="ns_IPaddr2">
<instance_attributes id="vip__public-instance_attributes">
<nvpair id="vip__public-instance_attributes-nic" name="nic" value="br-ex"/>
<nvpair id="vip__public-instance_attributes-iflabel" name="iflabel" value="ka"/>
<nvpair id="vip__public-instance_attributes-iptables_comment" name="iptables_comment" value="masquerade-for-public-net"/>
<nvpair id="vip__public-instance_attributes-ns_veth" name="ns_veth" value="hapr-p"/>
<nvpair id="vip__public-instance_attributes-base_veth" name="base_veth" value="br-ex-hapr"/>
<nvpair id="vip__public-instance_attributes-cidr_netmask" name="cidr_netmask" value="24"/>
<nvpair id="vip__public-instance_attributes-gateway" name="gateway" value="link"/>
<nvpair id="vip__public-instance_attributes-iptables_stop_rules" name="iptables_stop_rules" value="iptables -t mangle -D PREROUTING -i br-ex-hapr -j MARK --set-mark 0x2a ; iptables -t nat -D POSTROUTING -m mark --mark 0x2a ! -o br-ex -j MASQUERADE"/>
<nvpair id="vip__public-instance_attributes-ns" name="ns" value="haproxy"/>
<nvpair id="vip__public-instance_attributes-iptables_start_rules" name="iptables_start_rules" value="iptables -t mangle -I PREROUTING -i br-ex-hapr -j MARK --set-mark 0x2a ; iptables -t nat -I POSTROUTING -m mark --mark 0x2a ! -o br-ex -j MASQUERADE"/>
<nvpair id="vip__public-instance_attributes-ip" name="ip" value="10.108.1.2"/>
<nvpair id="vip__public-instance_attributes-gateway_metric" name="gateway_metric" value="10"/>
</instance_attributes>
<meta_attributes id="vip__public-meta_attributes">
<nvpair id="vip__public-meta_attributes-migration-threshold" name="migration-threshold" value="3"/>
<nvpair id="vip__public-meta_attributes-failure-timeout" name="failure-timeout" value="60"/>
<nvpair id="vip__public-meta_attributes-resource-stickiness" name="resource-stickiness" value="1"/>
</meta_attributes>
<operations>
<op id="vip__public-monitor-3" interval="3" name="monitor" timeout="30"/>
<op id="vip__public-start-0" interval="0" name="start" timeout="30"/>
<op id="vip__public-stop-0" interval="0" name="stop" timeout="30"/>
</operations>
</primitive>
<clone id="clone_ping_vip__public">
<primitive class="ocf" id="ping_vip__public" provider="pacemaker" type="ping">
<instance_attributes id="ping_vip__public-instance_attributes">
<nvpair id="ping_vip__public-instance_attributes-dampen" name="dampen" value="30s"/>
<nvpair id="ping_vip__public-instance_attributes-timeout" name="timeout" value="3s"/>
<nvpair id="ping_vip__public-instance_attributes-multiplier" name="multiplier" value="1000"/>
<nvpair id="ping_vip__public-instance_attributes-host_list" name="host_list" value="10.108.1.1"/>
</instance_attributes>
<operations>
<op id="ping_vip__public-monitor-20" interval="20" name="monitor" timeout="30"/>
</operations>
</primitive>
</clone>
<primitive class="ocf" id="vip__management" provider="pacemaker" type="ns_IPaddr2">
<instance_attributes id="vip__management-instance_attributes">
<nvpair id="vip__management-instance_attributes-nic" name="nic" value="br-mgmt"/>
<nvpair id="vip__management-instance_attributes-iflabel" name="iflabel" value="ka"/>
<nvpair id="vip__management-instance_attributes-iptables_comment" name="iptables_comment" value="masquerade-for-management-net"/>
<nvpair id="vip__management-instance_attributes-ns_veth" name="ns_veth" value="hapr-m"/>
<nvpair id="vip__management-instance_attributes-base_veth" name="base_veth" value="br-mgmt-hapr"/>
<nvpair id="vip__management-instance_attributes-cidr_netmask" name="cidr_netmask" value="24"/>
<nvpair id="vip__management-instance_attributes-gateway" name="gateway" value="link"/>
<nvpair id="vip__management-instance_attributes-iptables_stop_rules" name="iptables_stop_rules" value="iptables -t mangle -D PREROUTING -i br-mgmt-hapr -j MARK --set-mark 0x2b ; iptables -t nat -D POSTROUTING -m mark --mark 0x2b ! -o br-mgmt -j MASQUERADE"/>
<nvpair id="vip__management-instance_attributes-ns" name="ns" value="haproxy"/>
<nvpair id="vip__management-instance_attributes-iptables_start_rules" name="iptables_start_rules" value="iptables -t mangle -I PREROUTING -i br-mgmt-hapr -j MARK --set-mark 0x2b ; iptables -t nat -I POSTROUTING -m mark --mark 0x2b ! -o br-mgmt -j MASQUERADE"/>
<nvpair id="vip__management-instance_attributes-ip" name="ip" value="10.108.2.2"/>
<nvpair id="vip__management-instance_attributes-gateway_metric" name="gateway_metric" value="20"/>
</instance_attributes>
<meta_attributes id="vip__management-meta_attributes">
<nvpair id="vip__management-meta_attributes-migration-threshold" name="migration-threshold" value="3"/>
<nvpair id="vip__management-meta_attributes-failure-timeout" name="failure-timeout" value="60"/>
<nvpair id="vip__management-meta_attributes-resource-stickiness" name="resource-stickiness" value="1"/>
</meta_attributes>
<operations>
<op id="vip__management-monitor-3" interval="3" name="monitor" timeout="30"/>
<op id="vip__management-start-0" interval="0" name="start" timeout="30"/>
<op id="vip__management-stop-0" interval="0" name="stop" timeout="30"/>
</operations>
</primitive>
<master id="master_p_rabbitmq-server">
<meta_attributes id="master_p_rabbitmq-server-meta_attributes">
<nvpair id="master_p_rabbitmq-server-meta_attributes-notify" name="notify" value="true"/>
<nvpair id="master_p_rabbitmq-server-meta_attributes-master-node-max" name="master-node-max" value="1"/>
<nvpair id="master_p_rabbitmq-server-meta_attributes-ordered" name="ordered" value="false"/>
<nvpair id="master_p_rabbitmq-server-meta_attributes-target-role" name="target-role" value="Master"/>
<nvpair id="master_p_rabbitmq-server-meta_attributes-master-max" name="master-max" value="1"/>
<nvpair id="master_p_rabbitmq-server-meta_attributes-interleave" name="interleave" value="true"/>
</meta_attributes>
<primitive class="ocf" id="p_rabbitmq-server" provider="pacemaker" type="rabbitmq-server">
<instance_attributes id="p_rabbitmq-server-instance_attributes">
<nvpair id="p_rabbitmq-server-instance_attributes-node_port" name="node_port" value="5673"/>
</instance_attributes>
<meta_attributes id="p_rabbitmq-server-meta_attributes">
<nvpair id="p_rabbitmq-server-meta_attributes-migration-threshold" name="migration-threshold" value="INFINITY"/>
<nvpair id="p_rabbitmq-server-meta_attributes-failure-timeout" name="failure-timeout" value="60s"/>
</meta_attributes>
<operations>
<op id="p_rabbitmq-server-promote-0" interval="0" name="promote" timeout="120"/>
<op id="p_rabbitmq-server-monitor-30" interval="30" name="monitor" timeout="60"/>
<op id="p_rabbitmq-server-start-0" interval="0" name="start" timeout="120"/>
<op id="p_rabbitmq-server-monitor-27" interval="27" name="monitor" role="Master" timeout="60"/>
<op id="p_rabbitmq-server-stop-0" interval="0" name="stop" timeout="60"/>
<op id="p_rabbitmq-server-notify-0" interval="0" name="notify" timeout="60"/>
<op id="p_rabbitmq-server-demote-0" interval="0" name="demote" timeout="60"/>
</operations>
</primitive>
</master>
<clone id="clone_p_neutron-plugin-openvswitch-agent">
<meta_attributes id="clone_p_neutron-plugin-openvswitch-agent-meta_attributes">
<nvpair id="clone_p_neutron-plugin-openvswitch-agent-meta_attributes-interleave" name="interleave" value="true"/>
</meta_attributes>
<primitive class="ocf" id="p_neutron-plugin-openvswitch-agent" provider="pacemaker" type="neutron-agent-ovs">
<instance_attributes id="p_neutron-plugin-openvswitch-agent-instance_attributes">
<nvpair id="p_neutron-plugin-openvswitch-agent-instance_attributes-plugin_config" name="plugin_config" value="/etc/neutron/plugin.ini"/>
</instance_attributes>
<operations>
<op id="p_neutron-plugin-openvswitch-agent-monitor-20" interval="20" name="monitor" timeout="10"/>
<op id="p_neutron-plugin-openvswitch-agent-start-0" interval="0" name="start" timeout="80"/>
<op id="p_neutron-plugin-openvswitch-agent-stop-0" interval="0" name="stop" timeout="80"/>
</operations>
</primitive>
</clone>
<primitive class="ocf" id="p_neutron-dhcp-agent" provider="pacemaker" type="neutron-agent-dhcp">
<instance_attributes id="p_neutron-dhcp-agent-instance_attributes">
<nvpair id="p_neutron-dhcp-agent-instance_attributes-os_auth_url" name="os_auth_url" value="http://10.108.2.2:35357/v2.0"/>
<nvpair id="p_neutron-dhcp-agent-instance_attributes-amqp_server_port" name="amqp_server_port" value="5673"/>
<nvpair id="p_neutron-dhcp-agent-instance_attributes-multiple_agents" name="multiple_agents" value="false"/>
<nvpair id="p_neutron-dhcp-agent-instance_attributes-password" name="password" value="7BqMhboS"/>
<nvpair id="p_neutron-dhcp-agent-instance_attributes-tenant" name="tenant" value="services"/>
<nvpair id="p_neutron-dhcp-agent-instance_attributes-username" name="username" value="undef"/>
</instance_attributes>
<meta_attributes id="p_neutron-dhcp-agent-meta_attributes">
<nvpair id="p_neutron-dhcp-agent-meta_attributes-resource-stickiness" name="resource-stickiness" value="1"/>
</meta_attributes>
<operations>
<op id="p_neutron-dhcp-agent-monitor-20" interval="20" name="monitor" timeout="10"/>
<op id="p_neutron-dhcp-agent-start-0" interval="0" name="start" timeout="60"/>
<op id="p_neutron-dhcp-agent-stop-0" interval="0" name="stop" timeout="60"/>
</operations>
</primitive>
<primitive id="p_heat-engine" class="ocf" provider="pacemaker" type="heat-engine">
<meta_attributes id="p_heat-engine-meta_attributes">
<nvpair id="p_heat-engine-meta_attributes-resource-stickiness" name="resource-stickiness" value="1"/>
</meta_attributes>
<operations>
<op id="p_heat-engine-monitor-20" interval="20" name="monitor" timeout="30"/>
<op id="p_heat-engine-start-0" interval="0" name="start" timeout="60"/>
<op id="p_heat-engine-stop-0" interval="0" name="stop" timeout="60"/>
</operations>
</primitive>
<clone id="clone_p_neutron-metadata-agent">
<meta_attributes id="clone_p_neutron-metadata-agent-meta_attributes">
<nvpair id="clone_p_neutron-metadata-agent-meta_attributes-interleave" name="interleave" value="true"/>
</meta_attributes>
<primitive class="ocf" id="p_neutron-metadata-agent" provider="pacemaker" type="neutron-agent-metadata">
<operations>
<op id="p_neutron-metadata-agent-monitor-60" interval="60" name="monitor" timeout="10"/>
<op id="p_neutron-metadata-agent-start-0" interval="0" name="start" timeout="30"/>
<op id="p_neutron-metadata-agent-stop-0" interval="0" name="stop" timeout="30"/>
</operations>
</primitive>
</clone>
<clone id="clone_p_neutron-l3-agent">
<meta_attributes id="clone_p_neutron-l3-agent-meta_attributes">
<nvpair id="clone_p_neutron-l3-agent-meta_attributes-interleave" name="interleave" value="true"/>
</meta_attributes>
<primitive class="ocf" id="p_neutron-l3-agent" provider="pacemaker" type="neutron-agent-l3">
<instance_attributes id="p_neutron-l3-agent-instance_attributes">
<nvpair id="p_neutron-l3-agent-instance_attributes-os_auth_url" name="os_auth_url" value="http://10.108.2.2:35357/v2.0"/>
<nvpair id="p_neutron-l3-agent-instance_attributes-multiple_agents" name="multiple_agents" value="true"/>
<nvpair id="p_neutron-l3-agent-instance_attributes-syslog" name="syslog" value="true"/>
<nvpair id="p_neutron-l3-agent-instance_attributes-password" name="password" value="7BqMhboS"/>
<nvpair id="p_neutron-l3-agent-instance_attributes-plugin_config" name="plugin_config" value="/etc/neutron/l3_agent.ini"/>
<nvpair id="p_neutron-l3-agent-instance_attributes-debug" name="debug" value="true"/>
<nvpair id="p_neutron-l3-agent-instance_attributes-tenant" name="tenant" value="services"/>
<nvpair id="p_neutron-l3-agent-instance_attributes-username" name="username" value="undef"/>
</instance_attributes>
<operations>
<op id="p_neutron-l3-agent-monitor-20" interval="20" name="monitor" timeout="10"/>
<op id="p_neutron-l3-agent-start-0" interval="0" name="start" timeout="60"/>
<op id="p_neutron-l3-agent-stop-0" interval="0" name="stop" timeout="60"/>
</operations>
</primitive>
</clone>
<clone id="clone_p_mysql">
<primitive class="ocf" id="p_mysql" provider="pacemaker" type="mysql-wss">
<instance_attributes id="p_mysql-instance_attributes">
<nvpair id="p_mysql-instance_attributes-socket" name="socket" value="/var/run/mysqld/mysqld.sock"/>
<nvpair id="p_mysql-instance_attributes-test_passwd" name="test_passwd" value="password"/>
<nvpair id="p_mysql-instance_attributes-test_user" name="test_user" value="wsrep_sst"/>
</instance_attributes>
<operations>
<op id="p_mysql-monitor-120"