Introduce some new functions in pcmk_common

We hereby introduce some new utility functions:
A) pcs_offline() which runs a pcs command against a CIB file, without
   any retry logic
B) push_cib_offline() which tries to push a CIB that was manipulated
   with pcs_offline() back to the live cluster. It retries a specified
   number of times before giving up.
C) pcmk_graph_contain_id?() which takes a resource_name and a graph
   file generated via 'crm_simulate -G<file>' and returns true if the
   resource is going to be restarted by pacemaker and false otherwise.
   This is used to determine whether pacemaker deems that a resource
   has changed.
D) pcmk_restart_resource?() which, given a resource_name and a CIB,
   returns true if pacemaker plans to restart the resource and false
   if it does not. Due to
   https://bugzilla.redhat.com/show_bug.cgi?id=1561617 we leverage
   crm_simulate and not crm_diff.
E) pcmk_resource_has_changed?() which deletes the resource from an
   offline CIB copy, re-adds it and then verifies via
   pcmk_restart_resource?() whether a restart would take place. The
   reason we need to remove the resource and then re-add it is that
   pcs commands around bundles are limited (you basically cannot
   update a bundle with the full list of storage-mappings: you have
   to specify which mappings are to be added and which removed). So
   the simplest approach is to just remove the resource and re-add it.
F) pcmk_update_resource() which ties the above together: it removes
   and re-creates a resource (plus any location rule) on an offline
   CIB copy and then pushes the result back to the live cluster via
   push_cib_offline(). See the sketch below for how these compose.
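
A rough sketch (not part of this change) of how the helpers are meant
to compose; the resource name and parameters are taken from the test
fixture below, error handling is omitted, and backup_cib()/delete_cib()
are the pre-existing helpers:

  # Work on an offline copy of the live CIB
  cib = backup_cib()
  # Drop and re-create the resource against the offline copy only
  pcs_offline("resource delete ip-172.16.11.97", cib)
  pcs_offline("resource create ip-172.16.11.97 ocf:heartbeat:IPaddr2 " \
              "ip=172.16.11.97 cidr_netmask=32", cib)
  # Ask crm_simulate (via the graph check) whether a restart would happen
  if pcmk_restart_resource?("ip-172.16.11.97", cib)
    # Something changed: push the modified CIB back, retrying up to 3 times
    push_cib_offline(cib, 3, 10)
  else
    # No change needed: just discard the offline copy
    delete_cib(cib)
  end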

Change-Id: Iba7092b79af3a4dbc2888f35504175d5eb0abcdb
Michele Baldessari 2018-04-06 13:40:29 +02:00
parent 77ba104809
commit da4673ffbe
4 changed files with 430 additions and 1 deletions


@@ -9,4 +9,8 @@ ruby-devel [test platform:rpm]
ruby-dev [test platform:dpkg]
zlib1g-dev [test platform:dpkg]
zlib-devel [test platform:rpm]
pacemaker-cli-utils [test platform:dpkg]
pacemaker-cli [test platform:rpm]
pcs [test platform:rpm]
pcs [test platform:dpkg]
puppet [build]


@@ -1,4 +1,5 @@
require 'digest'
require 'rexml/document'
# Constants that represent the state of a resource/constraint
PCMK_NOCHANGENEEDED = 0
@@ -173,6 +174,133 @@ def build_pcs_location_rule_cmd(resource)
if location_rule['expression']
location_cmd += " " + location_rule['expression'].join(' ')
end
Puppet.debug("build_location_rule_cmd: #{location_cmd}")
Puppet.debug("build_pcs_location_rule_cmd: #{location_cmd}")
location_cmd
end
# This method runs a pcs command against an offline CIB file.
# Much simpler logic compared to pcs():
# returns the command output on success or false on failure.
def pcs_offline(cmd, cib)
Puppet.debug("pcs_offline: /usr/sbin/pcs -f #{cib} #{cmd}")
pcs_out = `/usr/sbin/pcs -f #{cib} #{cmd}`
return $?.exitstatus == 0 ? pcs_out : false
end
# This is a loop that simply tries to push a CIB a number of times
# onto the live cluster. It does not remove the CIB file except in the
# error case. Returns nothing on success and raises an error on failure.
def push_cib_offline(cib, tries=1, try_sleep=0, post_success_sleep=0)
tries.times do |try|
try_text = tries > 1 ? "try #{try+1}/#{tries}: " : ''
Puppet.debug("pcs_cib_offline push #{try_text}")
if push_cib(cib) == 0
sleep post_success_sleep
return
end
Puppet.debug("Error: #{pcs_out}")
if try == tries-1
delete_cib(cib)
raise Puppet::Error, "push_cib_offline for #{cib} failed"
end
if try_sleep > 0
Puppet.debug("Sleeping for #{try_sleep} seconds between tries")
sleep try_sleep
end
end
end
# The following function takes a resource_name and an xml graph file as generated by
# crm_simulate and returns true if the resource_name is contained in the transition
# graph (i.e. the cluster would restart the resource) and false if not (i.e. the
# cluster would not restart the resource)
def pcmk_graph_contain_id?(resource_name, graph_file, is_bundle=false)
graph = File.new(graph_file)
graph_doc = REXML::Document.new graph
xpath_query = '/transition_graph//primitive/@id'
ids = []
REXML::XPath.each(graph_doc, xpath_query) do |element|
id = element.to_s
# if we are a bundle we compare the start of the strings
# because the primitive id will be in the form of galera-bundle-1 as opposed to galera-bundle
if is_bundle then
if id.start_with?(resource_name) then
return true
end
else
if id == resource_name then
return true
end
end
end
return false
end
# Given a cib and a resource name, this method returns true if pacemaker
# will restart the resource and false if no action will be taken by pacemaker.
# Note that we need to leverage crm_simulate instead of crm_diff due to:
# https://bugzilla.redhat.com/show_bug.cgi?id=1561617
def pcmk_restart_resource?(resource_name, cib, is_bundle=false)
tmpfile = pcmk_tmpname("#{PCMK_TMP_BASE}/puppet-cib-simulate", nil)
cmd = "/usr/sbin/crm_simulate -x #{cib} -s -G#{tmpfile}"
crm_out = `#{cmd}`
if $?.exitstatus != 0
FileUtils.rm(tmpfile, :force => true)
delete_cib(cib)
raise Puppet::Error, "#{cmd} failed with: #{crm_out}"
end
# Now tmpfile contains the XML of the changes to the cluster.
# If tmpfile only contains an empty <transition_graph> no changes took place
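# (For reference, an empty graph is typically just a bare
#  <transition_graph .../> element with no <synapse>/<primitive> children,
#  so the XPath lookup in pcmk_graph_contain_id? finds nothing.)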
ret = pcmk_graph_contain_id?(resource_name, tmpfile, is_bundle)
FileUtils.rm(tmpfile, :force => true)
return ret
end
# This method takes a resource and a creation command and does the following
# 1. Deletes the resource from the offline CIB
# 2. Recreates the resource on the offline CIB
# 3. Verifies whether pacemaker will restart the resource and returns true if so
def pcmk_resource_has_changed?(resource, cmd_create, is_bundle=false)
cib = backup_cib()
cmd_delete = "resource delete #{resource[:name]}"
ret = pcs_offline(cmd_delete, cib)
if ret == false
delete_cib(cib)
raise Puppet::Error, "#{cmd_delete} returned error. This should never happen."
end
ret = pcs_offline(cmd_create, cib)
if ret == false
delete_cib(cib)
raise Puppet::Error, "#{cmd_create} returned error. This should never happen."
end
ret = pcmk_restart_resource?(resource[:name], cib, is_bundle)
Puppet.debug("pcmk_resource_has_changed returned #{ret}")
delete_cib(cib)
return ret
end
# This function updates a resource by making a CIB backup,
# removing the resource, re-adding it and then pushing the CIB
# back to the cluster
def pcmk_update_resource(resource, cmd_create)
cib = backup_cib()
cmd_delete = "resource delete #{resource[:name]}"
ret = pcs_offline(cmd_delete, cib)
if ret == false
delete_cib(cib)
raise Puppet::Error, "#{cmd_delete} returned error. This should never happen."
end
ret = pcs_offline(cmd_create, cib)
if ret == false
delete_cib(cib)
raise Puppet::Error, "#{cmd_create} returned error. This should never happen."
end
if resource[:location_rule] then
cmd_location = build_pcs_location_rule_cmd(resource)
ret = pcs_offline(cmd_location, cib)
if ret == false
delete_cib(cib)
raise Puppet::Error, "#{cmd_location} returned error. This should never happen."
end
end
push_cib_offline(cib, resource[:tries], resource[:try_sleep], resource[:post_success_sleep])
end


@@ -0,0 +1,238 @@
<cib crm_feature_set="3.0.12" validate-with="pacemaker-2.8" epoch="25" num_updates="2" admin_epoch="0" cib-last-written="Fri Apr 6 14:23:10 2018" update-origin="foobar-0" update-client="crm_attribute" update-user="root" have-quorum="1" dc-uuid="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.16-12.el7_4.8-94ff4df"/>
<nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
<nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="clustername"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="1" uname="foobar-0">
<instance_attributes id="nodes-1">
<nvpair id="nodes-1-rabbitmq-role" name="rabbitmq-role" value="true"/>
<nvpair id="nodes-1-rmq-node-attr-last-known-rabbitmq" name="rmq-node-attr-last-known-rabbitmq" value="rabbit@foobar-0"/>
</instance_attributes>
</node>
<node id="2" uname="foobar-1">
<instance_attributes id="nodes-2">
<nvpair id="nodes-2-rabbitmq-role" name="rabbitmq-role" value="true"/>
<nvpair id="nodes-2-rmq-node-attr-last-known-rabbitmq" name="rmq-node-attr-last-known-rabbitmq" value="rabbit@foobar-1"/>
</instance_attributes>
</node>
<node id="3" uname="foobar-2">
<instance_attributes id="nodes-3">
<nvpair id="nodes-3-rabbitmq-role" name="rabbitmq-role" value="true"/>
<nvpair id="nodes-3-rmq-node-attr-last-known-rabbitmq" name="rmq-node-attr-last-known-rabbitmq" value="rabbit@foobar-2"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive class="stonith" id="stonith-fence_amt-stonith-fence-1" type="fence_amt">
<instance_attributes id="stonith-fence_amt-stonith-fence-1-instance_attributes">
<nvpair id="stonith-fence_amt-stonith-fence-1-instance_attributes-ipaddr" name="ipaddr" value="192.0.3.99"/>
<nvpair id="stonith-fence_amt-stonith-fence-1-instance_attributes-passwd" name="passwd" value="renVamyep3!"/>
<nvpair id="stonith-fence_amt-stonith-fence-1-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="foobar-0"/>
</instance_attributes>
<operations>
<op id="stonith-fence_amt-stonith-fence-1-monitor-interval-60s" interval="60s" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="stonith-fence_amt-stonith-fence-2" type="fence_amt">
<instance_attributes id="stonith-fence_amt-stonith-fence-2-instance_attributes">
<nvpair id="stonith-fence_amt-stonith-fence-2-instance_attributes-ipaddr" name="ipaddr" value="192.0.2.100"/>
<nvpair id="stonith-fence_amt-stonith-fence-2-instance_attributes-passwd" name="passwd" value="renVamyep3!"/>
<nvpair id="stonith-fence_amt-stonith-fence-2-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="foobar-1"/>
</instance_attributes>
<operations>
<op id="stonith-fence_amt-stonith-fence-2-monitor-interval-60s" interval="60s" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="stonith-fence_amt-stonith-fence-3" type="fence_amt">
<instance_attributes id="stonith-fence_amt-stonith-fence-3-instance_attributes">
<nvpair id="stonith-fence_amt-stonith-fence-3-instance_attributes-ipaddr" name="ipaddr" value="192.0.2.101"/>
<nvpair id="stonith-fence_amt-stonith-fence-3-instance_attributes-passwd" name="passwd" value="renVamyep3!"/>
<nvpair id="stonith-fence_amt-stonith-fence-3-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="foobar-2"/>
</instance_attributes>
<operations>
<op id="stonith-fence_amt-stonith-fence-3-monitor-interval-60s" interval="60s" name="monitor"/>
</operations>
</primitive>
<bundle id="test_bundle">
<docker image="docker.io/sdelrio/docker-minimal-nginx" network="host" options="--user=root --log-driver=journald -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS" replicas="3"/>
<storage>
<storage-mapping id="haproxy-cfg-files" options="ro" source-dir="/var/lib/kolla/config_files/haproxy.json" target-dir="/var/lib/kolla/config_files/config.json"/>
<storage-mapping id="haproxy-cfg-data" options="ro" source-dir="/var/lib/config-data/puppet-generated/haproxy/" target-dir="/var/lib/kolla/config_files/src"/>
<storage-mapping id="haproxy-hosts" options="ro" source-dir="/etc/hosts" target-dir="/etc/hosts"/>
<storage-mapping id="haproxy-localtime" options="ro" source-dir="/etc/localtime" target-dir="/etc/localtime"/>
<storage-mapping id="haproxy-pki-extracted" options="ro" source-dir="/etc/pki/ca-trust/extracted" target-dir="/etc/pki/ca-trust/extracted"/>
<storage-mapping id="haproxy-pki-ca-bundle-crt" options="ro" source-dir="/etc/pki/tls/certs/ca-bundle.crt" target-dir="/etc/pki/tls/certs/ca-bundle.crt"/>
<storage-mapping id="haproxy-pki-ca-bundle-trust-crt" options="ro" source-dir="/etc/pki/tls/certs/ca-bundle.trust.crt" target-dir="/etc/pki/tls/certs/ca-bundle.trust.crt"/>
<storage-mapping id="haproxy-pki-cert" options="ro" source-dir="/etc/pki/tls/cert.pem" target-dir="/etc/pki/tls/cert.pem"/>
<storage-mapping id="haproxy-dev-log" options="rw" source-dir="/dev/log" target-dir="/dev/log"/>
</storage>
</bundle>
<primitive class="ocf" id="ip-172.16.11.97" provider="heartbeat" type="IPaddr2">
<instance_attributes id="ip-172.16.11.97-instance_attributes">
<nvpair id="ip-172.16.11.97-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
<nvpair id="ip-172.16.11.97-instance_attributes-ip" name="ip" value="172.16.11.97"/>
</instance_attributes>
<operations>
<op id="ip-172.16.11.97-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
<op id="ip-172.16.11.97-start-interval-0s" interval="0s" name="start" timeout="20s"/>
<op id="ip-172.16.11.97-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
</operations>
</primitive>
<clone id="rabbitmq-clone">
<primitive class="ocf" id="rabbitmq" provider="heartbeat" type="rabbitmq-cluster">
<instance_attributes id="rabbitmq-instance_attributes">
<nvpair id="rabbitmq-instance_attributes-set_policy" name="set_policy" value="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
</instance_attributes>
<meta_attributes id="rabbitmq-meta_attributes">
<nvpair id="rabbitmq-meta_attributes-notify" name="notify" value="true"/>
</meta_attributes>
<operations>
<op id="rabbitmq-monitor-interval-10" interval="10" name="monitor" timeout="40"/>
<op id="rabbitmq-start-interval-0s" interval="0s" name="start" timeout="200s"/>
<op id="rabbitmq-stop-interval-0s" interval="0s" name="stop" timeout="200s"/>
</operations>
</primitive>
<meta_attributes id="rabbitmq-clone-meta_attributes">
<nvpair id="rabbitmq-clone-meta_attributes-interleave" name="interleave" value="true"/>
<nvpair id="rabbitmq-clone-meta_attributes-ordered" name="ordered" value="true"/>
</meta_attributes>
</clone>
</resources>
<constraints>
<rsc_location id="location-stonith-fence_amt-stonith-fence-1-foobar-0--INFINITY" node="foobar-0" rsc="stonith-fence_amt-stonith-fence-1" score="-INFINITY"/>
<rsc_location id="location-stonith-fence_amt-stonith-fence-2-foobar-1--INFINITY" node="foobar-1" rsc="stonith-fence_amt-stonith-fence-2" score="-INFINITY"/>
<rsc_location id="location-stonith-fence_amt-stonith-fence-3-foobar-2--INFINITY" node="foobar-2" rsc="stonith-fence_amt-stonith-fence-3" score="-INFINITY"/>
<rsc_location id="location-rabbitmq-clone" resource-discovery="exclusive" rsc="rabbitmq-clone">
<rule id="location-rabbitmq-clone-rule" score="0">
<expression attribute="rabbitmq-role" id="location-rabbitmq-clone-rule-expr" operation="eq" value="true"/>
</rule>
</rsc_location>
</constraints>
</configuration>
<status>
<node_state id="1" uname="foobar-0" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="1">
<lrm_resources>
<lrm_resource id="stonith-fence_amt-stonith-fence-1" type="fence_amt" class="stonith">
<lrm_rsc_op id="stonith-fence_amt-stonith-fence-1_last_0" operation_key="stonith-fence_amt-stonith-fence-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="2:5:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:7;2:5:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-0" call-id="5" rc-code="7" op-status="0" interval="0" last-run="1523017362" last-rc-change="1523017362" exec-time="1" queue-time="0" op-digest="c200ae5d45b5c3cbba9fee03e78cacfe" op-secure-params=" passwd password " op-secure-digest="50391a6be4d0d4ebb76998356c8ff966"/>
</lrm_resource>
<lrm_resource id="stonith-fence_amt-stonith-fence-2" type="fence_amt" class="stonith">
<lrm_rsc_op id="stonith-fence_amt-stonith-fence-2_last_0" operation_key="stonith-fence_amt-stonith-fence-2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="5:10:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:0;5:10:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-0" call-id="10" rc-code="0" op-status="0" interval="0" last-run="1523017368" last-rc-change="1523017368" exec-time="1372" queue-time="0" op-digest="fa99cc06478ff373a3c01d551764307a" op-secure-params=" passwd password " op-secure-digest="43d7a028a1d194c5265a3b3ce302fde5"/>
<lrm_rsc_op id="stonith-fence_amt-stonith-fence-2_monitor_60000" operation_key="stonith-fence_amt-stonith-fence-2_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="6:10:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:0;6:10:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-0" call-id="11" rc-code="0" op-status="0" interval="60000" last-rc-change="1523017370" exec-time="347" queue-time="0" op-digest="1b6e96c30cd893e21ef817d56c08b03c" op-secure-params=" passwd password " op-secure-digest="43d7a028a1d194c5265a3b3ce302fde5"/>
</lrm_resource>
<lrm_resource id="stonith-fence_amt-stonith-fence-3" type="fence_amt" class="stonith">
<lrm_rsc_op id="stonith-fence_amt-stonith-fence-3_last_0" operation_key="stonith-fence_amt-stonith-fence-3_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="8:13:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:0;8:13:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-0" call-id="16" rc-code="0" op-status="0" interval="0" last-run="1523017372" last-rc-change="1523017372" exec-time="373" queue-time="0" op-digest="01086af23cd7814717dd88ba475f6923" op-secure-params=" passwd password " op-secure-digest="f81520ca6e7990fb7f328802941e5fd6"/>
<lrm_rsc_op id="stonith-fence_amt-stonith-fence-3_monitor_60000" operation_key="stonith-fence_amt-stonith-fence-3_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="19:14:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:0;19:14:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-0" call-id="17" rc-code="0" op-status="0" interval="60000" last-rc-change="1523017374" exec-time="524" queue-time="0" op-digest="39a65383b8fc43227462ca188262169d" op-secure-params=" passwd password " op-secure-digest="f81520ca6e7990fb7f328802941e5fd6"/>
</lrm_resource>
<lrm_resource id="test_bundle-docker-1" type="docker" class="ocf" provider="heartbeat">
<lrm_rsc_op id="test_bundle-docker-1_last_0" operation_key="test_bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="5:14:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:7;5:14:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-0" call-id="25" rc-code="7" op-status="0" interval="0" last-run="1523017374" last-rc-change="1523017374" exec-time="140" queue-time="0" op-digest="635b2bc9607b43869f8f42920727446e"/>
</lrm_resource>
<lrm_resource id="test_bundle-docker-2" type="docker" class="ocf" provider="heartbeat">
<lrm_rsc_op id="test_bundle-docker-2_last_0" operation_key="test_bundle-docker-2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="24:14:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:0;24:14:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-0" call-id="30" rc-code="0" op-status="0" interval="0" last-run="1523017374" last-rc-change="1523017374" exec-time="577" queue-time="0" op-digest="635b2bc9607b43869f8f42920727446e"/>
<lrm_rsc_op id="test_bundle-docker-2_monitor_60000" operation_key="test_bundle-docker-2_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="25:14:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:0;25:14:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-0" call-id="31" rc-code="0" op-status="0" interval="60000" last-rc-change="1523017374" exec-time="91" queue-time="0" op-digest="0430c47ada77d6452525a070ecb7174c"/>
</lrm_resource>
<lrm_resource id="test_bundle-docker-0" type="docker" class="ocf" provider="heartbeat">
<lrm_rsc_op id="test_bundle-docker-0_last_0" operation_key="test_bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="4:14:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:7;4:14:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-0" call-id="21" rc-code="7" op-status="0" interval="0" last-run="1523017374" last-rc-change="1523017374" exec-time="171" queue-time="0" op-digest="635b2bc9607b43869f8f42920727446e"/>
</lrm_resource>
<lrm_resource id="ip-172.16.11.97" type="IPaddr2" class="ocf" provider="heartbeat">
<lrm_rsc_op id="ip-172.16.11.97_last_0" operation_key="ip-172.16.11.97_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="8:15:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:7;8:15:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-0" call-id="35" rc-code="7" op-status="0" interval="0" last-run="1523017375" last-rc-change="1523017375" exec-time="49" queue-time="0" op-digest="f90b6e0d5d3e7dc4f9da89e1f8bc0f2e"/>
</lrm_resource>
<lrm_resource id="rabbitmq" type="rabbitmq-cluster" class="ocf" provider="heartbeat">
<lrm_rsc_op id="rabbitmq_last_0" operation_key="rabbitmq_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="33:18:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:0;33:18:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-0" call-id="41" rc-code="0" op-status="0" interval="0" last-run="1523017387" last-rc-change="1523017387" exec-time="4398" queue-time="0" op-digest="780d433233eb4f94c1a151623d002e84"/>
<lrm_rsc_op id="rabbitmq_monitor_10000" operation_key="rabbitmq_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="31:19:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:0;31:19:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-0" call-id="42" rc-code="0" op-status="0" interval="10000" last-rc-change="1523017391" exec-time="402" queue-time="0" op-digest="6b46cdf9111345cbd0460b2540d3b2c7"/>
</lrm_resource>
</lrm_resources>
</lrm>
<transient_attributes id="1">
<instance_attributes id="status-1">
<nvpair id="status-1-shutdown" name="shutdown" value="0"/>
<nvpair id="status-1-rmq-node-attr-rabbitmq" name="rmq-node-attr-rabbitmq" value="rabbit@foobar-0"/>
</instance_attributes>
</transient_attributes>
</node_state>
<node_state id="2" uname="foobar-1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="2">
<lrm_resources>
<lrm_resource id="stonith-fence_amt-stonith-fence-1" type="fence_amt" class="stonith">
<lrm_rsc_op id="stonith-fence_amt-stonith-fence-1_last_0" operation_key="stonith-fence_amt-stonith-fence-1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="2:7:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:0;2:7:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-1" call-id="6" rc-code="0" op-status="0" interval="0" last-run="1523017363" last-rc-change="1523017363" exec-time="1381" queue-time="0" op-digest="c200ae5d45b5c3cbba9fee03e78cacfe" op-secure-params=" passwd password " op-secure-digest="50391a6be4d0d4ebb76998356c8ff966"/>
<lrm_rsc_op id="stonith-fence_amt-stonith-fence-1_monitor_60000" operation_key="stonith-fence_amt-stonith-fence-1_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="3:7:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:0;3:7:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-1" call-id="7" rc-code="0" op-status="0" interval="60000" last-rc-change="1523017365" exec-time="376" queue-time="0" op-digest="f251574e6cd2fec78c54b1807f085f85" op-secure-params=" passwd password " op-secure-digest="50391a6be4d0d4ebb76998356c8ff966"/>
</lrm_resource>
<lrm_resource id="stonith-fence_amt-stonith-fence-2" type="fence_amt" class="stonith">
<lrm_rsc_op id="stonith-fence_amt-stonith-fence-2_last_0" operation_key="stonith-fence_amt-stonith-fence-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="4:8:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:7;4:8:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-1" call-id="11" rc-code="7" op-status="0" interval="0" last-run="1523017366" last-rc-change="1523017366" exec-time="0" queue-time="0" op-digest="fa99cc06478ff373a3c01d551764307a" op-secure-params=" passwd password " op-secure-digest="43d7a028a1d194c5265a3b3ce302fde5"/>
</lrm_resource>
<lrm_resource id="stonith-fence_amt-stonith-fence-3" type="fence_amt" class="stonith">
<lrm_rsc_op id="stonith-fence_amt-stonith-fence-3_last_0" operation_key="stonith-fence_amt-stonith-fence-3_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="5:11:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:7;5:11:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-1" call-id="15" rc-code="7" op-status="0" interval="0" last-run="1523017371" last-rc-change="1523017371" exec-time="0" queue-time="0" op-digest="01086af23cd7814717dd88ba475f6923" op-secure-params=" passwd password " op-secure-digest="f81520ca6e7990fb7f328802941e5fd6"/>
</lrm_resource>
<lrm_resource id="test_bundle-docker-1" type="docker" class="ocf" provider="heartbeat">
<lrm_rsc_op id="test_bundle-docker-1_last_0" operation_key="test_bundle-docker-1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="22:14:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:0;22:14:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-1" call-id="28" rc-code="0" op-status="0" interval="0" last-run="1523017374" last-rc-change="1523017374" exec-time="493" queue-time="0" op-digest="635b2bc9607b43869f8f42920727446e"/>
<lrm_rsc_op id="test_bundle-docker-1_monitor_60000" operation_key="test_bundle-docker-1_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="23:14:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:0;23:14:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-1" call-id="29" rc-code="0" op-status="0" interval="60000" last-rc-change="1523017374" exec-time="79" queue-time="0" op-digest="0430c47ada77d6452525a070ecb7174c"/>
</lrm_resource>
<lrm_resource id="test_bundle-docker-2" type="docker" class="ocf" provider="heartbeat">
<lrm_rsc_op id="test_bundle-docker-2_last_0" operation_key="test_bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="9:14:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:7;9:14:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-1" call-id="27" rc-code="7" op-status="0" interval="0" last-run="1523017374" last-rc-change="1523017374" exec-time="85" queue-time="0" op-digest="635b2bc9607b43869f8f42920727446e"/>
</lrm_resource>
<lrm_resource id="test_bundle-docker-0" type="docker" class="ocf" provider="heartbeat">
<lrm_rsc_op id="test_bundle-docker-0_last_0" operation_key="test_bundle-docker-0_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="7:14:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:7;7:14:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-1" call-id="19" rc-code="7" op-status="0" interval="0" last-run="1523017374" last-rc-change="1523017374" exec-time="94" queue-time="0" op-digest="635b2bc9607b43869f8f42920727446e"/>
</lrm_resource>
<lrm_resource id="ip-172.16.11.97" type="IPaddr2" class="ocf" provider="heartbeat">
<lrm_rsc_op id="ip-172.16.11.97_last_0" operation_key="ip-172.16.11.97_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="9:15:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:7;9:15:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-1" call-id="33" rc-code="7" op-status="0" interval="0" last-run="1523017375" last-rc-change="1523017375" exec-time="82" queue-time="0" op-digest="f90b6e0d5d3e7dc4f9da89e1f8bc0f2e"/>
</lrm_resource>
<lrm_resource id="rabbitmq" type="rabbitmq-cluster" class="ocf" provider="heartbeat">
<lrm_rsc_op id="rabbitmq_last_0" operation_key="rabbitmq_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="30:16:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:0;30:16:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-1" call-id="39" rc-code="0" op-status="0" interval="0" last-run="1523017379" last-rc-change="1523017379" exec-time="3154" queue-time="0" op-digest="780d433233eb4f94c1a151623d002e84"/>
<lrm_rsc_op id="rabbitmq_monitor_10000" operation_key="rabbitmq_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="29:17:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:0;29:17:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-1" call-id="40" rc-code="0" op-status="0" interval="10000" last-rc-change="1523017383" exec-time="418" queue-time="0" op-digest="6b46cdf9111345cbd0460b2540d3b2c7"/>
</lrm_resource>
</lrm_resources>
</lrm>
<transient_attributes id="2">
<instance_attributes id="status-2">
<nvpair id="status-2-shutdown" name="shutdown" value="0"/>
<nvpair id="status-2-rmq-node-attr-rabbitmq" name="rmq-node-attr-rabbitmq" value="rabbit@foobar-1"/>
</instance_attributes>
</transient_attributes>
</node_state>
<node_state id="3" uname="foobar-2" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="3">
<lrm_resources>
<lrm_resource id="stonith-fence_amt-stonith-fence-1" type="fence_amt" class="stonith">
<lrm_rsc_op id="stonith-fence_amt-stonith-fence-1_last_0" operation_key="stonith-fence_amt-stonith-fence-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="4:5:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:7;4:5:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-2" call-id="5" rc-code="7" op-status="0" interval="0" last-run="1523017362" last-rc-change="1523017362" exec-time="4" queue-time="0" op-digest="c200ae5d45b5c3cbba9fee03e78cacfe" op-secure-params=" passwd password " op-secure-digest="50391a6be4d0d4ebb76998356c8ff966"/>
</lrm_resource>
<lrm_resource id="stonith-fence_amt-stonith-fence-2" type="fence_amt" class="stonith">
<lrm_rsc_op id="stonith-fence_amt-stonith-fence-2_last_0" operation_key="stonith-fence_amt-stonith-fence-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="5:8:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:7;5:8:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-2" call-id="9" rc-code="7" op-status="0" interval="0" last-run="1523017366" last-rc-change="1523017366" exec-time="0" queue-time="0" op-digest="fa99cc06478ff373a3c01d551764307a" op-secure-params=" passwd password " op-secure-digest="43d7a028a1d194c5265a3b3ce302fde5"/>
</lrm_resource>
<lrm_resource id="stonith-fence_amt-stonith-fence-3" type="fence_amt" class="stonith">
<lrm_rsc_op id="stonith-fence_amt-stonith-fence-3_last_0" operation_key="stonith-fence_amt-stonith-fence-3_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="6:11:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:7;6:11:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-2" call-id="13" rc-code="7" op-status="0" interval="0" last-run="1523017371" last-rc-change="1523017371" exec-time="0" queue-time="0" op-digest="01086af23cd7814717dd88ba475f6923" op-secure-params=" passwd password " op-secure-digest="f81520ca6e7990fb7f328802941e5fd6"/>
</lrm_resource>
<lrm_resource id="test_bundle-docker-1" type="docker" class="ocf" provider="heartbeat">
<lrm_rsc_op id="test_bundle-docker-1_last_0" operation_key="test_bundle-docker-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="11:14:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:7;11:14:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-2" call-id="21" rc-code="7" op-status="0" interval="0" last-run="1523017374" last-rc-change="1523017374" exec-time="97" queue-time="0" op-digest="635b2bc9607b43869f8f42920727446e"/>
</lrm_resource>
<lrm_resource id="test_bundle-docker-2" type="docker" class="ocf" provider="heartbeat">
<lrm_rsc_op id="test_bundle-docker-2_last_0" operation_key="test_bundle-docker-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="12:14:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:7;12:14:7:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-2" call-id="25" rc-code="7" op-status="0" interval="0" last-run="1523017374" last-rc-change="1523017374" exec-time="93" queue-time="0" op-digest="635b2bc9607b43869f8f42920727446e"/>
</lrm_resource>
<lrm_resource id="test_bundle-docker-0" type="docker" class="ocf" provider="heartbeat">
<lrm_rsc_op id="test_bundle-docker-0_last_0" operation_key="test_bundle-docker-0_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="20:14:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:0;20:14:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-2" call-id="26" rc-code="0" op-status="0" interval="0" last-run="1523017374" last-rc-change="1523017374" exec-time="464" queue-time="0" op-digest="635b2bc9607b43869f8f42920727446e"/>
<lrm_rsc_op id="test_bundle-docker-0_monitor_60000" operation_key="test_bundle-docker-0_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="21:14:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:0;21:14:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-2" call-id="27" rc-code="0" op-status="0" interval="60000" last-rc-change="1523017374" exec-time="78" queue-time="0" op-digest="0430c47ada77d6452525a070ecb7174c"/>
</lrm_resource>
<lrm_resource id="ip-172.16.11.97" type="IPaddr2" class="ocf" provider="heartbeat">
<lrm_rsc_op id="ip-172.16.11.97_last_0" operation_key="ip-172.16.11.97_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="27:15:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:0;27:15:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-2" call-id="32" rc-code="0" op-status="0" interval="0" last-run="1523017375" last-rc-change="1523017375" exec-time="4074" queue-time="0" op-digest="f90b6e0d5d3e7dc4f9da89e1f8bc0f2e"/>
<lrm_rsc_op id="ip-172.16.11.97_monitor_10000" operation_key="ip-172.16.11.97_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="29:16:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:0;29:16:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-2" call-id="33" rc-code="0" op-status="0" interval="10000" last-rc-change="1523017379" exec-time="57" queue-time="0" op-digest="df3faee2692cff55e3de0111463fc75c"/>
</lrm_resource>
<lrm_resource id="rabbitmq" type="rabbitmq-cluster" class="ocf" provider="heartbeat">
<lrm_rsc_op id="rabbitmq_last_0" operation_key="rabbitmq_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="30:17:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:0;30:17:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-2" call-id="39" rc-code="0" op-status="0" interval="0" last-run="1523017383" last-rc-change="1523017383" exec-time="4203" queue-time="0" op-digest="780d433233eb4f94c1a151623d002e84"/>
<lrm_rsc_op id="rabbitmq_monitor_10000" operation_key="rabbitmq_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.12" transition-key="32:18:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" transition-magic="0:0;32:18:0:604473fe-c25e-49df-a803-9e1d0a74f9ba" on_node="foobar-2" call-id="40" rc-code="0" op-status="0" interval="10000" last-rc-change="1523017387" exec-time="427" queue-time="0" op-digest="6b46cdf9111345cbd0460b2540d3b2c7"/>
</lrm_resource>
</lrm_resources>
</lrm>
<transient_attributes id="3">
<instance_attributes id="status-3">
<nvpair id="status-3-shutdown" name="shutdown" value="0"/>
<nvpair id="status-3-rmq-node-attr-rabbitmq" name="rmq-node-attr-rabbitmq" value="rabbit@foobar-2"/>
</instance_attributes>
</transient_attributes>
</node_state>
</status>
</cib>


@@ -0,0 +1,59 @@
require 'spec_helper'
require_relative '../../../../lib/puppet/provider/pcmk_common'
describe "pcmk_common functions" do
before(:all) do
# FIXME: we need to stub this properly, not via this hack
PCMK_TMP_BASE = "/tmp"
orig_cib = File.join File.dirname(__FILE__), 'cib-orig.xml'
FileUtils.cp orig_cib, "cib-noop.xml"
FileUtils.cp orig_cib, "cib-resource.xml"
FileUtils.cp orig_cib, "cib-bundle.xml"
end
it "pcmk_graph_contain_id? raises proper exception" do
expect { pcmk_graph_contain_id?('foo', 'bar') }.to raise_error(Errno::ENOENT)
end
it "pcs_offline noop update" do
expect(pcs_offline('resource update ip-172.16.11.97 cidr_netmask=32', 'cib-noop.xml')).to eq ""
end
it "pcmk_restart_resource? noop" do
expect(pcmk_restart_resource?('foo', "cib-noop.xml")).to eq false
expect(pcmk_restart_resource?('ip-172.16.11.97', "cib-noop.xml")).to eq false
end
it "pcs_offline update to resource definition" do
expect(pcs_offline('resource update ip-172.16.11.97 cidr_netmask=31', 'cib-resource.xml')).to eq ""
end
it "pcmk_restart_resource? vip resource" do
expect(pcmk_restart_resource?('foo', "cib-resource.xml")).to eq false
expect(pcmk_restart_resource?('ip-172.16.11.97', "cib-resource.xml")).to eq true
end
it "pcs_offline update to bundle definition" do
# We effectively change the number of replicas from 3 to 2
expect(pcs_offline('resource delete test_bundle', 'cib-bundle.xml')).not_to eq false
cmd = 'resource bundle create test_bundle container docker image=docker.io/sdelrio/docker-minimal-nginx '\
'replicas=2 options="--user=root --log-driver=journald -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS" '\
'network=host storage-map id=haproxy-cfg-files source-dir=/var/lib/kolla/config_files/haproxy.json '\
'target-dir=/var/lib/kolla/config_files/config.json options=ro storage-map id=haproxy-cfg-data '\
'source-dir=/var/lib/config-data/puppet-generated/haproxy/ target-dir=/var/lib/kolla/config_files/src '\
'options=ro storage-map id=haproxy-hosts source-dir=/etc/hosts target-dir=/etc/hosts options=ro '\
'storage-map id=haproxy-localtime source-dir=/etc/localtime target-dir=/etc/localtime options=ro '\
'storage-map id=haproxy-pki-extracted source-dir=/etc/pki/ca-trust/extracted '\
'target-dir=/etc/pki/ca-trust/extracted options=ro storage-map id=haproxy-pki-ca-bundle-crt '\
'source-dir=/etc/pki/tls/certs/ca-bundle.crt target-dir=/etc/pki/tls/certs/ca-bundle.crt options=ro '\
'storage-map id=haproxy-pki-ca-bundle-trust-crt source-dir=/etc/pki/tls/certs/ca-bundle.trust.crt '\
'target-dir=/etc/pki/tls/certs/ca-bundle.trust.crt options=ro storage-map id=haproxy-pki-cert '\
'source-dir=/etc/pki/tls/cert.pem target-dir=/etc/pki/tls/cert.pem options=ro storage-map '\
'id=haproxy-dev-log source-dir=/dev/log target-dir=/dev/log options=rw'
expect(pcs_offline(cmd, 'cib-bundle.xml')).to eq ""
end
it "pcmk_restart_resource? bundle resource" do
expect(pcmk_restart_resource?('foo', "cib-bundle.xml", true)).to eq false
expect(pcmk_restart_resource?('test_bundle', "cib-bundle.xml", true)).to eq true
end
end