unix line endings

parent 274f0f766e
commit d6a3872109
@@ -1,20 +1,20 @@
$:.unshift File.expand_path('lib', File.dirname(__FILE__))
require 'astute/version'

Gem::Specification.new do |s|
  s.name = 'astute'
  s.version = Astute::VERSION

  s.summary = 'Orchestrator for OpenStack deployment'
  s.description = 'Deployment Orchestrator of Puppet via MCollective. Works as a library or from CLI.'
  s.authors = ['Mike Scherbakov']
  s.email = ['mscherbakov@mirantis.com']

  s.add_dependency 'mcollective-client', '> 2.0.0'
  s.add_dependency 'symboltable', '>= 1.0.2'

  s.files = Dir.glob("{bin,lib,spec}/**/*")
  s.executables = ['astute', 'astute_run']
  s.require_path = 'lib'
end
@@ -1,55 +1,55 @@
#!/usr/bin/env ruby

require 'optparse'
require 'yaml'
begin
  require 'astute'
rescue LoadError
  require 'rubygems'
  require 'astute'
end

class ConsoleReporter
  def report(msg)
    puts msg.inspect
  end
end

opts = {}
optparse = OptionParser.new do |o|
  o.banner = "Usage: bin/astute -f FILENAME"

  o.on("-v", "--[no-]verbose", "Run verbosely") do |v|
    opts[:verbose] = v
  end

  o.on("-f FILENAME", "Environment in YAML format. Samples are in examples directory.") do |f|
    opts[:filename] = f
  end

  o.on("-h") { puts o; exit }
end
optparse.parse!(ARGV)

if opts[:filename].nil?
  puts optparse
  exit
end

reporter = ConsoleReporter.new
Astute.logger = Logger.new(STDOUT) if opts[:verbose]

environment = YAML.load_file(opts[:filename])

case environment['attributes']['deployment_engine']
when 'nailyfact'
  deploy_engine = Astute::DeploymentEngine::NailyFact
when 'simplepuppet'
  deploy_engine = Astute::DeploymentEngine::SimplePuppet # It just calls puppet and doesn't do any magic
else
  deploy_engine = nil # Orchestrator will use its default
end

orchestrator = Astute::Orchestrator.new(deploy_engine, log_parsing=false)
orchestrator.deploy(reporter, environment['task_uuid'], environment['nodes'], environment['attributes'])
#orchestrator.verify_networks(reporter, task_id, nodes, networks)
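For reference, a minimal sketch (not part of this commit) of the environment file the CLI above consumes. It covers only the keys bin/astute actually reads (task_uuid, nodes, attributes and attributes['deployment_engine']); all values are illustrative, and the bundled compact/full/minimal/simple YAML samples carry the full per-node structure.

require 'yaml'

# Hypothetical minimal environment; real environments from Nailgun contain much more data.
environment = {
  'task_uuid'  => 'deadbeef-0000-0000-0000-000000000001',
  'attributes' => {
    'deployment_engine' => 'nailyfact',          # or 'simplepuppet'; anything else falls back to the default
    'deployment_mode'   => 'multinode_compute'   # consumed later by the deployment engine
  },
  'nodes' => [
    {'uid' => '1', 'role' => 'controller'},
    {'uid' => '2', 'role' => 'compute'}
  ]
}

File.open('environment.yaml', 'w') { |f| f.write(environment.to_yaml) }
# Then run: bin/astute -f environment.yaml -v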
deployment/mcollective/astute/bin/compact.yaml    Normal file → Executable file (0 lines changed)
deployment/mcollective/astute/bin/full.yaml       Normal file → Executable file (0 lines changed)
deployment/mcollective/astute/bin/minimal.yaml    Normal file → Executable file (0 lines changed)
deployment/mcollective/astute/bin/simple.yaml     Normal file → Executable file (0 lines changed)
@@ -1,37 +1,37 @@
require 'json'
require 'logger'

require 'astute/config'
require 'astute/logparser'
require 'astute/orchestrator'
require 'astute/metadata'
require 'astute/deployment_engine'
require 'astute/network'
require 'astute/puppetd'
require 'astute/rpuppet'
require 'astute/deployment_engine/simple_puppet'
require 'astute/deployment_engine/nailyfact'

module Astute
  autoload 'Context', 'astute/context'
  autoload 'MClient', 'astute/mclient'
  autoload 'ProxyReporter', 'astute/reporter'
  autoload 'NodeRemoval', 'astute/node_removal'

  def self.logger
    @logger ||= Logger.new('/var/log/astute.log')
    @logger.formatter = proc {|severity, datetime, progname, msg|
      severity_map = {'DEBUG' => 'debug', 'INFO' => 'info', 'WARN' => 'warning',
                      'ERROR' => 'err', 'FATAL' => 'crit'}
      "#{datetime.strftime("%Y-%m-%dT%H:%M:%S")} #{severity_map[severity]}: #{msg}\n"
    }
    @logger
  end

  def self.logger=(logger)
    @logger = logger
  end

  config_file = '/opt/astute/astute.conf'
  Astute.config.update(YAML.load(File.read(config_file))) if File.exists?(config_file)
end
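A note on the logger accessor above: a consumer can swap in its own logger through Astute.logger= before the default /var/log/astute.log file is ever opened, which is exactly what bin/astute does in verbose mode. A short sketch, where the DEBUG level is an illustrative choice rather than something lib/astute.rb sets:

require 'logger'
require 'astute'

# Send Astute's log output to stdout instead of /var/log/astute.log.
logger = Logger.new(STDOUT)
logger.level = Logger::DEBUG   # example threshold; Astute logs via debug/info/error
Astute.logger = logger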
deployment/mcollective/astute/lib/astute/config.rb    Normal file → Executable file (0 lines changed)
@@ -1,11 +1,11 @@
module Astute
  class Context
    attr_accessor :task_id, :reporter, :deploy_log_parser

    def initialize(task_id, reporter, deploy_log_parser=nil)
      @task_id = task_id
      @reporter = reporter
      @deploy_log_parser = deploy_log_parser
    end
  end
end
deployment/mcollective/astute/lib/astute/deployment_engine.rb                  Normal file → Executable file (0 lines changed)
deployment/mcollective/astute/lib/astute/deployment_engine/nailyfact.rb        Normal file → Executable file (0 lines changed)
deployment/mcollective/astute/lib/astute/deployment_engine/puppet_kernel.rb    Normal file → Executable file (0 lines changed)
deployment/mcollective/astute/lib/astute/deployment_engine/simple_puppet.rb    Normal file → Executable file (0 lines changed)
deployment/mcollective/astute/lib/astute/logparser.rb                          Normal file → Executable file (0 lines changed)
@@ -1,77 +1,77 @@
require 'mcollective'

module Astute
  class MClient
    include MCollective::RPC

    attr_accessor :retries

    def initialize(ctx, agent, nodes=nil, check_result=true, timeout=nil)
      @task_id = ctx.task_id
      @agent = agent
      @nodes = nodes.map { |n| n.to_s }
      @check_result = check_result
      @mc = rpcclient(agent, :exit_on_failure => false)
      @mc.timeout = timeout if timeout
      @mc.progress = false
      @retries = Astute.config.MC_RETRIES
      unless @nodes.nil?
        @mc.discover(:nodes => @nodes)
      end
    end

    def method_missing(method, *args)
      res = @mc.send(method, *args)
      if method == :discover
        @nodes = args[0][:nodes]
        return res
      end
      # Enable if needed. In normal case it eats the screen pretty fast
      log_result(res, method)
      return res unless @check_result

      err_msg = ''
      # Following error might happen because of misconfiguration, ex. direct_addressing = 1 only on client
      # or.. could be just some hang? Let's retry if @retries is set
      if res.length < @nodes.length
        # some nodes didn't respond
        retry_index = 1
        while retry_index <= @retries
          sleep rand
          nodes_responded = res.map { |n| n.results[:sender] }
          not_responded = @nodes - nodes_responded
          Astute.logger.debug "Retry ##{retry_index} to run mcollective agent on nodes: '#{not_responded.join(',')}'"
          @mc.discover(:nodes => not_responded)
          new_res = @mc.send(method, *args)
          log_result(new_res, method)
          # new_res can have some nodes which finally responded
          res += new_res
          break if res.length == @nodes.length
          retry_index += 1
        end
        if res.length < @nodes.length
          nodes_responded = res.map { |n| n.results[:sender] }
          not_responded = @nodes - nodes_responded
          err_msg += "#{@task_id}: MCollective agents '#{not_responded.join(',')}' didn't respond.\n"
        end
      end
      failed = res.select { |x| x.results[:statuscode] != 0 }
      if failed.any?
        err_msg += "#{@task_id}: MCollective call failed in agent '#{@agent}', "\
                   "method '#{method}', failed nodes: #{failed.map{|x| x.results[:sender]}.join(',')}"
      end
      raise err_msg unless err_msg.empty?

      return res
    end

    private
    def log_result(result, method)
      result.each do |node|
        Astute.logger.debug "#{@task_id}: MC agent '#{node.agent}', method '#{method}', "\
                            "results: #{node.results.inspect}"
      end
    end

  end
end
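For orientation, this is how the rest of the code drives MClient: any unknown method falls through method_missing to the rpcclient call of the same name, with the retry and statuscode checks above applied. A sketch assuming a working MCollective setup; the "faketest" agent name, node uids and echo payload mirror the MClient spec further down:

require 'astute'

reporter = Object.new                               # stand-in; Context only stores the reporter
ctx = Astute::Context.new('task-42', reporter)      # task id is an arbitrary example

mclient = Astute::MClient.new(ctx, "faketest", ['1', '2', '3'])
mclient.retries = 1                                 # overrides Astute.config.MC_RETRIES
results = mclient.echo(:msg => 'hello world')       # dispatched via method_missing to the agent
results.each { |node| puts node.results[:sender] }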
@@ -1,15 +1,15 @@
require 'json'
require 'ipaddr'

module Astute
  module Metadata
    def self.publish_facts(ctx, uid, metadata)
      # This is a synchronous RPC call, so we are sure that data were sent and processed remotely
      Astute.logger.info "#{ctx.task_id}: nailyfact - storing metadata for node uid=#{uid}"
      Astute.logger.debug "#{ctx.task_id}: nailyfact stores metadata: #{metadata.inspect}"
      nailyfact = MClient.new(ctx, "nailyfact", [uid])
      # TODO(mihgen) check results!
      stats = nailyfact.post(:value => metadata.to_json)
    end
  end
end
@@ -1,48 +1,48 @@
module Astute
  module Network
    def self.check_network(ctx, nodes, networks)
      if nodes.empty?
        Astute.logger.error "#{ctx.task_id}: Network checker: nodes list is empty. Nothing to check."
        return {'status' => 'error', 'error' => "Nodes list is empty. Nothing to check."}
      elsif nodes.size == 1
        Astute.logger.info "#{ctx.task_id}: Network checker: nodes list contains one node only. Do nothing."
        return {'nodes' =>
          [{'uid'=>nodes[0]['uid'],
            'networks'=>[{'vlans' => networks.map {|n| n['vlan_id'].to_i}, 'iface'=>'eth0'}]
          }]
        }
      end
      uids = nodes.map {|n| n['uid']}
      # TODO Everything breaks if agent not found. We have to handle that
      net_probe = MClient.new(ctx, "net_probe", uids)

      net_probe.start_frame_listeners(:iflist => ['eth0'].to_json)
      ctx.reporter.report({'progress' => 30, 'status' => 'verification'})

      # Interface name is hardcoded for now. Later we expect it to be passed from Nailgun backend
      data_to_send = {'eth0' => networks.map {|n| n['vlan_id']}.join(',')}
      net_probe.send_probing_frames(:interfaces => data_to_send.to_json)
      ctx.reporter.report({'progress' => 60, 'status' => 'verification'})

      stats = net_probe.get_probing_info
      result = stats.map {|node| {'uid' => node.results[:sender],
                                  'networks' => check_vlans_by_traffic(
                                    node.results[:sender],
                                    node.results[:data][:neighbours])} }
      Astute.logger.debug "#{ctx.task_id}: Network checking is done. Results: #{result.inspect}"
      return {'nodes' => result}
    end

    private
    def self.check_vlans_by_traffic(uid, data)
      return data.map{|iface, vlans| {
        'iface' => iface,
        'vlans' =>
          vlans.reject{|k,v|
            v.size==1 and v.has_key?(uid)
          }.keys.map{|n| n.to_i}
        }
      }
    end
  end
end
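A sketch of the data shapes check_network works with, summarised from the code above; ctx is an Astute::Context as in the earlier sketch, and the uid/vlan values are examples only:

nodes    = [{'uid' => '1'}, {'uid' => '2'}]
networks = [{'vlan_id' => 100}, {'vlan_id' => 101}]

result = Astute::Network.check_network(ctx, nodes, networks)
# => {'nodes' => [{'uid' => '1', 'networks' => [{'iface' => 'eth0', 'vlans' => [100, 101]}]}, ...]}
# An empty nodes list instead returns {'status' => 'error', 'error' => '...'}.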
deployment/mcollective/astute/lib/astute/node_removal.rb    Normal file → Executable file (0 lines changed)
@@ -1,44 +1,44 @@
module Astute
  class Orchestrator
    def initialize(deploy_engine=nil, log_parsing=false)
      @deploy_engine = deploy_engine ||= Astute::DeploymentEngine::NailyFact
      if log_parsing
        @log_parser = LogParser::ParseNodeLogs.new('puppet-agent.log')
      else
        @log_parser = LogParser::NoParsing.new
      end
    end

    def node_type(reporter, task_id, nodes, timeout=nil)
      context = Context.new(task_id, reporter)
      uids = nodes.map {|n| n['uid']}
      systemtype = MClient.new(context, "systemtype", uids, check_result=false, timeout)
      systems = systemtype.get_type
      return systems.map {|n| {'uid' => n.results[:sender],
                               'node_type' => n.results[:data][:node_type].chomp}}
    end

    def deploy(up_reporter, task_id, nodes, attrs)
      raise "Nodes to deploy are not provided!" if nodes.empty?
      # Following line fixes issues with uids: it should always be a string
      nodes.map { |x| x['uid'] = x['uid'].to_s }
      proxy_reporter = ProxyReporter.new(up_reporter)
      context = Context.new(task_id, proxy_reporter, @log_parser)
      deploy_engine_instance = @deploy_engine.new(context)
      Astute.logger.info "Using #{deploy_engine_instance.class} for deployment."
      deploy_engine_instance.deploy(nodes, attrs)
    end

    def remove_nodes(reporter, task_id, nodes)
      context = Context.new(task_id, reporter)
      node_removal = NodeRemoval.new
      return node_removal.remove(context, nodes)
    end

    def verify_networks(reporter, task_id, nodes, networks)
      context = Context.new(task_id, reporter)
      result = Network.check_network(context, nodes, networks)
      return result
    end
  end
end
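Since the gemspec advertises library use as well as the CLI, here is a sketch of driving Orchestrator directly, mirroring what bin/astute does; the reporter only needs a #report method, and environment.yaml is the same file the CLI takes:

require 'yaml'
require 'astute'

class ConsoleReporter
  def report(msg)
    puts msg.inspect
  end
end

env = YAML.load_file('environment.yaml')
orchestrator = Astute::Orchestrator.new(nil, log_parsing=false)   # nil engine => NailyFact default
orchestrator.deploy(ConsoleReporter.new, env['task_uuid'], env['nodes'], env['attributes'])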
deployment/mcollective/astute/lib/astute/puppetd.rb     Normal file → Executable file (0 lines changed)
deployment/mcollective/astute/lib/astute/reporter.rb    Normal file → Executable file (0 lines changed)
deployment/mcollective/astute/lib/astute/rpuppet.rb     Normal file → Executable file (0 lines changed)
@@ -1,10 +1,10 @@
ASTUTE_VERSION:=$(shell ruby -e "require '$(SOURCE_DIR)/astute/lib/astute/version.rb'; puts Astute::VERSION")

$(BUILD_DIR)/packages/gems/astute-$(ASTUTE_VERSION).gem: \
		$(SOURCE_DIR)/astute/astute.gemspec \
		$(call find-files,astute/bin) \
		$(call find-files,astute/lib) \
		$(call find-files,astute/spec)
	@mkdir -p $(@D)
	cd $(SOURCE_DIR)/astute && gem build astute.gemspec
	mv $(SOURCE_DIR)/astute/astute-$(ASTUTE_VERSION).gem $@
deployment/mcollective/astute/spec/example-logs/anaconda.log_    Normal file → Executable file (0 lines changed)
@@ -1,43 +1,43 @@
$LOAD_PATH << File.join(File.dirname(__FILE__),"..","lib")
require 'rspec'
# Following require is needed for rcov to provide valid results
require 'rspec/autorun'
require 'yaml'
require 'astute'

RSpec.configure do |config|
  config.mock_with :mocha
end

# NOTE(mihgen): I hate to wait for unit tests to complete,
# resetting time to sleep significantly increases tests speed
Astute.config.PUPPET_DEPLOY_INTERVAL = 0
Astute.config.PUPPET_FADE_INTERVAL = 0

module SpecHelpers
  def mock_rpcclient(discover_nodes=nil, timeout=nil)
    rpcclient = mock('rpcclient') do
      stubs(:progress=)
      unless timeout.nil?
        expects(:timeout=).with(timeout)
      end
      unless discover_nodes.nil?
        expects(:discover).with(:nodes => discover_nodes.map {|x| x['uid'].to_s}).at_least_once
      else
        stubs(:discover)
      end
    end
    Astute::MClient.any_instance.stubs(:rpcclient).returns(rpcclient)
    return rpcclient
  end

  def mock_mc_result(result={})
    mc_res = {:statuscode => 0, :data => {}, :sender => '1'}
    mc_res.merge!(result)
    mc_result = mock('mc_result') do
      stubs(:results).returns(mc_res)
      stubs(:agent).returns('mc_stubbed_agent')
    end
    return mc_result
  end
end
@@ -1,105 +1,105 @@
#!/usr/bin/env rspec
require File.join(File.dirname(__FILE__), "..", "spec_helper")
require 'tempfile'
require 'date'

include Astute

describe LogParser do
  context "Pattern-based progress bar calculation (anaconda.log)" do
    before :each do
      @pattern_spec = {'type' => 'pattern-list', 'chunk_size' => 40000, # Size of the block read for pattern searching.
        'pattern_list' => [
          {'pattern' => 'Running kickstart %%pre script', 'progress' => 0.08},
          {'pattern' => 'to step enablefilesystems', 'progress' => 0.09},
          {'pattern' => 'to step reposetup', 'progress' => 0.13},
          {'pattern' => 'to step installpackages', 'progress' => 0.16},
          {'pattern' => 'Installing',
           'number' => 210, # Now it installs 205 packages. Add 5 more for future growth.
           'p_min' => 0.16, # min percent
           'p_max' => 0.87  # max percent
          },
          {'pattern' => 'to step postinstallconfig', 'progress' => 0.87},
          {'pattern' => 'to step dopostaction', 'progress' => 0.92},
        ].reverse
      }
    end

    def test_supposed_time_parser(pattern_spec)
      date_regexp = '^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}'
      date_format = '%Y-%m-%dT%H:%M:%S'
      fo = Tempfile.new('logparse')
      logfile = File.join(File.dirname(__FILE__), "..", "example-logs", "anaconda.log_")
      path = fo.path
      initial_progress = Astute::LogParser.get_log_progress(path, pattern_spec)
      initial_progress.should eql(0)

      progress_table = []
      File.open(logfile).each do |line|
        fo.write(line)
        fo.flush
        date_string = line.match(date_regexp)
        if date_string
          date = DateTime.strptime(date_string[0], date_format)
          progress = Astute::LogParser.get_log_progress(path, pattern_spec)
          progress_table << {'date' => date, 'progress' => progress}
        end
      end
      fo.close!
      first_event_date, first_progress = progress_table[0]['date'], progress_table[0]['progress']
      last_event_date, last_progress = progress_table[-1]['date'], progress_table[-1]['progress']
      period = (last_event_date - first_event_date) / (last_progress - first_progress)
      hours, mins, secs, frac = Date::day_fraction_to_time(period)
      # FIXME(mihgen): I hope this calculation can be much simplified: needs refactoring
      # Assuming server was in reboot for reboot_time
      reboot_time = 30
      # period will be useful for other test cases
      period_in_sec = hours * 60 * 60 + mins * 60 + secs + reboot_time

      # Let's normalize the time in table
      progress_table.each do |el|
        delta = el['date'] - first_event_date
        hours, mins, secs, frac = Date::day_fraction_to_time(delta)
        delta_in_sec = hours * 60 * 60 + mins * 60 + secs
        el['time'] = delta_in_sec + reboot_time
      end
      return progress_table, period_in_sec
    end

    it "new progress must be equal or greater than previous" do
      progress_table, period_in_sec = test_supposed_time_parser(@pattern_spec)
      progress_table.each_cons(2) do |el|
        el[1]['progress'].should be >= el[0]['progress']
        el[0]['progress'].should be >= 0
        el[1]['progress'].should be <= 1
      end
    end

    it "it should move smoothly"
    it "it must be updated at least 5 times" do
      # Otherwise progress bar has no meaning I guess...
      pending('Not yet implemented')
    end

  end
end

#pattern_spec = {'type' => 'supposed_time',
#'chunk_size' => 10000,
#'date_format' => '%Y-%m-%dT%H:%M:%S',
#'date_regexp' => '^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}',
#'pattern_list' => [
#{'pattern' => 'Running anaconda script', 'supposed_time' => 60},
#{'pattern' => 'moving (1) to step enablefilesystems', 'supposed_time' => 3},
#{'pattern' => "notifying kernel of 'change' event on device", 'supposed_time' => 97},
#{'pattern' => 'Preparing to install packages', 'supposed_time' => 8},
#{'pattern' => 'Installing glibc-common-2.12', 'supposed_time' => 9},
#{'pattern' => 'Installing bash-4.1.2', 'supposed_time' => 10},
#{'pattern' => 'Installing coreutils-8.4-19', 'supposed_time' => 20},
#{'pattern' => 'Installing centos-release-6-3', 'supposed_time' => 20},
#{'pattern' => 'Installing attr-2.4.44', 'supposed_time' => 19},
#{'pattern' => 'leaving (1) step installpackages', 'supposed_time' => 51},
#{'pattern' => 'moving (1) to step postscripts', 'supposed_time' => 3},
#{'pattern' => 'leaving (1) step postscripts', 'supposed_time' => 132},
#].reverse,
#}
@@ -1,79 +1,79 @@
#!/usr/bin/env rspec
require File.join(File.dirname(__FILE__), "..", "spec_helper")
include Astute

describe MClient do
  include SpecHelpers
  before(:each) do
    @ctx = mock('context')
    @ctx.stubs(:task_id)
    @ctx.stubs(:reporter)
  end

  it "should receive method call and process valid result correctly" do
    nodes = [{'uid' => 1}, {'uid' => 2}, {'uid' => 3}]
    rpcclient = mock_rpcclient(nodes)
    mc_valid_result = mock_mc_result

    rpcclient.expects(:echo).with(:msg => 'hello world').once.returns([mc_valid_result]*3)

    mclient = MClient.new(@ctx, "faketest", nodes.map {|x| x['uid']})
    stats = mclient.echo(:msg => 'hello world')
    stats.should eql([mc_valid_result]*3)
  end

  it "should return even bad result if check_result=false" do
    nodes = [{'uid' => 1}, {'uid' => 2}, {'uid' => 3}]
    rpcclient = mock_rpcclient(nodes)
    mc_valid_result = mock_mc_result
    mc_error_result = mock_mc_result({:statuscode => 1, :sender => '2'})

    rpcclient.expects(:echo).with(:msg => 'hello world').once.\
        returns([mc_valid_result, mc_error_result])

    mclient = MClient.new(@ctx, "faketest", nodes.map {|x| x['uid']}, check_result=false)
    stats = mclient.echo(:msg => 'hello world')
    stats.should eql([mc_valid_result, mc_error_result])
  end

  it "should try to retry for non-responded nodes" do
    nodes = [{'uid' => 1}, {'uid' => 2}, {'uid' => 3}]
    rpcclient = mock('rpcclient') do
      stubs(:progress=)
      expects(:discover).with(:nodes => ['1','2','3'])
      expects(:discover).with(:nodes => ['2','3'])
    end
    Astute::MClient.any_instance.stubs(:rpcclient).returns(rpcclient)

    mc_valid_result = mock_mc_result
    mc_valid_result2 = mock_mc_result({:sender => '2'})

    rpcclient.stubs(:echo).returns([mc_valid_result]).then.
        returns([mc_valid_result2]).then

    mclient = MClient.new(@ctx, "faketest", nodes.map {|x| x['uid']})
    mclient.retries = 1
    expect { mclient.echo(:msg => 'hello world') }.to raise_error(/MCollective agents '3' didn't respond./)
  end

  it "should raise error if agent returns statuscode != 0" do
    nodes = [{'uid' => 1}, {'uid' => 2}, {'uid' => 3}]
    rpcclient = mock('rpcclient') do
      stubs(:progress=)
      expects(:discover).with(:nodes => ['1','2','3'])
      expects(:discover).with(:nodes => ['2','3'])
    end
    Astute::MClient.any_instance.stubs(:rpcclient).returns(rpcclient)

    mc_valid_result = mock_mc_result
    mc_failed_result = mock_mc_result({:sender => '2', :statuscode => 1})

    rpcclient.stubs(:echo).returns([mc_valid_result]).then.
        returns([mc_failed_result]).then

    mclient = MClient.new(@ctx, "faketest", nodes.map {|x| x['uid']})
    mclient.retries = 1
    expect { mclient.echo(:msg => 'hello world') }.to \
        raise_error(/MCollective agents '3' didn't respond.\n.* failed nodes: 2/)
  end
end
@@ -1,170 +1,170 @@
#!/usr/bin/env rspec
require File.join(File.dirname(__FILE__), "..", "spec_helper")

describe "NailyFact DeploymentEngine" do
  context "When deploy is called, " do
    before(:each) do
      @ctx = mock
      @ctx.stubs(:task_id)
      @ctx.stubs(:deploy_log_parser).returns(Astute::LogParser::NoParsing.new)
      reporter = mock
      @ctx.stubs(:reporter).returns(reporter)
      reporter.stubs(:report)
      @deploy_engine = Astute::DeploymentEngine::NailyFact.new(@ctx)
      @data = {"args" =>
        {"attributes" =>
          {"storage_network_range" => "172.16.0.0/24", "auto_assign_floating_ip" => false,
           "mysql" => {"root_password" => "Z2EqsZo5"},
           "keystone" => {"admin_token" => "5qKy0i63", "db_password" => "HHQ86Rym", "admin_tenant" => "admin"},
           "nova" => {"user_password" => "h8RY8SE7", "db_password" => "Xl9I51Cb"},
           "glance" => {"user_password" => "nDlUxuJq", "db_password" => "V050pQAn"},
           "rabbit" => {"user" => "nova", "password" => "FLF3txKC"},
           "management_network_range" => "192.168.0.0/24",
           "public_network_range" => "240.0.1.0/24",
           "fixed_network_range" => "10.0.0.0/24",
           "floating_network_range" => "240.0.0.0/24"},
         "task_uuid" => "19d99029-350a-4c9c-819c-1f294cf9e741",
         "nodes" => [{"mac" => "52:54:00:0E:B8:F5", "status" => "provisioning",
                      "uid" => "devnailgun.mirantis.com", "error_type" => nil,
                      "fqdn" => "devnailgun.mirantis.com",
                      "network_data" => [{"gateway" => "192.168.0.1",
                                          "name" => "management", "dev" => "eth0",
                                          "brd" => "192.168.0.255", "netmask" => "255.255.255.0",
                                          "vlan" => 102, "ip" => "192.168.0.2/24"},
                                         {"gateway" => "240.0.1.1",
                                          "name" => "public", "dev" => "eth0",
                                          "brd" => "240.0.1.255", "netmask" => "255.255.255.0",
                                          "vlan" => 101, "ip" => "240.0.1.2/24"},
                                         {"name" => "floating", "dev" => "eth0", "vlan" => 120},
                                         {"name" => "fixed", "dev" => "eth0", "vlan" => 103},
                                         {"name" => "storage", "dev" => "eth0", "vlan" => 104}],
                      "id" => 1,
                      "ip" => "10.20.0.200",
                      "role" => "controller"},
                     {"mac" => "52:54:00:50:91:DD", "status" => "provisioning",
                      "uid" => 2, "error_type" => nil,
                      "fqdn" => "slave-2.mirantis.com",
                      "network_data" => [{"gateway" => "192.168.0.1",
                                          "name" => "management", "dev" => "eth0",
                                          "brd" => "192.168.0.255", "netmask" => "255.255.255.0",
                                          "vlan" => 102, "ip" => "192.168.0.3/24"},
                                         {"gateway" => "240.0.1.1",
                                          "name" => "public", "dev" => "eth0",
                                          "brd" => "240.0.1.255", "netmask" => "255.255.255.0",
                                          "vlan" => 101, "ip" => "240.0.1.3/24"},
                                         {"name" => "floating", "dev" => "eth0", "vlan" => 120},
                                         {"name" => "fixed", "dev" => "eth0", "vlan" => 103},
                                         {"name" => "storage", "dev" => "eth0", "vlan" => 104}],
                      "id" => 2,
                      "ip" => "10.20.0.221",
                      "role" => "compute"},
                     {"mac" => "52:54:00:C3:2C:28", "status" => "provisioning",
                      "uid" => 3, "error_type" => nil,
                      "fqdn" => "slave-3.mirantis.com",
                      "network_data" => [{"gateway" => "192.168.0.1",
                                          "name" => "management", "dev" => "eth0",
                                          "brd" => "192.168.0.255", "netmask" => "255.255.255.0",
                                          "vlan" => 102, "ip" => "192.168.0.4/24"},
                                         {"gateway" => "240.0.1.1",
                                          "name" => "public", "dev" => "eth0",
                                          "brd" => "240.0.1.255", "netmask" => "255.255.255.0",
                                          "vlan" => 101, "ip" => "240.0.1.4/24"},
                                         {"name" => "floating", "dev" => "eth0", "vlan" => 120},
                                         {"name" => "fixed", "dev" => "eth0", "vlan" => 103},
                                         {"name" => "storage", "dev" => "eth0", "vlan" => 104}],
                      "id" => 3,
                      "ip" => "10.20.0.68",
                      "role" => "compute"}]},
       "method" => "deploy",
       "respond_to" => "deploy_resp"}
      ha_nodes = @data['args']['nodes'] +
        [{"mac" => "52:54:00:0E:88:88", "status" => "provisioned",
          "uid" => "4", "error_type" => nil,
          "fqdn" => "controller-4.mirantis.com",
          "network_data" => [{"gateway" => "192.168.0.1",
                              "name" => "management", "dev" => "eth0",
                              "brd" => "192.168.0.255", "netmask" => "255.255.255.0",
                              "vlan" => 102, "ip" => "192.168.0.5/24"},
                             {"gateway" => "240.0.1.1",
                              "name" => "public", "dev" => "eth0",
                              "brd" => "240.0.1.255", "netmask" => "255.255.255.0",
                              "vlan" => 101, "ip" => "240.0.1.5/24"},
                             {"name" => "floating", "dev" => "eth0", "vlan" => 120},
                             {"name" => "fixed", "dev" => "eth0", "vlan" => 103},
                             {"name" => "storage", "dev" => "eth0", "vlan" => 104}],
          "id" => 4,
          "ip" => "10.20.0.205",
          "role" => "controller"},
         {"mac" => "52:54:00:0E:99:99", "status" => "provisioned",
          "uid" => "5", "error_type" => nil,
          "fqdn" => "controller-5.mirantis.com",
          "network_data" => [{"gateway" => "192.168.0.1",
                              "name" => "management", "dev" => "eth0",
                              "brd" => "192.168.0.255", "netmask" => "255.255.255.0",
                              "vlan" => 102, "ip" => "192.168.0.6/24"},
                             {"gateway" => "240.0.1.1",
                              "name" => "public", "dev" => "eth0",
                              "brd" => "240.0.1.255", "netmask" => "255.255.255.0",
                              "vlan" => 101, "ip" => "240.0.1.6/24"},
                             {"name" => "floating", "dev" => "eth0", "vlan" => 120},
                             {"name" => "fixed", "dev" => "eth0", "vlan" => 103},
                             {"name" => "storage", "dev" => "eth0", "vlan" => 104}],
          "id" => 5,
          "ip" => "10.20.0.206",
          "role" => "controller"}]
      @data_ha = Marshal.load(Marshal.dump(@data))
      @data_ha['args']['nodes'] = ha_nodes
      @data_ha['args']['attributes']['deployment_mode'] = "ha_compute"
      # VIPs are required for HA mode and should be passed from Nailgun (only in HA)
      @data_ha['args']['attributes']['management_vip'] = "192.168.0.111"
      @data_ha['args']['attributes']['public_vip'] = "240.0.1.111"
    end

    it "it should call valid method depends on attrs" do
      nodes = [{'uid' => 1}]
      attrs = {'deployment_mode' => 'ha_compute'}
      attrs_modified = attrs.merge({'some' => 'somea'})

      @deploy_engine.expects(:attrs_ha_compute).with(nodes, attrs).returns(attrs_modified)
      @deploy_engine.expects(:deploy_ha_compute).with(nodes, attrs_modified)
      # All implementations of deploy_piece go to subclasses
      @deploy_engine.respond_to?(:deploy_piece).should be_true
      @deploy_engine.deploy(nodes, attrs)
    end

    it "it should raise an exception if deployment mode is unsupported" do
      nodes = [{'uid' => 1}]
      attrs = {'deployment_mode' => 'unknown'}
      expect {@deploy_engine.deploy(nodes, attrs)}.to raise_exception(/Method attrs_unknown is not implemented/)
    end

    it "multinode_compute deploy should not raise any exception" do
      @data['args']['attributes']['deployment_mode'] = "multinode_compute"
      Astute::Metadata.expects(:publish_facts).times(@data['args']['nodes'].size)
      # we got two calls, one for controller, and another for all computes
      Astute::PuppetdDeployer.expects(:deploy).twice
      @deploy_engine.deploy(@data['args']['nodes'], @data['args']['attributes'])
    end

    it "ha_compute deploy should not raise any exception" do
      Astute::Metadata.expects(:publish_facts).at_least_once
      Astute::PuppetdDeployer.expects(:deploy).times(7)
      @deploy_engine.deploy(@data_ha['args']['nodes'], @data_ha['args']['attributes'])
    end

    it "ha_compute deploy should not raise any exception if there are only one controller" do
      Astute::Metadata.expects(:publish_facts).at_least_once
      Astute::PuppetdDeployer.expects(:deploy).times(4)
      ctrl = @data_ha['args']['nodes'].select {|n| n['role'] == 'controller'}[0]
      @deploy_engine.deploy([ctrl], @data_ha['args']['attributes'])
    end

    it "singlenode_compute deploy should not raise any exception" do
      @data['args']['attributes']['deployment_mode'] = "singlenode_compute"
      @data['args']['nodes'] = [@data['args']['nodes'][0]] # We have only one node in singlenode
      Astute::Metadata.expects(:publish_facts).times(@data['args']['nodes'].size)
      Astute::PuppetdDeployer.expects(:deploy).once # one call for one node
      @deploy_engine.deploy(@data['args']['nodes'], @data['args']['attributes'])
    end
  end
end
||||
it "ha_compute deploy should not raise any exception" do
|
||||
Astute::Metadata.expects(:publish_facts).at_least_once
|
||||
Astute::PuppetdDeployer.expects(:deploy).times(7)
|
||||
@deploy_engine.deploy(@data_ha['args']['nodes'], @data_ha['args']['attributes'])
|
||||
end
|
||||
|
||||
it "ha_compute deploy should not raise any exception if there are only one controller" do
|
||||
Astute::Metadata.expects(:publish_facts).at_least_once
|
||||
Astute::PuppetdDeployer.expects(:deploy).times(4)
|
||||
ctrl = @data_ha['args']['nodes'].select {|n| n['role'] == 'controller'}[0]
|
||||
@deploy_engine.deploy([ctrl], @data_ha['args']['attributes'])
|
||||
end
|
||||
|
||||
it "singlenode_compute deploy should not raise any exception" do
|
||||
@data['args']['attributes']['deployment_mode'] = "singlenode_compute"
|
||||
@data['args']['nodes'] = [@data['args']['nodes'][0]] # We have only one node in singlenode
|
||||
Astute::Metadata.expects(:publish_facts).times(@data['args']['nodes'].size)
|
||||
Astute::PuppetdDeployer.expects(:deploy).once # one call for one node
|
||||
@deploy_engine.deploy(@data['args']['nodes'], @data['args']['attributes'])
|
||||
end
|
||||
end
|
||||
end
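These examples pin down the engine's dispatch contract: deploy reads attributes['deployment_mode'], first calls attrs_<mode> to prepare attributes and then deploy_<mode> with the result, and an unknown mode fails with "Method attrs_unknown is not implemented". A minimal sketch of that dispatch, with method names derived from deployment_mode (illustrative only, not the engine's actual code):

# Sketch only: the dispatch implied by the expectations above.
def deploy(nodes, attrs)
  mode = attrs['deployment_mode']
  attrs_method  = "attrs_#{mode}"     # e.g. attrs_ha_compute
  deploy_method = "deploy_#{mode}"    # e.g. deploy_ha_compute
  raise "Method #{attrs_method} is not implemented" unless respond_to?(attrs_method)
  attrs = send(attrs_method, nodes, attrs)
  send(deploy_method, nodes, attrs)
end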
@ -1,119 +1,119 @@
#!/usr/bin/env rspec
require File.join(File.dirname(__FILE__), "..", "spec_helper")

describe Astute::Orchestrator do
  include SpecHelpers
  before(:each) do
    @orchestrator = Astute::Orchestrator.new
    @reporter = mock('reporter')
    @reporter.stub_everything
  end

  it "must be able to return node type" do
    nodes = [{'uid' => '1'}]
    res = {:data => {:node_type => 'target'},
           :sender => "1"}

    mc_res = mock_mc_result(res)
    mc_timeout = 5

    rpcclient = mock_rpcclient(nodes, mc_timeout)
    rpcclient.expects(:get_type).once.returns([mc_res])

    types = @orchestrator.node_type(@reporter, 'task_uuid', nodes, mc_timeout)
    types.should eql([{"node_type"=>"target", "uid"=>"1"}])
  end

  it "must be able to complete verify_networks" do
    nodes = [{'uid' => '1'}, {'uid' => '2'}]
    networks = [{'id' => 1, 'vlan_id' => 100, 'cidr' => '10.0.0.0/24'},
                {'id' => 2, 'vlan_id' => 101, 'cidr' => '192.168.0.0/24'}]
    res1 = {:data => {:uid => "1",
                      :neighbours => {"eth0" => {"100" => {"1" => ["eth0"], "2" => ["eth0"]},
                                                 "101" => {"1" => ["eth0"]}
                                                }
                                     }
                     },
            :sender => "1"
           }
    res2 = {:data => {:uid => "2",
                      :neighbours => {"eth0" => {"100" => {"1" => ["eth0"], "2" => ["eth0"]},
                                                 "101" => {"1" => ["eth0"], "2" => ["eth0"]}
                                                }
                                     }
                     },
            :sender => "2"
           }
    valid_res = {:statuscode => 0, :sender => '1'}
    mc_res1 = mock_mc_result(res1)
    mc_res2 = mock_mc_result(res2)
    mc_valid_res = mock_mc_result

    rpcclient = mock_rpcclient(nodes)

    rpcclient.expects(:start_frame_listeners).once.returns([mc_valid_res]*2)
    rpcclient.expects(:send_probing_frames).once.returns([mc_valid_res]*2)
    rpcclient.expects(:get_probing_info).once.returns([mc_res1, mc_res2])
    Astute::MClient.any_instance.stubs(:rpcclient).returns(rpcclient)

    res = @orchestrator.verify_networks(@reporter, 'task_uuid', nodes, networks)
    expected = {"nodes" => [{"networks" => [{"iface"=>"eth0", "vlans"=>[100]}], "uid"=>"1"},
                            {"networks"=>[{"iface"=>"eth0", "vlans"=>[100, 101]}], "uid"=>"2"}]}
    res.should eql(expected)
  end

  it "verify_network returns error if nodes list is empty" do
    res = @orchestrator.verify_networks(@reporter, 'task_uuid', [], [])
    res.should eql({'status' => 'error', 'error' => "Nodes list is empty. Nothing to check."})
  end

  it "verify_network returns all vlans passed if only one node provided" do
    nodes = [{'uid' => '1'}]
    networks = [{'id' => 1, 'vlan_id' => 100, 'cidr' => '10.0.0.0/24'},
                {'id' => 2, 'vlan_id' => 101, 'cidr' => '192.168.0.0/24'}]
    res = @orchestrator.verify_networks(@reporter, 'task_uuid', nodes, networks)
    expected = {"nodes" => [{"networks" => [{"iface"=>"eth0", "vlans"=>[100,101]}], "uid"=>"1"}]}
    res.should eql(expected)
  end

  it "in remove_nodes, it returns empty list if nodes are not provided" do
    res = @orchestrator.remove_nodes(@reporter, 'task_uuid', [])
    res.should eql({'nodes' => []})
  end

  it "remove_nodes cleans nodes and reboots them" do
    removed_hash = {:sender => '1',
                    :data => {:rebooted => true}}
    error_hash = {:sender => '2',
                  :data => {:rebooted => false, :error_msg => 'Could not reboot'}}
    nodes = [{'uid' => 1}, {'uid' => 2}]

    rpcclient = mock_rpcclient(nodes)
    mc_removed_res = mock_mc_result(removed_hash)
    mc_error_res = mock_mc_result(error_hash)

    rpcclient.expects(:erase_node).once.with(:reboot => true).returns([mc_removed_res, mc_error_res])

    res = @orchestrator.remove_nodes(@reporter, 'task_uuid', nodes)
    res.should eql({'nodes' => [{'uid' => '1'}], 'status' => 'error',
                    'error_nodes' => [{"uid"=>"2", "error"=>"RPC method 'erase_node' failed "\
                                                            "with message: Could not reboot"}]})
  end

  it "calls deploy method with valid arguments" do
    nodes = [{'uid' => 1}]
    attrs = {'a' => 'b'}
    Astute::DeploymentEngine::NailyFact.any_instance.expects(:deploy).
                                        with([{'uid' => '1'}], attrs)
    @orchestrator.deploy(@reporter, 'task_uuid', nodes, attrs)
  end

  it "deploy method raises error if nodes list is empty" do
    expect {@orchestrator.deploy(@reporter, 'task_uuid', [], {})}.
           to raise_error(/Nodes to deploy are not provided!/)
  end

  it "remove_nodes tries to call MCAgent multiple times"
  it "remove_nodes does not fail if any of the nodes failed"
end
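For reference, the call shapes exercised above translate into plain library usage along these lines (the reporter object and all values are illustrative assumptions, not fixtures from this change):

require 'astute'

# Any object responding to #report can act as the reporter (illustrative stub).
reporter = Object.new
def reporter.report(msg)
  puts msg.inspect
end

orchestrator = Astute::Orchestrator.new
nodes    = [{'uid' => '1'}, {'uid' => '2'}]
networks = [{'id' => 1, 'vlan_id' => 100, 'cidr' => '10.0.0.0/24'}]

# Returns {"nodes" => [{"uid" => ..., "networks" => [{"iface" => ..., "vlans" => [...]}]}, ...]}
orchestrator.verify_networks(reporter, 'task_uuid', nodes, networks)

# Returns {'nodes' => [...]} plus 'status'/'error_nodes' when an erase fails.
orchestrator.remove_nodes(reporter, 'task_uuid', nodes)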
@ -1,126 +1,126 @@
#!/usr/bin/env rspec
require File.join(File.dirname(__FILE__), "..", "spec_helper")
include Astute

describe "Puppetd" do
  include SpecHelpers
  context "PuppetdDeployer" do
    before :each do
      @ctx = mock
      @ctx.stubs(:task_id)
      @reporter = mock('reporter')
      @ctx.stubs(:reporter).returns(ProxyReporter.new(@reporter))
      @ctx.stubs(:deploy_log_parser).returns(Astute::LogParser::NoParsing.new)
    end

    it "reports ready status for node if puppet deploy finished successfully" do
      @reporter.expects(:report).with('nodes' => [{'uid' => '1', 'status' => 'ready'}])
      last_run_result = {:data =>
                          {:time => {"last_run" => 1358425701},
                           :status => "running", :resources => {'failed' => 0},
                           :running => 1, :idling => 0},
                         :sender => "1"}
      last_run_result_new = Marshal.load(Marshal.dump(last_run_result))
      last_run_result_new[:data][:time]['last_run'] = 1358426000

      last_run_result_finished = Marshal.load(Marshal.dump(last_run_result))
      last_run_result_finished[:data][:status] = 'stopped'
      last_run_result_finished[:data][:time]['last_run'] = 1358427000

      nodes = [{'uid' => '1'}]

      rpcclient = mock_rpcclient(nodes)

      rpcclient_valid_result = mock_mc_result(last_run_result)
      rpcclient_new_res = mock_mc_result(last_run_result_new)
      rpcclient_finished_res = mock_mc_result(last_run_result_finished)

      rpcclient.stubs(:last_run_summary).returns([rpcclient_valid_result]).then.
                                         returns([rpcclient_valid_result]).then.
                                         returns([rpcclient_new_res]).then.
                                         returns([rpcclient_finished_res])

      rpcclient.expects(:runonce).at_least_once.returns([rpcclient_valid_result])

      Astute::PuppetdDeployer.deploy(@ctx, nodes, retries=0)
    end

    it "publishes error status for node if puppet failed" do
      @reporter.expects(:report).with('nodes' => [{'status' => 'error', 'error_type' => 'deploy', 'uid' => '1'}])

      last_run_result = {:statuscode => 0, :data =>
                          {:changes => {"total" => 1}, :time => {"last_run" => 1358425701},
                           :resources => {"failed" => 0}, :status => "running",
                           :running => 1, :idling => 0, :runtime => 100},
                         :sender => "1"}
      last_run_result_new = Marshal.load(Marshal.dump(last_run_result))
      last_run_result_new[:data][:time]['last_run'] = 1358426000
      last_run_result_new[:data][:resources]['failed'] = 1

      nodes = [{'uid' => '1'}]

      last_run_result_finished = Marshal.load(Marshal.dump(last_run_result))
      last_run_result_finished[:data][:status] = 'stopped'
      last_run_result_finished[:data][:time]['last_run'] = 1358427000
      last_run_result_finished[:data][:resources]['failed'] = 1

      rpcclient = mock_rpcclient(nodes)

      rpcclient_valid_result = mock_mc_result(last_run_result)
      rpcclient_new_res = mock_mc_result(last_run_result_new)
      rpcclient_finished_res = mock_mc_result(last_run_result_finished)

      rpcclient.stubs(:last_run_summary).returns([rpcclient_valid_result]).then.
                                         returns([rpcclient_valid_result]).then.
                                         returns([rpcclient_new_res]).then.
                                         returns([rpcclient_finished_res])
      rpcclient.expects(:runonce).at_least_once.returns([rpcclient_valid_result])

      MClient.any_instance.stubs(:rpcclient).returns(rpcclient)
      Astute::PuppetdDeployer.deploy(@ctx, nodes, retries=0)
    end

    it "retries to run puppet if it fails" do
      @reporter.expects(:report).with('nodes' => [{'uid' => '1', 'status' => 'ready'}])

      last_run_result = {:statuscode => 0, :data =>
                          {:changes => {"total" => 1}, :time => {"last_run" => 1358425701},
                           :resources => {"failed" => 0}, :status => "running",
                           :running => 1, :idling => 0, :runtime => 100},
                         :sender => "1"}
      last_run_failed = Marshal.load(Marshal.dump(last_run_result))
      last_run_failed[:data][:time]['last_run'] = 1358426000
      last_run_failed[:data][:resources]['failed'] = 1
      last_run_failed[:data][:status] = 'stopped'

      last_run_fixing = Marshal.load(Marshal.dump(last_run_result))
      last_run_fixing[:data][:time]['last_run'] = 1358426000
      last_run_fixing[:data][:resources]['failed'] = 1
      last_run_fixing[:data][:status] = 'running'

      last_run_success = Marshal.load(Marshal.dump(last_run_result))
      last_run_success[:data][:time]['last_run'] = 1358428000
      last_run_success[:data][:status] = 'stopped'

      nodes = [{'uid' => '1'}]

      rpcclient = mock_rpcclient(nodes)

      rpcclient_valid_result = mock_mc_result(last_run_result)
      rpcclient_failed = mock_mc_result(last_run_failed)
      rpcclient_fixing = mock_mc_result(last_run_fixing)
      rpcclient_succeed = mock_mc_result(last_run_success)

      rpcclient.stubs(:last_run_summary).returns([rpcclient_valid_result]).then.
                                         returns([rpcclient_valid_result]).then.
                                         returns([rpcclient_failed]).then.
                                         returns([rpcclient_failed]).then.
                                         returns([rpcclient_fixing]).then.
                                         returns([rpcclient_succeed])
      rpcclient.expects(:runonce).at_least_once.returns([rpcclient_valid_result])

      MClient.any_instance.stubs(:rpcclient).returns(rpcclient)
      Astute::PuppetdDeployer.deploy(@ctx, nodes, retries=1)
    end
  end
end
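The stubbed last_run_summary sequences encode the completion criterion these specs rely on: a puppet run counts as started once time['last_run'] moves past its pre-run value, as finished once status returns to 'stopped', and resources['failed'] then decides between 'ready' and 'error'. A condensed sketch of that check (illustrative only, not the deployer's actual code):

# Sketch only: completion and result checks implied by the stubbed summaries above.
def run_finished?(pre_run, current)
  current[:time]['last_run'] != pre_run[:time]['last_run'] &&
    current[:status] == 'stopped'
end

def node_status(summary)
  summary[:resources]['failed'].to_i > 0 ? 'error' : 'ready'
end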
@ -1,129 +1,129 @@
#!/usr/bin/env rspec
require File.join(File.dirname(__FILE__), "..", "spec_helper")
include Astute

describe "ProxyReporter" do
  context "Instance of ProxyReporter class" do
    before :each do
      @msg = {'nodes' => [{'status' => 'ready', 'uid' => '1'}]}
      @msg_pr = {'nodes' => [@msg['nodes'][0],
                             {'status' => 'deploying', 'uid' => '2',
                              'progress' => 54}]}
      @up_reporter = mock('up_reporter')
      @reporter = ProxyReporter.new(@up_reporter)
    end

    it "reports first-come data" do
      @up_reporter.expects(:report).with(@msg)
      @reporter.report(@msg)
    end

    it "does not report the same message" do
      @up_reporter.expects(:report).with(@msg).once
      5.times { @reporter.report(@msg) }
    end

    it "reports only updated node" do
      updated_node = @msg_pr['nodes'][1]
      expected_msg = {'nodes' => [updated_node]}
      @up_reporter.expects(:report).with(@msg)
      @up_reporter.expects(:report).with(expected_msg)
      @reporter.report(@msg)
      @reporter.report(@msg_pr)
    end

    it "reports only if progress value is greater" do
      msg1 = {'nodes' => [{'status' => 'deploying', 'uid' => '1', 'progress' => 54},
                          {'status' => 'deploying', 'uid' => '2', 'progress' => 54}]}
      msg2 = Marshal.load(Marshal.dump(msg1))
      msg2['nodes'][1]['progress'] = 100
      msg2['nodes'][1]['status'] = 'ready'
      updated_node = msg2['nodes'][1]
      expected_msg = {'nodes' => [updated_node]}

      @up_reporter.expects(:report).with(msg1)
      @up_reporter.expects(:report).with(expected_msg)
      @reporter.report(msg1)
      @reporter.report(msg2)
    end

    it "raises exception if wrong key passed" do
      @msg['nodes'][0]['ups'] = 'some_value'
      lambda {@reporter.report(@msg)}.should raise_error
    end

    it "adjusts progress to 100 if passed greater" do
      expected_msg = Marshal.load(Marshal.dump(@msg_pr))
      expected_msg['nodes'][1]['progress'] = 100
      @msg_pr['nodes'][1]['progress'] = 120
      @up_reporter.expects(:report).with(expected_msg)
      @reporter.report(@msg_pr)
    end

    it "adjusts progress to 100 if status ready" do
      expected_msg = Marshal.load(Marshal.dump(@msg_pr))
      expected_msg['nodes'][1]['progress'] = 100
      expected_msg['nodes'][1]['status'] = 'ready'
      @msg_pr['nodes'][1]['status'] = 'ready'
      @up_reporter.expects(:report).with(expected_msg)
      @reporter.report(@msg_pr)
    end

    it "does not report if a node was ready and is being set back to deploying" do
      msg1 = {'nodes' => [{'uid' => 1, 'status' => 'ready'}]}
      msg2 = {'nodes' => [{'uid' => 2, 'status' => 'ready'}]}
      msg3 = {'nodes' => [{'uid' => 1, 'status' => 'deploying', 'progress' => 100}]}
      @up_reporter.expects(:report).with(msg1)
      @up_reporter.expects(:report).with(msg2)
      @up_reporter.expects(:report).never
      @reporter.report(msg1)
      @reporter.report(msg2)
      5.times { @reporter.report(msg3) }
    end

    it "reports even if not all keys are provided" do
      msg1 = {'nodes' => [{'uid' => 1, 'status' => 'deploying'}]}
      msg2 = {'nodes' => [{'uid' => 2, 'status' => 'ready'}]}
      @up_reporter.expects(:report).with(msg1)
      @up_reporter.expects(:report).with(msg2)
      @reporter.report(msg1)
      @reporter.report(msg2)
    end

    it "raises exception if progress provided and no status" do
      msg1 = {'nodes' => [{'uid' => 1, 'status' => 'ready'}]}
      msg2 = {'nodes' => [{'uid' => 1, 'progress' => 100}]}
      @up_reporter.expects(:report).with(msg1)
      @up_reporter.expects(:report).never
      @reporter.report(msg1)
      lambda {@reporter.report(msg2)}.should raise_error
    end

    it "raises exception if status of node is not supported" do
      msg1 = {'nodes' => [{'uid' => 1, 'status' => 'hah'}]}
      @up_reporter.expects(:report).never
      lambda {@reporter.report(msg1)}.should raise_error
    end

    it "passes other valid attrs through" do
      msg1 = {'nodes' => [{'uid' => 1, 'status' => 'deploying'}]}
      msg2 = {'status' => 'error', 'error_type' => 'deploy',
              'nodes' => [{'uid' => 2, 'status' => 'error', 'message' => 'deploy'}]}
      @up_reporter.expects(:report).with(msg1)
      @up_reporter.expects(:report).with(msg2)
      @reporter.report(msg1)
      @reporter.report(msg2)
    end

    it "reports if status is greater" do
      msgs = [{'nodes' => [{'uid' => 1, 'status' => 'provisioned'}]},
              {'nodes' => [{'uid' => 1, 'status' => 'provisioning'}]},
              {'nodes' => [{'uid' => 1, 'status' => 'ready'}]},
              {'nodes' => [{'uid' => 1, 'status' => 'error'}]}]
      @up_reporter.expects(:report).with(msgs[0])
      @up_reporter.expects(:report).with(msgs[2])
      @up_reporter.expects(:report).with(msgs[3])
      msgs.each {|msg| @reporter.report(msg)}
    end
  end
end
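Taken together, these cases pin down the reporter's per-node filtering: drop exact repeats and regressions, never let status move backwards, clamp progress at 100, and force progress to 100 once a node is ready. A compact sketch of that decision (illustrative only; the status ranking is an assumption inferred from the last example, and key/status validation is omitted):

# Sketch only: per-node filter implied by the expectations above.
STATUS_ORDER = ['provisioning', 'provisioned', 'deploying', 'ready', 'error']  # assumed ranking

def normalize(node)
  node = node.dup
  node['progress'] = 100 if node['progress'].to_i > 100 || node['status'] == 'ready'
  node
end

def report_node?(previous, node)
  return true if previous.nil?                               # first report always goes through
  prev_rank = STATUS_ORDER.index(previous['status']).to_i
  new_rank  = STATUS_ORDER.index(node['status']).to_i
  return false if new_rank < prev_rank                       # never report a status rollback
  return true  if new_rank > prev_rank                       # status advanced
  node['progress'].to_i > previous['progress'].to_i          # same status: progress must grow
end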
@ -1,57 +1,57 @@
#!/usr/bin/env rspec
require File.join(File.dirname(__FILE__), "..", "spec_helper")

describe "SimplePuppet DeploymentEngine" do
  context "When deploy is called, " do
    before(:each) do
      @ctx = mock
      @ctx.stubs(:task_id)
      @ctx.stubs(:deploy_log_parser).returns(Astute::LogParser::NoParsing.new)
      @reporter = mock('reporter')
      @reporter.stub_everything
      @ctx.stubs(:reporter).returns(Astute::ProxyReporter.new(@reporter))
      @deploy_engine = Astute::DeploymentEngine::SimplePuppet.new(@ctx)
      @env = YAML.load_file(File.join(File.dirname(__FILE__), "..", "..", "examples", "no_attrs.yaml"))
    end

    it "should call the valid method depending on attrs" do
      nodes = [{'uid' => 1}]
      attrs = {'deployment_mode' => 'ha_compute'}
      @deploy_engine.expects(:attrs_ha_compute).never # It is not supported in SimplePuppet
      @deploy_engine.expects(:deploy_ha_compute).with(nodes, attrs)
      # All implementations of deploy_piece go to subclasses
      @deploy_engine.respond_to?(:deploy_piece).should be_true
      @deploy_engine.deploy(nodes, attrs)
    end

    it "should raise an exception if deployment mode is unsupported" do
      nodes = [{'uid' => 1}]
      attrs = {'deployment_mode' => 'unknown'}
      expect {@deploy_engine.deploy(nodes, attrs)}.to raise_exception(
        /Method deploy_unknown is not implemented/)
    end

    it "multinode_compute deploy should not raise any exception" do
      @env['attributes']['deployment_mode'] = "multinode_compute"
      Astute::Metadata.expects(:publish_facts).never # It is not supported in SimplePuppet
      # we expect two calls: one for the controller and another for all computes
      Astute::PuppetdDeployer.expects(:deploy).twice
      @deploy_engine.deploy(@env['nodes'], @env['attributes'])
    end

    it "ha_compute deploy should not raise any exception" do
      @env['attributes']['deployment_mode'] = "ha_compute"
      Astute::Metadata.expects(:publish_facts).never
      Astute::PuppetdDeployer.expects(:deploy).times(5)
      @deploy_engine.deploy(@env['nodes'], @env['attributes'])
    end

    it "singlenode_compute deploy should not raise any exception" do
      @env['attributes']['deployment_mode'] = "singlenode_compute"
      @env['nodes'] = [@env['nodes'][0]] # We have only one node in singlenode
      Astute::Metadata.expects(:publish_facts).never
      Astute::PuppetdDeployer.expects(:deploy).once # one call for one node
      @deploy_engine.deploy(@env['nodes'], @env['attributes'])
    end
  end
end
@ -1,185 +1,185 @@
module MCollective
  module Agent
    # An agent to manage the Puppet Daemon
    #
    # Configuration Options:
    #    puppetd.splaytime - Number of seconds within which to splay; no splay
    #                        by default
    #    puppetd.statefile - Where to find the state.yaml file; defaults to
    #                        /var/lib/puppet/state/state.yaml
    #    puppetd.lockfile  - Where to find the lock file; defaults to
    #                        /var/lib/puppet/state/puppetdlock
    #    puppetd.puppetd   - Where to find the puppet agent binary; defaults to
    #                        /usr/bin/puppet agent
    #    puppetd.summary   - Where to find the summary file written by Puppet
    #                        2.6.8 and newer; defaults to
    #                        /var/lib/puppet/state/last_run_summary.yaml
    #    puppetd.pidfile   - Where to find puppet agent's pid file; defaults to
    #                        /var/run/puppet/agent.pid
    class Puppetd < RPC::Agent
      def startup_hook
        @splaytime = @config.pluginconf["puppetd.splaytime"].to_i || 0
        @lockfile = @config.pluginconf["puppetd.lockfile"] || "/var/lib/puppet/state/puppetdlock"
        @statefile = @config.pluginconf["puppetd.statefile"] || "/var/lib/puppet/state/state.yaml"
        @pidfile = @config.pluginconf["puppet.pidfile"] || "/var/run/puppet/agent.pid"
        @puppetd = @config.pluginconf["puppetd.puppetd"] || "/usr/bin/puppet agent"
        @last_summary = @config.pluginconf["puppet.summary"] || "/var/lib/puppet/state/last_run_summary.yaml"
      end

      action "last_run_summary" do
        last_run_summary
        set_status
      end

      action "enable" do
        enable
      end

      action "disable" do
        disable
      end

      action "runonce" do
        runonce
      end

      action "status" do
        set_status
      end

      private
      def last_run_summary
        # wrap into begin..rescue: fixes PRD-252
        begin
          summary = YAML.load_file(@last_summary)
        rescue
          summary = {}
        end

        # It should be an empty hash if the 'resources' key is not defined, because otherwise merge will fail with TypeError
        summary["resources"] ||= {}
        # Astute relies on last_run, so we must set last_run
        summary["time"] ||= {}
        summary["time"]["last_run"] ||= 0
        # if 'failed' is not provided, it means something is wrong. So the default value is 1.
        reply[:resources] = {"failed"=>1, "changed"=>0, "total"=>0, "restarted"=>0, "out_of_sync"=>0}.merge(summary["resources"])

        ["time", "events", "changes", "version"].each do |dat|
          reply[dat.to_sym] = summary[dat]
        end
      end

      def set_status
        reply[:status] = puppet_daemon_status
        reply[:running] = reply[:status] == 'running' ? 1 : 0
        reply[:enabled] = reply[:status] == 'disabled' ? 0 : 1
        reply[:idling] = reply[:status] == 'idling' ? 1 : 0
        reply[:stopped] = reply[:status] == 'stopped' ? 1 : 0
        reply[:lastrun] = 0
        reply[:lastrun] = File.stat(@statefile).mtime.to_i if File.exists?(@statefile)
        reply[:runtime] = Time.now.to_i - reply[:lastrun]
        reply[:output] = "Currently #{reply[:status]}; last completed run #{reply[:runtime]} seconds ago"
      end

      def puppet_daemon_status
        locked = File.exists?(@lockfile)
        disabled = locked && File::Stat.new(@lockfile).zero?
        has_pid = File.exists?(@pidfile)

        return 'disabled' if disabled
        return 'running' if locked && has_pid
        return 'idling' if ! locked && has_pid
        return 'stopped' if ! has_pid
      end

      def runonce
        set_status
        case (reply[:status])
        when 'disabled' then # can't run
          reply.fail "Empty Lock file exists; puppet agent is disabled."

        when 'running' then # can't run two simultaneously
          reply.fail "Lock file and PID file exist; puppet agent is running."

        when 'idling' then # signal daemon
          pid = File.read(@pidfile)
          if pid !~ /^\d+$/
            reply.fail "PID file does not contain a PID; got #{pid.inspect}"
          else
            begin
              ::Process.kill(0, Integer(pid)) # check that pid is alive
              # REVISIT: Should we add an extra round of security here, and
              # ensure that the PID file is securely owned, or that the target
              # process looks like Puppet? Otherwise a malicious user could
              # theoretically signal arbitrary processes with this...
              begin
                ::Process.kill("USR1", Integer(pid))
                reply[:output] = "Signalled daemonized puppet agent to run (process #{Integer(pid)}); " + (reply[:output] || '')
              rescue Exception => e
                reply.fail "Failed to signal the puppet agent daemon (process #{pid}): #{e}"
              end
            rescue Errno::ESRCH => e
              # PID is invalid, run puppet onetime as usual
              runonce_background
            end
          end

        when 'stopped' then # just run
          runonce_background

        else
          reply.fail "Unknown puppet agent status: #{reply[:status]}"
        end
      end

      def runonce_background
        cmd = [@puppetd, "--onetime", "--logdest", 'syslog']

        unless request[:forcerun]
          if @splaytime && @splaytime > 0
            cmd << "--splaylimit" << @splaytime << "--splay"
          end
        end

        cmd = cmd.join(" ")

        output = reply[:output] || ''
        run(cmd, :stdout => :output, :chomp => true)
        reply[:output] = "Called #{cmd}, " + output + (reply[:output] || '')
      end

      def enable
        if File.exists?(@lockfile)
          stat = File::Stat.new(@lockfile)

          if stat.zero?
            File.unlink(@lockfile)
            reply[:output] = "Lock removed"
          else
            reply[:output] = "Currently running; can't remove lock"
          end
        else
          reply.fail "Already enabled"
        end
      end

      def disable
        if File.exists?(@lockfile)
          stat = File::Stat.new(@lockfile)

          stat.zero? ? reply.fail("Already disabled") : reply.fail("Currently running; can't remove lock")
        else
          begin
            File.open(@lockfile, "w") { |file| }

            reply[:output] = "Lock created"
          rescue Exception => e
            reply.fail "Could not create lock: #{e}"
          end
        end
      end
    end
  end
end

# vi:tabstop=2:expandtab:ai:filetype=ruby
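The options documented at the top of the agent are read from the MCollective server configuration. An illustrative example of how they would typically be set in server.cfg (all values are examples only; note that startup_hook above actually reads the last two under the puppet. prefix rather than puppetd. as the header comment suggests):

plugin.puppetd.splaytime = 30
plugin.puppetd.lockfile = /var/lib/puppet/state/puppetdlock
plugin.puppetd.statefile = /var/lib/puppet/state/state.yaml
plugin.puppetd.puppetd = /usr/bin/puppet agent
plugin.puppet.pidfile = /var/run/puppet/agent.pid
plugin.puppet.summary = /var/lib/puppet/state/last_run_summary.yaml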