astute upgraded to product version
Ivan Bondar 2013-02-18 19:32:35 +04:00
parent ef5c36eb92
commit 274f0f766e
34 changed files with 2717 additions and 571 deletions

View File

@@ -1,19 +1,20 @@
$:.unshift File.expand_path('lib', File.dirname(__FILE__))
require 'astute/version'
Gem::Specification.new do |s|
s.name = 'astute'
s.version = Astute::VERSION
s.summary = 'Orchestrator for OpenStack deployment'
s.description = 'Orchestrator of deployment via Puppet & MCollective. Works both with Nailgun and from CLI.'
s.authors = ['Mike Scherbakov']
s.email = ['mscherbakov@mirantis.com']
s.add_dependency 'mcollective-client', '> 2.0.0'
s.files = Dir.glob("{bin,lib,spec}/**/*")
s.executables = ['astute', 'astute_run']
s.require_path = 'lib'
end
$:.unshift File.expand_path('lib', File.dirname(__FILE__))
require 'astute/version'
Gem::Specification.new do |s|
s.name = 'astute'
s.version = Astute::VERSION
s.summary = 'Orchestrator for OpenStack deployment'
s.description = 'Deployment Orchestrator of Puppet via MCollective. Works as a library or from CLI.'
s.authors = ['Mike Scherbakov']
s.email = ['mscherbakov@mirantis.com']
s.add_dependency 'mcollective-client', '> 2.0.0'
s.add_dependency 'symboltable', '>= 1.0.2'
s.files = Dir.glob("{bin,lib,spec}/**/*")
s.executables = ['astute', 'astute_run']
s.require_path = 'lib'
end

View File

@@ -1,23 +1,55 @@
#!/usr/bin/env ruby
require 'rubygems'
require 'astute'
class ConsoleReporter
def report(msg)
p msg
end
end
reporter = ConsoleReporter.new
nodes = [{'id' => '1', 'ip' => '10.1.1.2', 'uid' => 'devnailgun.mirantis.com', 'role' => 'test_controller'}]
#nodes << {'id' => '2', 'ip' => '10.1.1.3', 'uid' => 'mcoll2', 'role' => 'test_controller'}
networks = [{'id' => 1, 'vlan_id' => 100, 'cidr' => '10.0.0.0/24'},
{'id' => 2, 'vlan_id' => 101, 'cidr' => '192.168.0.0/24'}]
task_id = `uuidgen`.strip
orchestrator = Astute::Orchestrator.new
orchestrator.deploy(reporter, task_id, nodes)
orchestrator.verify_networks(reporter, task_id, nodes, networks)
#!/usr/bin/env ruby
require 'optparse'
require 'yaml'
begin
require 'astute'
rescue LoadError
require 'rubygems'
require 'astute'
end
class ConsoleReporter
def report(msg)
puts msg.inspect
end
end
opts = {}
optparse = OptionParser.new do |o|
o.banner = "Usage: bin/astute -f FILENAME"
o.on("-v", "--[no-]verbose", "Run verbosely") do |v|
opts[:verbose] = v
end
o.on("-f FILENAME", "Environment in YAML format. Samples are in examples directory.") do |f|
opts[:filename] = f
end
o.on("-h") { puts o; exit }
end
optparse.parse!(ARGV)
if opts[:filename].nil?
puts optparse
exit
end
reporter = ConsoleReporter.new
Astute.logger = Logger.new(STDOUT) if opts[:verbose]
environment = YAML.load_file(opts[:filename])
case environment['attributes']['deployment_engine']
when 'nailyfact'
deploy_engine = Astute::DeploymentEngine::NailyFact
when 'simplepuppet'
deploy_engine = Astute::DeploymentEngine::SimplePuppet # It just calls puppet and doesn't do any magic
else
deploy_engine = nil # Orchestrator will use its default
end
orchestrator = Astute::Orchestrator.new(deploy_engine, log_parsing=false)
orchestrator.deploy(reporter, environment['task_uuid'], environment['nodes'], environment['attributes'])
#orchestrator.verify_networks(reporter, task_id, nodes, networks)
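
For reference, a minimal sketch of the structure bin/astute expects YAML.load_file to return; only 'task_uuid', 'attributes' (with 'deployment_engine'/'deployment_mode') and 'nodes' are read above, and all concrete values below are invented for illustration.

# Hypothetical environment hash; field values are illustrative only.
environment = {
  'task_uuid'  => '0f12ab34-0000-0000-0000-000000000000',
  'attributes' => {
    'deployment_engine' => 'nailyfact',          # or 'simplepuppet'
    'deployment_mode'   => 'multinode_compute'   # default picked by DeploymentEngine#deploy
  },
  'nodes' => [
    {'uid' => '1', 'role' => 'controller', 'network_data' => []},
    {'uid' => '2', 'role' => 'compute',    'network_data' => []}
  ]
}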

View File

@@ -1,26 +1,38 @@
#!/usr/bin/env ruby
require 'rubygems'
require 'astute'
require 'optparse'
require 'yaml'
begin
require 'astute'
rescue LoadError
require 'rubygems'
require 'astute'
end
def puppet_consecutive_call(orchestrator, context, nodes)
#---------- SETTINGS -----------
$retries = 2
$ignore_failure = false
task_id = `uuidgen`.strip
log_parser = Astute::LogParser::NoParsing.new
logger_output = STDOUT
#--------------------------------
def puppet_consecutive_call(context, nodes)
nodes = [nodes] if not nodes.is_a?(Array)
Astute.logger.info "#{context.task_id}: Starting consecutive deployment."
for node in nodes do
orchestrator.deploy_piece(context, [node], false)
Astute::PuppetdDeployer.deploy(context, [node], $retries, $ignore_failure)
end
nodes_roles = nodes.map { |n| { n['uid'] => n['role'] } }
Astute.logger.info "#{context.task_id}: Finished deployment of nodes => roles: #{nodes_roles.inspect}"
end
def puppet_parallel_call(orchestrator, context, nodes)
orchestrator.deploy_piece(context, nodes, false)
end
class Context
attr_accessor :task_id, :reporter
def initialize(task_id, reporter)
@task_id = task_id
@reporter = reporter
end
def puppet_parallel_call(context, nodes)
nodes = [nodes] if not nodes.is_a?(Array)
Astute.logger.info "#{context.task_id}: Starting parallel deployment."
Astute::PuppetdDeployer.deploy(context, nodes, $retries, $ignore_failure)
nodes_roles = nodes.map { |n| { n['uid'] => n['role'] } }
Astute.logger.info "#{context.task_id}: Finished deployment of nodes => roles: #{nodes_roles.inspect}"
end
class ConsoleReporter
@@ -29,13 +41,34 @@ class ConsoleReporter
end
end
opts = {}
optparse = OptionParser.new do |o|
o.banner = "Usage: bin/astute -f FILENAME"
o.on("-v", "--[no-]verbose", "Run verbosely") do |v|
opts[:verbose] = v
end
o.on("-f FILENAME", "Environment in YAML format. Samples are in examples directory.") do |f|
opts[:filename] = f
end
o.on("-h") { puts o; exit }
end
optparse.parse!(ARGV)
if opts[:filename].nil?
puts optparse
exit
end
Astute.logger = Logger.new(logger_output) if opts[:verbose]
reporter = ConsoleReporter.new
proxy_reporter = Astute::ProxyReporter.new(reporter)
context = Astute::Context.new(task_id, proxy_reporter, log_parser)
task_id = `uuidgen`.strip
orchestrator = Astute::Orchestrator.new
context = Context.new(task_id, reporter)
pinged_nodes_list = [] # make list of nodes that respond to ping
# make list of nodes that respond to ping
pinged_nodes_list = []
IO.popen('mco ping').each do |line|
if /time=/.match(line) # in 'mco ping' output only lines with 'time=' hold node names
pinged_nodes_list += [/^[^ ]*/.match(line).to_s]
@@ -43,7 +76,7 @@ IO.popen('mco ping').each do |line|
end
# load nodes structure from yaml
nodes_raw = YAML::load( File.open( (ARGV.length>0) ? (ARGV[0]) : "nodes.yaml" ) )
nodes_raw = YAML.load_file( opts[:filename] )
nodes = Array.new
domain_name = ''
@@ -87,27 +120,27 @@ compute_nodes = compute_nodes.sort{|x,y| x['uid'] <=> y['uid']}
if nodes_raw['use_case'] == "minimal" or
nodes_raw['use_case'] == "simple"
puppet_consecutive_call(orchestrator, context, ctrl_nodes)
puppet_parallel_call(orchestrator, context, compute_nodes)
puppet_consecutive_call(context, ctrl_nodes)
puppet_parallel_call(context, compute_nodes)
elsif nodes_raw['use_case'] == "compact"
puppet_consecutive_call(orchestrator, context, [ctrl_nodes[0]])
puppet_parallel_call(orchestrator, context, ctrl_nodes[1..2])
puppet_parallel_call(orchestrator, context, ctrl_nodes)
puppet_consecutive_call(orchestrator, context, [ctrl_nodes[0]])
puppet_parallel_call(orchestrator, context, ctrl_nodes[1..2] + compute_nodes)
puppet_consecutive_call(context, ctrl_nodes[0])
puppet_parallel_call(context, ctrl_nodes[1..2])
puppet_parallel_call(context, ctrl_nodes)
puppet_consecutive_call(context, ctrl_nodes[0])
puppet_parallel_call(context, ctrl_nodes[1..2] + compute_nodes)
elsif nodes_raw['use_case'] == "full"
storage_nodes = nodes.select {|n| n['role'] == 'storage'}
storage_nodes = storage_nodes.sort{|x,y| x['uid'] <=> y['uid']}
proxy_nodes = nodes.select {|n| n['role'] == 'proxy'}
proxy_nodes = proxy_nodes.sort{|x,y| x['uid'] <=> y['uid']}
puppet_consecutive_call(orchestrator, context, [ctrl_nodes[0]])
puppet_parallel_call(orchestrator, context, [ctrl_nodes[1]])
puppet_parallel_call(orchestrator, context, compute_nodes)
puppet_parallel_call(orchestrator, context, storage_nodes)
puppet_parallel_call(orchestrator, context, storage_nodes)
puppet_parallel_call(orchestrator, context, [proxy_nodes[0]])
puppet_parallel_call(orchestrator, context, storage_nodes)
puppet_parallel_call(orchestrator, context, [proxy_nodes[1]])
puppet_consecutive_call(context, ctrl_nodes[0])
puppet_parallel_call(context, ctrl_nodes[1])
puppet_parallel_call(context, compute_nodes)
puppet_parallel_call(context, storage_nodes)
puppet_parallel_call(context, storage_nodes)
puppet_parallel_call(context, proxy_nodes[0])
puppet_parallel_call(context, storage_nodes)
puppet_parallel_call(context, proxy_nodes[1])
else
puts "Use case " + nodes_raw['use_case'] + " is not supported!"
end

View File

@@ -1,20 +1,37 @@
require 'json'
require 'logger'
require 'astute/orchestrator'
require 'astute/mclient'
require 'astute/metadata'
require 'astute/deployer'
require 'astute/network'
module Astute
autoload 'Context', 'astute/context'
def self.logger
@logger ||= Logger.new('/var/log/astute.log')
end
def self.logger=(logger)
@logger = logger
end
end
require 'json'
require 'logger'
require 'astute/config'
require 'astute/logparser'
require 'astute/orchestrator'
require 'astute/metadata'
require 'astute/deployment_engine'
require 'astute/network'
require 'astute/puppetd'
require 'astute/rpuppet'
require 'astute/deployment_engine/simple_puppet'
require 'astute/deployment_engine/nailyfact'
module Astute
autoload 'Context', 'astute/context'
autoload 'MClient', 'astute/mclient'
autoload 'ProxyReporter', 'astute/reporter'
autoload 'NodeRemoval', 'astute/node_removal'
def self.logger
@logger ||= Logger.new('/var/log/astute.log')
@logger.formatter = proc {|severity, datetime, progname, msg|
severity_map = {'DEBUG' => 'debug', 'INFO' => 'info', 'WARN' => 'warning',
'ERROR' => 'err', 'FATAL' => 'crit'}
"#{datetime.strftime("%Y-%m-%dT%H:%M:%S")} #{severity_map[severity]}: #{msg}\n"
}
@logger
end
def self.logger=(logger)
@logger = logger
end
config_file = '/opt/astute/astute.conf'
Astute.config.update(YAML.load(File.read(config_file))) if File.exists?(config_file)
end
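
As a sanity check of the formatter above, a small sketch with the log device swapped to STDOUT; the formatter body is copied from Astute.logger, and only the output device and the sample message are assumptions for illustration.

require 'logger'

logger = Logger.new(STDOUT)   # Astute itself writes to /var/log/astute.log
logger.formatter = proc {|severity, datetime, progname, msg|
  severity_map = {'DEBUG' => 'debug', 'INFO' => 'info', 'WARN' => 'warning',
                  'ERROR' => 'err', 'FATAL' => 'crit'}
  "#{datetime.strftime("%Y-%m-%dT%H:%M:%S")} #{severity_map[severity]}: #{msg}\n"
}
logger.warn "node did not respond"
# prints something like: 2013-02-18T19:32:35 warning: node did not respond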

View File

@@ -0,0 +1,48 @@
require 'symboltable'
require 'singleton'
module Astute
class ConfigError < StandardError; end
class UnknownOptionError < ConfigError
attr_reader :name
def initialize(name)
super("Unknown config option #{name}")
@name = name
end
end
class MyConfig
include Singleton
attr_reader :configtable
def initialize
@configtable = SymbolTable.new
end
end
class ParseError < ConfigError
attr_reader :line
def initialize(message, line)
super(message)
@line = line
end
end
def self.config
config = MyConfig.instance.configtable
config.update(default_config) if config.empty?
return config
end
def self.default_config
conf = {}
conf[:PUPPET_TIMEOUT] = 60*60 # maximum time it waits for the whole deployment
conf[:PUPPET_DEPLOY_INTERVAL] = 2 # sleep for ## sec, then check puppet status again
conf[:PUPPET_FADE_TIMEOUT] = 60 # How long it can take for puppet to exit after dumping to last_run_summary
conf[:MC_RETRIES] = 5 # How many times MClient retries an mcagent call before failing
conf[:PUPPET_FADE_INTERVAL] = 1 # Retry every ## seconds to check puppet state if it was running
return conf
end
end
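
A usage sketch for the config above, assuming the gem and its 'symboltable' dependency are installed; SymbolTable (a Hash subclass) also allows method-style reads, which is how the rest of the code accesses these options.

require 'astute'

Astute.config.PUPPET_TIMEOUT        # => 3600 (the default above)
# Override a value, as lib/astute.rb does when /opt/astute/astute.conf exists:
Astute.config.update(:MC_RETRIES => 10)
Astute.config.MC_RETRIES            # => 10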

View File

@@ -1,10 +1,11 @@
module Astute
class Context
attr_accessor :task_id, :reporter
def initialize(task_id, reporter)
@task_id = task_id
@reporter = reporter
end
end
end
module Astute
class Context
attr_accessor :task_id, :reporter, :deploy_log_parser
def initialize(task_id, reporter, deploy_log_parser=nil)
@task_id = task_id
@reporter = reporter
@deploy_log_parser = deploy_log_parser
end
end
end

View File

@@ -1,61 +0,0 @@
require 'json'
require 'timeout'
PUPPET_TIMEOUT = 30*60
module Astute
module Deployer
private
def self.wait_until_puppet_done(puppetd, previous_run_status)
# Wait until the first node is done, then check the next one
# Load on mcollective is reduced by checking only one machine at a time in a set
# In fact we need to know whether the whole set of machines has finished deployment
previous_run_status.each do |res|
prev_run = res.results[:data][:lastrun]
last_run = prev_run
while last_run == prev_run
puppet_status = puppetd.status
if puppet_status[0].results[:data][:status] == "stopped"
# if puppet is stopped but the loop hasn't ended - output message and exit
puts "Puppet stopped on " + puppet_status[0].results[:sender]
break
end
puppetd.discover(:nodes => [res.results[:sender]])
puppet_status = puppetd.status
# logging to false, otherwise we get a message every second
last_run = puppet_status[0].results[:data][:lastrun]
sleep 1 if last_run == prev_run
end
end
end
public
def self.puppet_deploy_with_polling(ctx, nodes)
if nodes.empty?
Astute.logger.info "#{ctx.task_id}: Nodes to deploy are not provided. Do nothing."
return false
end
uids = nodes.map {|n| n['uid']}
puppetd = MClient.new(ctx, "puppetd", uids)
puppet_status = puppetd.status
run_results = puppetd.runonce
Astute.logger.debug "Waiting for puppet to finish deployment on all nodes (timeout = #{PUPPET_TIMEOUT} sec)..."
time_before = Time.now
Timeout::timeout(PUPPET_TIMEOUT) do # 30 min for deployment to be done
# Yes, we are polling here and yes, it's temporary.
# As a better implementation we can later use separate queue to get result, ex. http://www.devco.net/archives/2012/08/19/mcollective-async-result-handling.php
# or we can rewrite puppet agent not to fork, and increase ttl for mcollective RPC.
wait_until_puppet_done(puppetd, puppet_status)
end
run_results.each do |run_result|
puts run_result.results[:data][:output] # puppet output
end
time_spent = Time.now - time_before
Astute.logger.info "#{ctx.task_id}: Spent #{time_spent} seconds on puppet run for following nodes(uids): #{nodes.map {|n| n['uid']}.join(',')}"
end
end
end

View File

@@ -0,0 +1,176 @@
require 'json'
require 'timeout'
module Astute
class DeploymentEngine
def initialize(context)
if self.class.superclass.name == 'Object'
raise "Instantiation of this superclass is not allowed. Please subclass from #{self.class.name}."
end
@ctx = context
end
def deploy(nodes, attrs)
# See implementation in subclasses, this may be overridden
attrs['deployment_mode'] ||= 'multinode_compute' # simple multinode deployment is the default
Astute.logger.info "Deployment mode #{attrs['deployment_mode']}"
result = self.send("deploy_#{attrs['deployment_mode']}", nodes, attrs)
end
def method_missing(method, *args)
Astute.logger.error "Method #{method} is not implemented for #{self.class}, raising exception."
raise "Method #{method} is not implemented for #{self.class}"
end
def attrs_singlenode_compute(nodes, attrs)
ctrl_management_ip = nodes[0]['network_data'].select {|nd| nd['name'] == 'management'}[0]['ip']
ctrl_public_ip = nodes[0]['network_data'].select {|nd| nd['name'] == 'public'}[0]['ip']
attrs['controller_node_address'] = ctrl_management_ip.split('/')[0]
attrs['controller_node_public'] = ctrl_public_ip.split('/')[0]
attrs
end
def deploy_singlenode_compute(nodes, attrs)
# TODO(mihgen) some real stuff is needed
Astute.logger.info "Starting deployment of single node OpenStack"
deploy_piece(nodes, attrs)
end
# we mix all attrs and prepare them for Puppet
# Works for multinode_compute deployment mode
def attrs_multinode_compute(nodes, attrs)
ctrl_nodes = nodes.select {|n| n['role'] == 'controller'}
# TODO(mihgen): we should report error back if there are not enough metadata passed
ctrl_management_ips = []
ctrl_public_ips = []
ctrl_nodes.each do |n|
ctrl_management_ips << n['network_data'].select {|nd| nd['name'] == 'management'}[0]['ip']
ctrl_public_ips << n['network_data'].select {|nd| nd['name'] == 'public'}[0]['ip']
end
attrs['controller_node_address'] = ctrl_management_ips[0].split('/')[0]
attrs['controller_node_public'] = ctrl_public_ips[0].split('/')[0]
attrs
end
# This method is called by Ruby metaprogramming magic from deploy method
# It should not contain any magic with attributes, and should not directly run any type of MC plugins
# It only handles the deployment sequence. See deploy_piece implementation in subclasses.
def deploy_multinode_compute(nodes, attrs)
ctrl_nodes = nodes.select {|n| n['role'] == 'controller'}
Astute.logger.info "Starting deployment of controllers"
deploy_piece(ctrl_nodes, attrs)
@ctx.deploy_log_parser.pattern_spec['expected_line_number'] = 380
compute_nodes = nodes.select {|n| n['role'] == 'compute'}
Astute.logger.info "Starting deployment of computes"
deploy_piece(compute_nodes, attrs)
@ctx.deploy_log_parser.pattern_spec['expected_line_number'] = 300
other_nodes = nodes - ctrl_nodes - compute_nodes
Astute.logger.info "Starting deployment of other nodes"
deploy_piece(other_nodes, attrs)
return
end
def attrs_ha_compute(nodes, attrs)
# TODO(mihgen): we should report error back if there are not enough metadata passed
ctrl_nodes = nodes.select {|n| n['role'] == 'controller'}
ctrl_manag_addrs = {}
ctrl_public_addrs = {}
ctrl_nodes.each do |n|
# current puppet modules require `hostname -s`
hostname = n['fqdn'].split(/\./)[0]
ctrl_manag_addrs.merge!({hostname =>
n['network_data'].select {|nd| nd['name'] == 'management'}[0]['ip'].split(/\//)[0]})
ctrl_public_addrs.merge!({hostname =>
n['network_data'].select {|nd| nd['name'] == 'public'}[0]['ip'].split(/\//)[0]})
end
attrs['ctrl_hostnames'] = ctrl_nodes.map {|n| n['fqdn'].split(/\./)[0]}
attrs['master_hostname'] = ctrl_nodes[0]['fqdn'].split(/\./)[0]
attrs['ctrl_public_addresses'] = ctrl_public_addrs
attrs['ctrl_management_addresses'] = ctrl_manag_addrs
attrs
end
def deploy_ha_compute(nodes, attrs)
ctrl_nodes = nodes.select {|n| n['role'] == 'controller'}
Astute.logger.info "Starting deployment of all controllers one by one, ignoring failure"
ctrl_nodes.each {|n| deploy_piece([n], attrs, retries=0, ignore_failure=true)}
Astute.logger.info "Starting deployment of all controllers, ignoring failure"
deploy_piece(ctrl_nodes, attrs, retries=0, ignore_failure=true)
Astute.logger.info "Starting deployment of 1st controller again, ignoring failure"
deploy_piece([ctrl_nodes[0]], attrs, retries=0, ignore_failure=true)
retries = 1
Astute.logger.info "Starting deployment of all controllers until it completes, "\
"allowed retries: #{retries}"
deploy_piece(ctrl_nodes, attrs, retries=retries)
# FIXME(mihgen): put right numbers for logs
@ctx.deploy_log_parser.pattern_spec['expected_line_number'] = 380
compute_nodes = nodes.select {|n| n['role'] == 'compute'}
Astute.logger.info "Starting deployment of computes"
deploy_piece(compute_nodes, attrs)
@ctx.deploy_log_parser.pattern_spec['expected_line_number'] = 300
other_nodes = nodes - ctrl_nodes - compute_nodes
Astute.logger.info "Starting deployment of other nodes"
deploy_piece(other_nodes, attrs)
return
end
private
def nodes_status(nodes, status)
{'nodes' => nodes.map { |n| {'uid' => n['uid'], 'status' => status} }}
end
def validate_nodes(nodes)
if nodes.empty?
Astute.logger.info "#{@ctx.task_id}: Nodes to deploy are not provided. Do nothing."
return false
end
return true
end
def calculate_networks(data)
interfaces = {}
data ||= []
Astute.logger.info "calculate_networks function was provided with #{data.size} interfaces"
data.each do |iface|
Astute.logger.debug "Calculating network for #{iface.inspect}"
if iface['vlan'] and iface['vlan'] != 0
name = [iface['dev'], iface['vlan']].join('.')
interfaces[name] = {"vlan" => "yes"}
else
name = iface['dev']
interfaces[name] = {}
end
interfaces[name]['bootproto'] = 'none'
if iface['ip']
ipaddr = iface['ip'].split('/')[0]
interfaces[name]['ipaddr'] = ipaddr
interfaces[name]['netmask'] = iface['netmask'] #=IPAddr.new('255.255.255.255').mask(ipmask[1]).to_s
interfaces[name]['bootproto'] = 'static'
if iface['brd']
interfaces[name]['broadcast'] = iface['brd']
end
end
interfaces[name]['ensure'] = 'present'
Astute.logger.debug "Calculated network for interface: #{name}, data: #{interfaces[name].inspect}"
end
interfaces['lo'] = {} unless interfaces.has_key?('lo')
interfaces['eth0'] = {'bootproto' => 'dhcp',
'ensure' => 'present'} unless interfaces.has_key?('eth0')
# Example of return:
# {"eth0":{"ensure":"present","bootproto":"dhcp"},"lo":{},
# "eth0.102":{"ipaddr":"10.20.20.20","ensure":"present","vlan":"yes",
# "netmask":"255.255.255.0","broadcast":"10.20.20.255","bootproto":"static"}}
return interfaces
end
end
end
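
Since deploy dispatches to deploy_#{deployment_mode} via send and the base class forbids direct instantiation, a new engine only has to provide deploy_piece (plus optional attrs_* hooks). A minimal hypothetical subclass; the name NoopEngine and its behaviour are invented for illustration.

# Assumes 'astute' is loaded so Astute::DeploymentEngine exists.
class Astute::DeploymentEngine::NoopEngine < Astute::DeploymentEngine
  # Called by the deploy_<mode> methods of the superclass.
  def deploy_piece(nodes, attrs, retries=2, ignore_failure=false)
    return false unless validate_nodes(nodes)
    Astute.logger.info "#{@ctx.task_id}: would deploy uids: #{nodes.map {|n| n['uid']}.join(',')}"
  end
end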

View File

@@ -0,0 +1,50 @@
class Astute::DeploymentEngine::NailyFact < Astute::DeploymentEngine
def deploy(nodes, attrs)
attrs_for_mode = self.send("attrs_#{attrs['deployment_mode']}", nodes, attrs)
super(nodes, attrs_for_mode)
end
def create_facts(node, attrs)
metapublisher = Astute::Metadata.method(:publish_facts)
# calculate_networks method is common and you can find it in superclass
# if node['network_data'] is undefined, we use an empty list because we later iterate over it;
# otherwise we would get an error on nil
node_network_data = node['network_data'].nil? ? [] : node['network_data']
network_data_puppet = calculate_networks(node_network_data)
metadata = {'role' => node['role'], 'uid' => node['uid'], 'network_data' => network_data_puppet.to_json }
attrs.each do |k, v|
if v.is_a? String
metadata[k] = v
else
# It's now up to the puppet side to decode the JSON
metadata[k] = v.to_json
end
end
# Let's calculate interface settings we need for OpenStack:
node_network_data.each do |iface|
device = (iface['vlan'] and iface['vlan'] > 0) ? [iface['dev'], iface['vlan']].join('.') : iface['dev']
metadata[iface['name'] + '_interface'] = device
end
# internal_address is required for HA..
metadata['internal_address'] = node['network_data'].select {|nd| nd['name'] == 'management'}[0]['ip'].split(/\//)[0]
metapublisher.call(@ctx, node['uid'], metadata)
end
def deploy_piece(nodes, attrs, retries=2, ignore_failure=false)
return false unless validate_nodes(nodes)
@ctx.reporter.report nodes_status(nodes, 'deploying')
Astute.logger.info "#{@ctx.task_id}: Calculation of required attributes to pass, include netw.settings"
nodes.each do |node|
create_facts(node, attrs)
end
Astute.logger.info "#{@ctx.task_id}: All required attrs/metadata passed via facts extension. Starting deployment."
Astute::PuppetdDeployer.deploy(@ctx, nodes, retries, ignore_failure)
nodes_roles = nodes.map { |n| { n['uid'] => n['role'] } }
Astute.logger.info "#{@ctx.task_id}: Finished deployment of nodes => roles: #{nodes_roles.inspect}"
end
end
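
To make create_facts concrete, a worked sketch of its input and the metadata it would publish; the addresses and attribute values are invented.

node = {'uid' => '1', 'role' => 'controller',
        'network_data' => [
          {'name' => 'management', 'dev' => 'eth0', 'vlan' => 102, 'ip' => '10.20.20.20/24'},
          {'name' => 'public',     'dev' => 'eth1', 'vlan' => 0,   'ip' => '172.16.1.2/24'}]}
attrs = {'mysql_password' => 'secret', 'quantum' => false}
# create_facts(node, attrs) would publish roughly:
#   'role' => 'controller', 'uid' => '1',
#   'network_data' => JSON of calculate_networks(node['network_data']),
#   'mysql_password' => 'secret', 'quantum' => 'false' (non-strings go through to_json),
#   'management_interface' => 'eth0.102', 'public_interface' => 'eth1',
#   'internal_address' => '10.20.20.20'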

View File

@@ -0,0 +1,12 @@
class Astute::DeploymentEngine::PuppetKernel < Astute::DeploymentEngine
# NOTE(mihgen): Not completed
def deploy_piece(nodes, attrs)
return false unless validate_nodes(nodes)
case nodes[0]['role']
when "controller"
classes = {"nailytest::test_rpuppet" => {"rpuppet" => ["controller", "privet"]}}
Astute::RpuppetDeployer.rpuppet_deploy(@ctx, nodes, attrs, classes)
# network_data = calculate_networks(node['network_data'])
end
end
end

View File

@@ -0,0 +1,11 @@
class Astute::DeploymentEngine::SimplePuppet < Astute::DeploymentEngine
# It is a trivial puppet run. It's assumed that the user has prepared site.pp
# with all required parameters for the modules
def deploy_piece(nodes, *args)
return false unless validate_nodes(nodes)
@ctx.reporter.report nodes_status(nodes, 'deploying')
Astute::PuppetdDeployer.deploy(@ctx, nodes)
nodes_roles = nodes.map { |n| { n['uid'] => n['role'] } }
Astute.logger.info "#{@ctx.task_id}: Finished deployment of nodes => roles: #{nodes_roles.inspect}"
end
end

View File

@@ -0,0 +1,200 @@
module Astute
module LogParser
@separator = "SEPARATOR\n"
@log_portion = 10000
class NoParsing
attr_accessor :pattern_spec
def initialize(*args)
@pattern_spec = {}
end
def method_missing(*args)
# We just eat the call if we don't want to deal with logs
end
def progress_calculate(*args)
[]
end
end
class ParseNodeLogs
attr_accessor :pattern_spec
def initialize(filename, pattern_spec=nil)
@filename = filename
if pattern_spec.nil?
@pattern_spec = {'type' => 'count-lines',
'endlog_patterns' => [{'pattern' => /Finished catalog run in [0-9]+\.[0-9]* seconds\n/, 'progress' => 1.0}],
'expected_line_number' => 500}
else
@pattern_spec = pattern_spec
end
end
def progress_calculate(uids_to_calc, nodes)
nodes_progress = []
uids_to_calc.each do |uid|
node = nodes.select {|n| n['uid'] == uid}[0]
path = "/var/log/remote/#{node['ip']}/#{@filename}"
nodes_progress << {
'uid' => uid,
'progress' => (LogParser::get_log_progress(path, @pattern_spec)*100).to_i # Return percent of progress
}
end
return nodes_progress
end
def add_separator(nodes)
nodes.each do |node|
path = "/var/log/remote/#{node['ip']}/#{@filename}"
LogParser::add_log_separator(path)
end
end
end
public
def self.add_log_separator(path, separator=@separator)
File.open(path, 'a') {|fo| fo.write separator } if File.readable?(path)
end
def self.get_log_progress(path, pattern_spec)
# Pattern specification example:
# pattern_spec = {'type' => 'pattern-list', 'separator' => "custom separator\n",
# 'chunk_size' => 10000,
# 'pattern_list' => [
# {'pattern' => 'to step installpackages', 'progress' => 0.16},
# {'pattern' => 'Installing',
# 'number' => 210, # It currently installs 205 packages. Add 5 for future growth.
# 'p_min' => 0.16, # min percent
# 'p_max' => 0.87 # max percent
# }
# ]
# }
return 0 unless File.readable?(path)
progress = nil
File.open(path) do |fo|
# Try to find well-known ends of log.
endlog = find_endlog_patterns(fo, pattern_spec)
return endlog if endlog
# Start reading from end of file.
fo.pos = fo.stat.size
if pattern_spec['type'] == 'count-lines'
progress = simple_line_counter(fo, pattern_spec)
elsif pattern_spec['type'] == 'pattern-list'
progress = simple_pattern_finder(fo, pattern_spec)
end
end
unless progress
Naily.logger.warn("Wrong pattern #{pattern_spec.inspect} defined for calculating progress via logs.")
return 0
end
return progress
end
private
def self.simple_pattern_finder(fo, pattern_spec)
# Use custom separator if defined.
separator = pattern_spec['separator']
separator = @separator unless separator
log_patterns = pattern_spec['pattern_list']
unless log_patterns
Naily.logger.warn("Wrong pattern #{pattern_spec.inspect} defined for calculating progress via logs.")
return 0
end
chunk = get_chunk(fo, pattern_spec['chunk_size'])
# NOTE(mihgen): Following line fixes "undefined method `rindex' for nil:NilClass" for empty log file
return 0 unless chunk
pos = chunk.rindex(separator)
chunk = chunk.slice((pos + separator.size)..-1) if pos
block = chunk.split("\n")
return 0 unless block
while true
string = block.pop
return 0 unless string # If we found nothing
log_patterns.each do |pattern|
if string.include?(pattern['pattern'])
return pattern['progress'] if pattern['progress']
if pattern['number']
string = block.pop
counter = 1
while string
counter += 1 if string.include?(pattern['pattern'])
string = block.pop
end
progress = counter.to_f / pattern['number']
progress = 1 if progress > 1
progress = pattern['p_min'] + progress * (pattern['p_max'] - pattern['p_min'])
return progress
end
Naily.logger.warn("Wrong pattern #{pattern_spec.inspect} defined for calculating progress via log.")
end
end
end
end
def self.find_endlog_patterns(fo, pattern_spec)
endlog_patterns = pattern_spec['endlog_patterns']
return nil unless endlog_patterns
fo.pos = fo.stat.size
chunk = get_chunk(fo, 100)
return nil unless chunk
endlog_patterns.each do |pattern|
return pattern['progress'] if chunk =~ pattern['pattern']
end
return nil
end
def self.simple_line_counter(fo, pattern_spec)
# Use custom separator if defined.
separator = pattern_spec['separator']
separator = @separator unless separator
counter = 0
end_of_scope = false
previous_subchunk = ''
until end_of_scope
chunk = get_chunk(fo, pattern_spec['chunk_size'])
break unless chunk
# Trying to find separator on border between chunks.
subchunk = chunk.slice((1-separator.size)..-1)
# End of file reached. Exit from loop.
end_of_scope = true unless subchunk
if subchunk and (subchunk + previous_subchunk).include?(separator)
# Separator found on border between chunks. Exit from loop.
end_of_scope = true
next
end
pos = chunk.rindex(separator)
if pos
end_of_scope = true
chunk = chunk.slice((pos + separator.size)..-1)
end
counter += chunk.count("\n")
end
number = pattern_spec['expected_line_number']
unless number
Naily.logger.warn("Wrong pattern #{pattern_spec.inspect} defined for calculating progress via log.")
return 0
end
progress = counter.to_f / number
progress = 1 if progress > 1
return progress
end
def self.get_chunk(fo, size=nil)
size = @log_portion unless size
return nil if fo.pos == 0
size = fo.pos if fo.pos < size
next_pos = fo.pos - size
fo.pos = next_pos
block = fo.read(size)
fo.pos = next_pos
return block
end
end
end
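
A usage sketch for ParseNodeLogs, assuming each node's puppet log is syslogged to /var/log/remote/<ip>/puppet-agent.log (the filename the Orchestrator passes); the node data is invented.

parser = Astute::LogParser::ParseNodeLogs.new('puppet-agent.log')
nodes  = [{'uid' => '1', 'ip' => '10.20.0.10'}]   # illustrative node
parser.add_separator(nodes)        # mark the current end of each node's log
# ... puppet runs on the nodes ...
parser.progress_calculate(['1'], nodes)
# => [{'uid' => '1', 'progress' => 42}]  # percent, counted from lines after the separator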

View File

@@ -1,52 +1,77 @@
require 'mcollective'
module Astute
class MClient
include MCollective::RPC
include Astute
def initialize(ctx, agent, nodes=nil, check_result=true)
@task_id = ctx.task_id
@agent = agent
@nodes = nodes.map { |n| n.to_s }
@check_result = check_result
@mc = rpcclient(agent, :exit_on_failure => false)
@mc.progress = false
unless @nodes.nil?
@mc.discover(:nodes => @nodes)
end
end
def method_missing(method, *args)
res = @mc.send(method, *args)
unless method == :discover
check_mcollective_result(method, res) if @check_result
else
@nodes = args[0][:nodes]
end
return res
end
private
def check_mcollective_result(method, stats)
# Following error might happen because of misconfiguration, ex. direct_addressing = 1 only on client
raise "#{@task_id}: MCollective client failed to call agent '#{@agent}' with method '#{method}' and didn't even return anything. Check logs." if stats.length == 0
if stats.length < @nodes.length
# some nodes didn't respond
nodes_responded = stats.map { |n| n.results[:sender] }
not_responded = @nodes - nodes_responded
raise "#{@task_id}: MCollective agents '#{not_responded.join(',')}' didn't respond."
end
# TODO: should we collect all errors and make one exception with all of data?
stats.each do |node|
status = node.results[:statuscode]
if status != 0
raise "#{@task_id}: MCollective call failed in agent '#{node.agent}', method '#{method}', results: #{node.results.inspect}"
else
Astute.logger.debug "#{@task_id}: MC agent '#{node.agent}', method '#{method}' succeeded, results: #{node.results.inspect}"
end
end
end
end
end
require 'mcollective'
module Astute
class MClient
include MCollective::RPC
attr_accessor :retries
def initialize(ctx, agent, nodes=nil, check_result=true, timeout=nil)
@task_id = ctx.task_id
@agent = agent
@nodes = nodes.map { |n| n.to_s }
@check_result = check_result
@mc = rpcclient(agent, :exit_on_failure => false)
@mc.timeout = timeout if timeout
@mc.progress = false
@retries = Astute.config.MC_RETRIES
unless @nodes.nil?
@mc.discover(:nodes => @nodes)
end
end
def method_missing(method, *args)
res = @mc.send(method, *args)
if method == :discover
@nodes = args[0][:nodes]
return res
end
# Enable if needed. In the normal case it fills the screen pretty fast
log_result(res, method)
return res unless @check_result
err_msg = ''
# Following error might happen because of misconfiguration, ex. direct_addressing = 1 only on client
# or.. could be just some hang? Let's retry if @retries is set
if res.length < @nodes.length
# some nodes didn't respond
retry_index = 1
while retry_index <= @retries
sleep rand
nodes_responded = res.map { |n| n.results[:sender] }
not_responded = @nodes - nodes_responded
Astute.logger.debug "Retry ##{retry_index} to run mcollective agent on nodes: '#{not_responded.join(',')}'"
@mc.discover(:nodes => not_responded)
new_res = @mc.send(method, *args)
log_result(new_res, method)
# new_res can have some nodes which finally responded
res += new_res
break if res.length == @nodes.length
retry_index += 1
end
if res.length < @nodes.length
nodes_responded = res.map { |n| n.results[:sender] }
not_responded = @nodes - nodes_responded
err_msg += "#{@task_id}: MCollective agents '#{not_responded.join(',')}' didn't respond.\n"
end
end
failed = res.select { |x| x.results[:statuscode] != 0 }
if failed.any?
err_msg += "#{@task_id}: MCollective call failed in agent '#{@agent}', "\
"method '#{method}', failed nodes: #{failed.map{|x| x.results[:sender]}.join(',')}"
end
raise err_msg unless err_msg.empty?
return res
end
private
def log_result(result, method)
result.each do |node|
Astute.logger.debug "#{@task_id}: MC agent '#{node.agent}', method '#{method}', "\
"results: #{node.results.inspect}"
end
end
end
end
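
A usage sketch for the retrying client above, assuming a working MCollective client configuration; any unknown method (here runonce) is forwarded to the RPC client, and nodes that fail to answer are re-discovered and retried up to Astute.config.MC_RETRIES times. The reporter stub and uids are invented.

reporter = Object.new                       # stub; MClient only uses ctx.task_id
ctx = Astute::Context.new(`uuidgen`.strip, reporter)
puppetd = Astute::MClient.new(ctx, "puppetd", ['1', '2'])
puppetd.retries = 3                         # override the MC_RETRIES default
res = puppetd.runonce                       # raises if a node never answers or returns statuscode != 0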

View File

@@ -1,22 +1,15 @@
require 'json'
module Astute
module Metadata
def self.publish_facts(ctx, nodes)
if nodes.empty?
Astute.logger.info "#{ctx.task_id}: Nodes to post metadata into are not provided. Do nothing."
return false
end
uids = nodes.map {|n| n['uid']}
Astute.logger.debug "#{ctx.task_id}: nailyfact - storing metadata for nodes: #{uids.join(',')}"
nodes.each do |node|
nailyfact = MClient.new(ctx, "nailyfact", [node['uid']])
metadata = {'role' => node['role']}
# This is a synchronous RPC call, so we are sure that the data was sent and processed remotely
stats = nailyfact.post(:value => metadata.to_json)
end
end
end
end
require 'json'
require 'ipaddr'
module Astute
module Metadata
def self.publish_facts(ctx, uid, metadata)
# This is a synchronous RPC call, so we are sure that the data was sent and processed remotely
Astute.logger.info "#{ctx.task_id}: nailyfact - storing metadata for node uid=#{uid}"
Astute.logger.debug "#{ctx.task_id}: nailyfact stores metadata: #{metadata.inspect}"
nailyfact = MClient.new(ctx, "nailyfact", [uid])
# TODO(mihgen) check results!
stats = nailyfact.post(:value => metadata.to_json)
end
end
end

View File

@@ -1,24 +1,48 @@
module Astute
module Network
def self.check_network(ctx, nodes, networks)
if nodes.length < 2
Astute.logger.info "#{ctx.task_id}: Network checker: at least two nodes are required to check network connectivity. Do nothing."
return []
end
uids = nodes.map {|n| n['uid']}
# TODO Everything breaks if agent not found. We have to handle that
net_probe = MClient.new(ctx, "net_probe", uids)
net_probe.start_frame_listeners(:iflist => ['eth0'].to_json)
# Interface name is hardcoded for now. Later we expect it to be passed from Nailgun backend
data_to_send = {'eth0' => networks.map {|n| n['vlan_id']}.join(',')}
net_probe.send_probing_frames(:interfaces => data_to_send.to_json)
stats = net_probe.get_probing_info
result = stats.map {|node| {'sender' => node.results[:sender], 'data' => node.results[:data]} }
Astute.logger.debug "#{ctx.task_id}: Network checking is done. Raw results: #{result.inspect}"
return result
end
end
end
module Astute
module Network
def self.check_network(ctx, nodes, networks)
if nodes.empty?
Astute.logger.error "#{ctx.task_id}: Network checker: nodes list is empty. Nothing to check."
return {'status' => 'error', 'error' => "Nodes list is empty. Nothing to check."}
elsif nodes.size == 1
Astute.logger.info "#{ctx.task_id}: Network checker: nodes list contains one node only. Do nothing."
return {'nodes' =>
[{'uid'=>nodes[0]['uid'],
'networks'=>[{'vlans' => networks.map {|n| n['vlan_id'].to_i}, 'iface'=>'eth0'}]
}]
}
end
uids = nodes.map {|n| n['uid']}
# TODO Everything breaks if agent not found. We have to handle that
net_probe = MClient.new(ctx, "net_probe", uids)
net_probe.start_frame_listeners(:iflist => ['eth0'].to_json)
ctx.reporter.report({'progress' => 30, 'status' => 'verification'})
# Interface name is hardcoded for now. Later we expect it to be passed from Nailgun backend
data_to_send = {'eth0' => networks.map {|n| n['vlan_id']}.join(',')}
net_probe.send_probing_frames(:interfaces => data_to_send.to_json)
ctx.reporter.report({'progress' => 60, 'status' => 'verification'})
stats = net_probe.get_probing_info
result = stats.map {|node| {'uid' => node.results[:sender],
'networks' => check_vlans_by_traffic(
node.results[:sender],
node.results[:data][:neighbours])} }
Astute.logger.debug "#{ctx.task_id}: Network checking is done. Results: #{result.inspect}"
return {'nodes' => result}
end
private
def self.check_vlans_by_traffic(uid, data)
return data.map{|iface, vlans| {
'iface' => iface,
'vlans' =>
vlans.reject{|k,v|
v.size==1 and v.has_key?(uid)
}.keys.map{|n| n.to_i}
}
}
end
end
end
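
A worked example of the post-processing above; the neighbours structure ({iface => {vlan => {uid => frame count}}}) is inferred from how the code indexes it, and the numbers are invented.

uid  = '2'
data = {'eth0' => {'100' => {'1' => 10, '2' => 12},   # vlan 100 seen by node '1' as well
                   '101' => {'2' => 9}}}              # vlan 101 seen only by the sender
Astute::Network.check_vlans_by_traffic(uid, data)
# => [{'iface' => 'eth0', 'vlans' => [100]}]          # 101 dropped: no other node saw it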

View File

@@ -0,0 +1,46 @@
module Astute
class NodeRemoval
def remove(ctx, nodes)
# TODO(mihgen): 1. Nailgun should process node error message
# 2. Should we rename nodes -> removed_nodes array?
# 3. If an exception is raised here, we should not fail the whole task, only the failed node
# 4. If check_result=false, do we try to remove only once? We have to try a few times..
if nodes.empty?
Astute.logger.info "#{ctx.task_id}: Nodes to remove are not provided. Do nothing."
return {'nodes' => []}
end
uids = nodes.map {|n| n['uid'].to_s}
Astute.logger.info "#{ctx.task_id}: Starting removing of nodes: #{uids.inspect}"
remover = MClient.new(ctx, "erase_node", uids, check_result=false)
result = remover.erase_node(:reboot => true)
Astute.logger.debug "#{ctx.task_id}: Data received from nodes: #{result.inspect}"
inaccessible_uids = uids - result.map {|n| n.results[:sender]}
error_nodes = []
erased_nodes = []
result.each do |n|
if n.results[:statuscode] != 0
error_nodes << {'uid' => n.results[:sender],
'error' => "RPC agent 'erase_node' failed. "\
"Result: #{n.results.inspect}"}
elsif not n.results[:data][:rebooted]
error_nodes << {'uid' => n.results[:sender],
'error' => "RPC method 'erase_node' failed with "\
"message: #{n.results[:data][:error_msg]}"}
else
erased_nodes << {'uid' => n.results[:sender]}
end
end
error_nodes.concat(inaccessible_uids.map {|n| {'uid' => n,
'error' => "Node not answered by RPC."}})
if error_nodes.empty?
answer = {'nodes' => erased_nodes}
else
answer = {'status' => 'error', 'nodes' => erased_nodes, 'error_nodes' => error_nodes}
Astute.logger.error "#{ctx.task_id}: Removing of nodes #{uids.inspect} finished "\
"with errors: #{error_nodes.inspect}"
end
Astute.logger.info "#{ctx.task_id}: Finished removing of nodes: #{uids.inspect}"
return answer
end
end
end

View File

@@ -1,106 +1,44 @@
module Astute
class Orchestrator
def initialize
@deployer = Astute::Deployer.method(:puppet_deploy_with_polling)
@metapublisher = Astute::Metadata.method(:publish_facts)
@check_network = Astute::Network.method(:check_network)
end
def node_type(reporter, task_id, nodes)
context = Context.new(task_id, reporter)
uids = nodes.map {|n| n['uid']}
systemtype = MClient.new(context, "systemtype", uids, check_result=false)
systems = systemtype.get_type
return systems.map {|n| {'uid' => n.results[:sender], 'node_type' => n.results[:data][:node_type].chomp}}
end
def deploy(reporter, task_id, nodes)
context = Context.new(task_id, reporter)
ctrl_nodes = nodes.select {|n| n['role'] == 'controller'}
for node in ctrl_nodes do
deploy_piece(context, node, false)
end
reporter.report({'progress' => 40})
compute_nodes = nodes.select {|n| n['role'] == 'compute'}
deploy_piece(context, compute_nodes, false)
reporter.report({'progress' => 60})
# other_nodes = nodes - ctrl_nodes - compute_nodes
# deploy_piece(context, other_nodes)
return
end
def remove_nodes(reporter, task_id, nodes)
context = Context.new(task_id, reporter)
result = simple_remove_nodes(context, nodes)
return result
end
def verify_networks(reporter, task_id, nodes, networks)
context = Context.new(task_id, reporter)
result = @check_network.call(context, nodes, networks)
result.map! { |node| {'uid' => node['sender'],
'networks' => check_vlans_by_traffic(node['data'][:neighbours]) }
}
return {'networks' => result}
end
def deploy_piece(ctx, nodes, publish_role_in_fact)
nodes_roles = nodes.map { |n| { n['uid'] => n['role'] } }
Astute.logger.info "#{ctx.task_id}: Starting deployment of nodes => roles: #{nodes_roles.inspect}"
ctx.reporter.report nodes_status(nodes, 'deploying')
if publish_role_in_fact
@metapublisher.call(ctx, nodes)
end
@deployer.call(ctx, nodes)
ctx.reporter.report nodes_status(nodes, 'ready')
Astute.logger.info "#{ctx.task_id}: Finished deployment of nodes => roles: #{nodes_roles.inspect}"
end
private
def simple_remove_nodes(ctx, nodes)
if nodes.empty?
Astute.logger.info "#{ctx.task_id}: Nodes to remove are not provided. Do nothing."
return {'nodes' => nodes}
end
uids = nodes.map {|n| n['uid']}
Astute.logger.info "#{ctx.task_id}: Starting removing of nodes: #{uids.inspect}"
remover = MClient.new(ctx, "erase_node", uids, check_result=false)
result = remover.erase_node(:reboot => true)
Astute.logger.debug "#{ctx.task_id}: Data resieved from nodes: #{result.inspect}"
inaccessible_uids = uids - result.map {|n| n.results[:sender]}
error_nodes = []
erased_nodes = []
result.each do |n|
if n.results[:statuscode] != 0
error_nodes << {'uid' => n.results[:sender],
'error' => "RPC agent 'erase_node' failed. Result: #{n.results.inspect}"}
elsif not n.results[:data][:rebooted]
error_nodes << {'uid' => n.results[:sender],
'error' => "RPC method 'erase_node' failed with message: #{n.results[:data][:error_msg]}"}
else
erased_nodes << {'uid' => n.results[:sender]}
end
end
error_nodes.concat(inaccessible_uids.map {|n| {'uid' => n, 'error' => "Node not answered by RPC."}})
if error_nodes.empty?
answer = {'nodes' => erased_nodes}
else
answer = {'status' => 'error', 'nodes' => erased_nodes, 'error_nodes' => error_nodes}
Astute.logger.error "#{ctx.task_id}: Removing of nodes #{uids.inspect} ends with errors: #{error_nodes.inspect}"
end
Astute.logger.info "#{ctx.task_id}: Finished removing of nodes: #{uids.inspect}"
return answer
end
def nodes_status(nodes, status)
{'nodes' => nodes.map { |n| {'uid' => n['uid'], 'status' => status} }}
end
def check_vlans_by_traffic(data)
return data.map{|iface, vlans| {'iface' => iface, 'vlans' => vlans.keys.map{|n| n.to_i} } }
end
end
end
module Astute
class Orchestrator
def initialize(deploy_engine=nil, log_parsing=false)
@deploy_engine = deploy_engine || Astute::DeploymentEngine::NailyFact
if log_parsing
@log_parser = LogParser::ParseNodeLogs.new('puppet-agent.log')
else
@log_parser = LogParser::NoParsing.new
end
end
def node_type(reporter, task_id, nodes, timeout=nil)
context = Context.new(task_id, reporter)
uids = nodes.map {|n| n['uid']}
systemtype = MClient.new(context, "systemtype", uids, check_result=false, timeout)
systems = systemtype.get_type
return systems.map {|n| {'uid' => n.results[:sender],
'node_type' => n.results[:data][:node_type].chomp}}
end
def deploy(up_reporter, task_id, nodes, attrs)
raise "Nodes to deploy are not provided!" if nodes.empty?
# Following line fixes issues with uids: it should always be a string
nodes.map { |x| x['uid'] = x['uid'].to_s }
proxy_reporter = ProxyReporter.new(up_reporter)
context = Context.new(task_id, proxy_reporter, @log_parser)
deploy_engine_instance = @deploy_engine.new(context)
Astute.logger.info "Using #{deploy_engine_instance.class} for deployment."
deploy_engine_instance.deploy(nodes, attrs)
end
def remove_nodes(reporter, task_id, nodes)
context = Context.new(task_id, reporter)
node_removal = NodeRemoval.new
return node_removal.remove(context, nodes)
end
def verify_networks(reporter, task_id, nodes, networks)
context = Context.new(task_id, reporter)
result = Network.check_network(context, nodes, networks)
return result
end
end
end

View File

@@ -0,0 +1,142 @@
require 'json'
require 'timeout'
module Astute
module PuppetdDeployer
private
# Runs puppetd.runonce only if puppet is not running on the host at the time
# If it is running, it waits a bit and tries again.
# Returns the list of node uids on which puppet appears to be hung.
def self.puppetd_runonce(puppetd, uids)
started = Time.now.to_i
while Time.now.to_i - started < Astute.config.PUPPET_FADE_TIMEOUT
puppetd.discover(:nodes => uids)
last_run = puppetd.last_run_summary
running = last_run.select {|x| x.results[:data][:status] == 'running'}.map {|n| n.results[:sender]}
not_running = uids - running
if not_running.any?
puppetd.discover(:nodes => not_running)
puppetd.runonce
end
uids = running
break if uids.empty?
sleep Astute.config.PUPPET_FADE_INTERVAL
end
Astute.logger.debug "puppetd_runonce completed within #{Time.now.to_i - started} seconds."
Astute.logger.debug "Following nodes have puppet hung: '#{running.join(',')}'" if running.any?
return running
end
def self.calc_nodes_status(last_run, prev_run)
# Finished are those which are not in running state,
# and changed their last_run time, which is changed after application of catalog,
# at the time of updating last_run_summary file. At that particular time puppet is
# still running, and will finish in a couple of seconds.
finished = last_run.select {|x| x.results[:data][:time]['last_run'] !=
prev_run.select {|ps|
ps.results[:sender] == x.results[:sender]
}[0].results[:data][:time]['last_run'] and x.results[:data][:status] != 'running'}
# Looking for error_nodes among only finished - we don't care about previous failures
error_nodes = finished.select { |n|
n.results[:data][:resources]['failed'] != 0}.map {|x| x.results[:sender]}
succeed_nodes = finished.select { |n|
n.results[:data][:resources]['failed'] == 0}.map {|x| x.results[:sender]}
# Running are all which didn't appear in finished
running_nodes = last_run.map {|n| n.results[:sender]} - finished.map {|n| n.results[:sender]}
nodes_to_check = running_nodes + succeed_nodes + error_nodes
unless nodes_to_check.size == last_run.size
raise "Should never happen. Internal error in nodes statuses calculation. Statuses calculated for: #{nodes_to_check.inspect},"
"nodes passed to check statuses of: #{last_run.map {|n| n.results[:sender]}}"
end
return {'succeed' => succeed_nodes, 'error' => error_nodes, 'running' => running_nodes}
end
public
def self.deploy(ctx, nodes, retries=2, ignore_failure=false)
# TODO: can we hide retries, ignore_failure into @ctx ?
uids = nodes.map {|n| n['uid']}
# TODO(mihgen): handle exceptions from mclient, raised if agent does not respond or responded with error
puppetd = MClient.new(ctx, "puppetd", uids)
prev_summary = puppetd.last_run_summary
# Keep info about retries for each node
node_retries = {}
uids.each {|x| node_retries.merge!({x => retries}) }
begin
ctx.deploy_log_parser.add_separator(nodes)
rescue Exception => e
Astute.logger.warn "Some error occurred when add separator to logs: #{e.message}, trace: #{e.backtrace.inspect}"
end
Astute.logger.debug "Waiting for puppet to finish deployment on all nodes (timeout = #{Astute.config.PUPPET_TIMEOUT} sec)..."
time_before = Time.now
Timeout::timeout(Astute.config.PUPPET_TIMEOUT) do
puppetd_runonce(puppetd, uids)
nodes_to_check = uids
last_run = prev_summary
while nodes_to_check.any?
calc_nodes = calc_nodes_status(last_run, prev_summary)
Astute.logger.debug "Nodes statuses: #{calc_nodes.inspect}"
# At least we will report about successfully deployed nodes
nodes_to_report = calc_nodes['succeed'].map { |n| {'uid' => n, 'status' => 'ready'} }
# Process retries
nodes_to_retry = []
calc_nodes['error'].each do |uid|
if node_retries[uid] > 0
node_retries[uid] -= 1
Astute.logger.debug "Puppet on node #{uid.inspect} will be restarted. "\
"#{node_retries[uid]} retries remained."
nodes_to_retry << uid
else
Astute.logger.debug "Node #{uid.inspect} has failed to deploy. There is no more retries for puppet run."
nodes_to_report << {'uid' => uid, 'status' => 'error', 'error_type' => 'deploy'} unless ignore_failure
end
end
if nodes_to_retry.any?
Astute.logger.info "Retrying to run puppet for following error nodes: #{nodes_to_retry.join(',')}"
puppetd_runonce(puppetd, nodes_to_retry)
# We need this magic with prev_summary to reflect new puppetd run statuses..
prev_summary.delete_if { |x| nodes_to_retry.include?(x.results[:sender]) }
prev_summary += last_run.select { |x| nodes_to_retry.include?(x.results[:sender]) }
end
# /end of processing retries
if calc_nodes['running'].any?
begin
# Pass nodes because logs calculation needs IP address of node, not just uid
nodes_progress = ctx.deploy_log_parser.progress_calculate(calc_nodes['running'], nodes)
if nodes_progress.any?
Astute.logger.debug "Got progress for nodes: #{nodes_progress.inspect}"
# Nodes with progress are running, so they are not included in nodes_to_report yet
nodes_progress.map! {|x| x.merge!({'status' => 'deploying'})}
nodes_to_report += nodes_progress
end
rescue Exception => e
Astute.logger.warn "Some error occurred when parse logs for nodes progress: #{e.message}, "\
"trace: #{e.backtrace.inspect}"
end
end
ctx.reporter.report('nodes' => nodes_to_report) if nodes_to_report.any?
# we will iterate only over running nodes and those that we restart deployment for
nodes_to_check = calc_nodes['running'] + nodes_to_retry
break if nodes_to_check.empty?
sleep Astute.config.PUPPET_DEPLOY_INTERVAL
puppetd.discover(:nodes => nodes_to_check)
last_run = puppetd.last_run_summary
end
end
time_spent = Time.now - time_before
Astute.logger.info "#{ctx.task_id}: Spent #{time_spent} seconds on puppet run "\
"for following nodes(uids): #{nodes.map {|n| n['uid']}.join(',')}"
end
end
end
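
To illustrate calc_nodes_status, a sketch with stubbed RPC results; OpenStruct stands in for MCollective result objects, and the senders, times, and statuses are invented.

require 'ostruct'

def stub_run(sender, status, failed, last_run_time)
  OpenStruct.new(:results => {:sender => sender,
                              :data => {:status => status,
                                        :resources => {'failed' => failed},
                                        :time => {'last_run' => last_run_time}}})
end

prev = [stub_run('1', 'stopped', 0, 100), stub_run('2', 'stopped', 0, 100)]
last = [stub_run('1', 'stopped', 0, 200),   # last_run changed and not running => finished
        stub_run('2', 'running', 0, 100)]   # still running
Astute::PuppetdDeployer.calc_nodes_status(last, prev)
# => {'succeed' => ['1'], 'error' => [], 'running' => ['2']}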

View File

@@ -0,0 +1,94 @@
require 'set'
STATES = {'offline' => 0,
'discover' => 10,
'verification' => 20,
'provisioning' => 30,
'provisioned' => 40,
'deploying' => 50,
'ready' => 60,
'error' => 70}
module Astute
class ProxyReporter
def initialize(up_reporter)
@up_reporter = up_reporter
@nodes = []
end
def report(data)
nodes_to_report = []
nodes = (data['nodes'] or [])
nodes.each do |node|
node = validate(node)
node_here = @nodes.select {|x| x['uid'] == node['uid']}
if node_here.empty?
nodes_to_report << node
next
end
node_here = node_here[0]
# We need to update node here only if progress is greater, or status changed
if node_here.eql?(node)
next
end
unless node['status'].nil?
node_here_state = (STATES[node_here['status']] or 0)
if STATES[node['status']] < node_here_state
Astute.logger.error("Attempt to assign lower status detected: "\
"Status was: #{node_here['status']}, attempted to "\
"assign: #{node['status']}. Skipping this node (id=#{node['uid']})")
next
end
end
nodes_to_report << node
end
# Let's report only if nodes updated
if nodes_to_report.any?
data['nodes'] = nodes_to_report
@up_reporter.report(data)
# Replacing current list of nodes with the updated one, keeping not updated elements
uids = nodes_to_report.map {|x| x['uid']}
@nodes.delete_if {|x| uids.include?(x['uid'])}
@nodes.concat(nodes_to_report)
end
end
private
def validate(node)
err = ''
unless node['status'].nil?
err += "Status provided #{node['status']} is not supported." if STATES[node['status']].nil?
end
unless node['uid']
err += "node uid is not provided."
end
unless node['progress'].nil?
err = "progress value provided, but no status." if node['status'].nil?
end
raise "Validation of node: #{node.inspect} for report failed: #{err}" if err.any?
unless node['progress'].nil?
if node['progress'] > 100
Astute.logger.error("Passed report for node with progress > 100: "\
"#{node.inspect}. Adjusting progress to 100.")
node['progress'] = 100
end
unless node['status'].nil?
if node['status'] == 'ready' and node['progress'] != 100
Astute.logger.error("In ready state node should have progress 100, "\
"but node passed: #{node.inspect}. Setting it to 100")
node['progress'] = 100
end
if node['status'] == 'verification'
# FIXME(mihgen): Currently our backend doesn't support such status. So let's just remove it...
node.delete('status')
end
end
end
return node
end
end
end
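
A sketch of the deduplication and status-regression guard above; the recording reporter and node data are invented for illustration.

class RecordingReporter
  attr_reader :messages
  def initialize; @messages = []; end
  def report(msg); @messages << msg; end
end

Astute.logger = Logger.new(STDOUT)   # so the skipped-status error is visible
up    = RecordingReporter.new
proxy = Astute::ProxyReporter.new(up)
proxy.report('nodes' => [{'uid' => '1', 'status' => 'deploying', 'progress' => 50}])
proxy.report('nodes' => [{'uid' => '1', 'status' => 'deploying', 'progress' => 50}]) # duplicate, dropped
proxy.report('nodes' => [{'uid' => '1', 'status' => 'provisioning'}])                # lower status, dropped
up.messages.size   # => 1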

View File

@@ -0,0 +1,27 @@
require 'json'
require 'timeout'
module Astute
module RpuppetDeployer
def self.rpuppet_deploy(ctx, nodes, parameters, classes, env="production")
if nodes.empty?
Astute.logger.info "#{ctx.task_id}: Nodes to deploy are not provided. Do nothing."
return false
end
uids = nodes.map {|n| n['uid']}
rpuppet = MClient.new(ctx, "rpuppet", uids)
data = {"parameters" => parameters,
"classes" => classes,
"environment" => env}
Astute.logger.debug "Waiting for puppet to finish deployment on all nodes (timeout = #{Astute.config.PUPPET_TIMEOUT} sec)..."
time_before = Time.now
Timeout::timeout(Astute.config.PUPPET_TIMEOUT) do
rpuppet.run(:data => data.to_json)
end
time_spent = Time.now - time_before
Astute.logger.info "#{ctx.task_id}: Spent #{time_spent} seconds on puppet run for following nodes(uids): #{nodes.map {|n| n['uid']}.join(',')}"
end
end
end

View File

@@ -1,11 +1,10 @@
/:=$(BUILD_DIR)/gems/
$/astute-0.0.1.gem: astute/astute.gemspec \
$(addprefix astute/bin/,$(call find-files,astute/bin)) \
$(addprefix astute/lib/,$(call find-files,astute/lib)) \
$(addprefix astute/spec/,$(call find-files,astute/spec))
@mkdir -p $(@D)
cd $(<D) && \
gem build $(<F)
mv $(<D)/astute-*.gem $@
ASTUTE_VERSION:=$(shell ruby -e "require '$(SOURCE_DIR)/astute/lib/astute/version.rb'; puts Astute::VERSION")
$(BUILD_DIR)/packages/gems/astute-$(ASTUTE_VERSION).gem: \
$(SOURCE_DIR)/astute/astute.gemspec \
$(call find-files,astute/bin) \
$(call find-files,astute/lib) \
$(call find-files,astute/spec)
@mkdir -p $(@D)
cd $(SOURCE_DIR)/astute && gem build astute.gemspec
mv $(SOURCE_DIR)/astute/astute-$(ASTUTE_VERSION).gem $@

View File

@@ -0,0 +1,474 @@
2013-01-23T09:24:16 info: 09:23:58,565 INFO : kernel command line: initrd=/images/centos63-x86_64/initrd.img ksdevice=bootif lang= locale=en_US text priority=critical kssendmac ks=http://10.0.168.2/cblr/svc/op/ks/system/slave-1 BOOT_IMAGE=/images/centos63-x86_64/vmlinuz BOOTIF=01-52-54-00-9a-db-f8
2013-01-23T09:24:16 info:
2013-01-23T09:24:16 info: 09:23:58,565 INFO : text mode forced from cmdline
2013-01-23T09:24:16 debug: 09:23:58,565 DEBUG : readNetInfo /tmp/s390net not found, early return
2013-01-23T09:24:16 info: 09:23:58,565 INFO : anaconda version 13.21.176 on x86_64 starting
2013-01-23T09:24:16 debug: 09:23:58,729 DEBUG : Saving module ipv6
2013-01-23T09:24:16 debug: 09:23:58,729 DEBUG : Saving module iscsi_ibft
2013-01-23T09:24:16 debug: 09:23:58,729 DEBUG : Saving module iscsi_boot_sysfs
2013-01-23T09:24:16 debug: 09:23:58,729 DEBUG : Saving module pcspkr
2013-01-23T09:24:16 debug: 09:23:58,729 DEBUG : Saving module edd
2013-01-23T09:24:16 debug: 09:23:58,729 DEBUG : Saving module floppy
2013-01-23T09:24:16 debug: 09:23:58,729 DEBUG : Saving module iscsi_tcp
2013-01-23T09:24:16 debug: 09:23:58,729 DEBUG : Saving module libiscsi_tcp
2013-01-23T09:24:16 debug: 09:23:58,729 DEBUG : Saving module libiscsi
2013-01-23T09:24:16 debug: 09:23:58,729 DEBUG : Saving module scsi_transport_iscsi
2013-01-23T09:24:16 debug: 09:23:58,729 DEBUG : Saving module squashfs
2013-01-23T09:24:16 debug: 09:23:58,729 DEBUG : Saving module cramfs
2013-01-23T09:24:16 debug: 09:23:58,730 DEBUG : probing buses
2013-01-23T09:24:16 debug: 09:23:58,863 DEBUG : waiting for hardware to initialize
2013-01-23T09:24:16 debug: 09:24:01,290 DEBUG : probing buses
2013-01-23T09:24:16 debug: 09:24:01,412 DEBUG : waiting for hardware to initialize
2013-01-23T09:24:16 info: 09:24:04,507 INFO : getting kickstart file
2013-01-23T09:24:16 info: 09:24:04,530 INFO : doing kickstart... setting it up
2013-01-23T09:24:16 debug: 09:24:04,531 DEBUG : activating device eth0
2013-01-23T09:24:16 info: 09:24:10,548 INFO : wait_for_iface_activation (2309): device eth0 activated
2013-01-23T09:24:16 info: 09:24:10,550 INFO : file location: http://10.0.168.2/cblr/svc/op/ks/system/slave-1
2013-01-23T09:24:16 info: 09:24:10,551 INFO : transferring http://10.0.168.2/cblr/svc/op/ks/system/slave-1
2013-01-23T09:24:16 info: 09:24:11,511 INFO : setting up kickstart
2013-01-23T09:24:16 info: 09:24:11,511 INFO : kickstart forcing text mode
2013-01-23T09:24:16 info: 09:24:11,511 INFO : kickstartFromUrl
2013-01-23T09:24:16 info: 09:24:11,511 INFO : results of url ks, url http://10.0.168.2:8080/centos/6.3/nailgun/x86_64
2013-01-23T09:24:16 err: 09:24:11,512 ERROR : got to setupCdrom without a CD device
2013-01-23T09:24:16 info: 09:24:11,512 INFO : no stage2= given, assuming http://10.0.168.2:8080/centos/6.3/nailgun/x86_64/images/install.img
2013-01-23T09:24:16 debug: 09:24:11,512 DEBUG : going to set language to en_US.UTF-8
2013-01-23T09:24:16 info: 09:24:11,512 INFO : setting language to en_US.UTF-8
2013-01-23T09:24:16 info: 09:24:11,551 INFO : starting STEP_METHOD
2013-01-23T09:24:16 debug: 09:24:11,551 DEBUG : loaderData->method is set, adding skipMethodDialog
2013-01-23T09:24:16 debug: 09:24:11,551 DEBUG : skipMethodDialog is set
2013-01-23T09:24:16 info: 09:24:11,560 INFO : starting STEP_STAGE2
2013-01-23T09:24:16 info: 09:24:11,560 INFO : URL_STAGE_MAIN: url is http://10.0.168.2:8080/centos/6.3/nailgun/x86_64/images/install.img
2013-01-23T09:24:16 info: 09:24:11,560 INFO : transferring http://10.0.168.2:8080/centos/6.3/nailgun/x86_64/images/updates.img
2013-01-23T09:24:16 err: 09:24:11,563 ERROR : Error downloading http://10.0.168.2:8080/centos/6.3/nailgun/x86_64/images/updates.img: HTTP response code said error
2013-01-23T09:24:16 info: 09:24:11,565 INFO : transferring http://10.0.168.2:8080/centos/6.3/nailgun/x86_64/images/product.img
2013-01-23T09:24:16 err: 09:24:11,568 ERROR : Error downloading http://10.0.168.2:8080/centos/6.3/nailgun/x86_64/images/product.img: HTTP response code said error
2013-01-23T09:24:16 info: 09:24:11,569 INFO : transferring http://10.0.168.2:8080/centos/6.3/nailgun/x86_64/images/install.img
2013-01-23T09:24:16 info: 09:24:12,077 INFO : mounted loopback device /mnt/runtime on /dev/loop0 as /tmp/install.img
2013-01-23T09:24:16 info: 09:24:12,078 INFO : got stage2 at url http://10.0.168.2:8080/centos/6.3/nailgun/x86_64/images/install.img
2013-01-23T09:24:16 info: 09:24:12,133 INFO : Loading SELinux policy
2013-01-23T09:24:16 info: 09:24:13,072 INFO : getting ready to spawn shell now
2013-01-23T09:24:16 info: 09:24:13,436 INFO : Running anaconda script /usr/bin/anaconda
2013-01-23T09:24:16 info: 09:24:16,109 INFO : CentOS Linux is the highest priority installclass, using it
2013-01-23T09:24:16 warning: 09:24:16,164 WARNING : /usr/lib/python2.6/site-packages/pykickstart/parser.py:713: DeprecationWarning: Script does not end with %end. This syntax has been deprecated. It may be removed from future releases, which will result in a fatal error from kickstart. Please modify your kickstart file to use this updated syntax.
2013-01-23T09:24:17 info: warnings.warn(_("%s does not end with %%end. This syntax has been deprecated. It may be removed from future releases, which will result in a fatal error from kickstart. Please modify your kickstart file to use this updated syntax.") % _("Script"), DeprecationWarning)
2013-01-23T09:24:17 info:
2013-01-23T09:24:17 info: 09:24:16,164 INFO : Running kickstart %%pre script(s)
2013-01-23T09:24:17 warning: 09:24:16,165 WARNING : '/bin/sh' specified as full path
2013-01-23T09:24:17 info: 09:24:17,369 INFO : All kickstart %%pre script(s) have been run
2013-01-23T09:24:17 info: 09:24:17,441 INFO : ISCSID is /usr/sbin/iscsid
2013-01-23T09:24:17 info: 09:24:17,442 INFO : no initiator set
2013-01-23T09:24:17 warning: 09:24:17,646 WARNING : '/usr/libexec/fcoe/fcoe_edd.sh' specified as full path
2013-01-23T09:24:18 info: 09:24:17,674 INFO : No FCoE EDD info found: No FCoE boot disk information is found in EDD!
2013-01-23T09:24:18 info:
2013-01-23T09:24:18 info: 09:24:17,674 INFO : no /etc/zfcp.conf; not configuring zfcp
2013-01-23T09:24:18 info: 09:24:17,776 INFO : created new libuser.conf at /tmp/libuser.JtvFQd with instPath="/mnt/sysimage"
2013-01-23T09:24:18 info: 09:24:17,777 INFO : anaconda called with cmdline = ['/usr/bin/anaconda', '--stage2', 'http://10.0.168.2:8080/centos/6.3/nailgun/x86_64/images/install.img', '--kickstart', '/tmp/ks.cfg', '-T', '--selinux', '--lang', 'en_US.UTF-8', '--keymap', 'us', '--repo', 'http://10.0.168.2:8080/centos/6.3/nailgun/x86_64']
2013-01-23T09:24:18 info: 09:24:17,777 INFO : Display mode = t
2013-01-23T09:24:18 info: 09:24:17,777 INFO : Default encoding = utf-8
2013-01-23T09:24:18 info: 09:24:17,898 INFO : Detected 752M of memory
2013-01-23T09:24:18 info: 09:24:17,899 INFO : Swap attempt of 1504M
2013-01-23T09:24:18 info: 09:24:18,372 INFO : ISCSID is /usr/sbin/iscsid
2013-01-23T09:24:18 info: 09:24:18,373 INFO : no initiator set
2013-01-23T09:24:19 warning: 09:24:18,893 WARNING : Timezone UTC set in kickstart is not valid.
2013-01-23T09:24:19 info: 09:24:19,012 INFO : Detected 752M of memory
2013-01-23T09:24:19 info: 09:24:19,012 INFO : Swap attempt of 1504M
2013-01-23T09:24:19 info: 09:24:19,064 INFO : setting installation environment hostname to slave-1.mirantis.com
2013-01-23T09:24:19 warning: 09:24:19,076 WARNING : step installtype does not exist
2013-01-23T09:24:19 warning: 09:24:19,076 WARNING : step confirminstall does not exist
2013-01-23T09:24:19 warning: 09:24:19,077 WARNING : step complete does not exist
2013-01-23T09:24:19 warning: 09:24:19,077 WARNING : step complete does not exist
2013-01-23T09:24:19 warning: 09:24:19,077 WARNING : step complete does not exist
2013-01-23T09:24:19 warning: 09:24:19,077 WARNING : step complete does not exist
2013-01-23T09:24:19 warning: 09:24:19,078 WARNING : step complete does not exist
2013-01-23T09:24:19 warning: 09:24:19,078 WARNING : step complete does not exist
2013-01-23T09:24:19 warning: 09:24:19,078 WARNING : step complete does not exist
2013-01-23T09:24:19 warning: 09:24:19,078 WARNING : step complete does not exist
2013-01-23T09:24:19 warning: 09:24:19,079 WARNING : step complete does not exist
2013-01-23T09:24:19 warning: 09:24:19,079 WARNING : step complete does not exist
2013-01-23T09:24:19 info: 09:24:19,080 INFO : moving (1) to step setuptime
2013-01-23T09:24:19 debug: 09:24:19,081 DEBUG : setuptime is a direct step
2013-01-23T09:24:19 warning: 09:24:19,081 WARNING : '/usr/sbin/hwclock' specified as full path
2013-01-23T09:24:20 info: 09:24:20,002 INFO : leaving (1) step setuptime
2013-01-23T09:24:20 info: 09:24:20,003 INFO : moving (1) to step autopartitionexecute
2013-01-23T09:24:20 debug: 09:24:20,003 DEBUG : autopartitionexecute is a direct step
2013-01-23T09:24:20 info: 09:24:20,143 INFO : leaving (1) step autopartitionexecute
2013-01-23T09:24:20 info: 09:24:20,143 INFO : moving (1) to step storagedone
2013-01-23T09:24:20 debug: 09:24:20,144 DEBUG : storagedone is a direct step
2013-01-23T09:24:20 info: 09:24:20,144 INFO : leaving (1) step storagedone
2013-01-23T09:24:20 info: 09:24:20,144 INFO : moving (1) to step enablefilesystems
2013-01-23T09:24:20 debug: 09:24:20,144 DEBUG : enablefilesystems is a direct step
2013-01-23T09:25:01 debug: 09:25:00,646 DEBUG : notifying kernel of 'change' event on device /sys/class/block/vda1
2013-01-23T09:25:01 info: 09:25:01,684 INFO : failed to set SELinux context for /mnt/sysimage: [Errno 95] Operation not supported
2013-01-23T09:25:01 debug: 09:25:01,684 DEBUG : isys.py:mount()- going to mount /dev/vda1 on /mnt/sysimage as ext4 with options defaults
2013-01-23T09:25:01 debug: 09:25:01,704 DEBUG : isys.py:mount()- going to mount //dev on /mnt/sysimage/dev as bind with options defaults,bind
2013-01-23T09:25:01 debug: 09:25:01,715 DEBUG : isys.py:mount()- going to mount devpts on /mnt/sysimage/dev/pts as devpts with options gid=5,mode=620
2013-01-23T09:25:02 debug: 09:25:01,728 DEBUG : isys.py:mount()- going to mount tmpfs on /mnt/sysimage/dev/shm as tmpfs with options defaults
2013-01-23T09:25:02 info: 09:25:01,742 INFO : failed to get default SELinux context for /proc: [Errno 2] No such file or directory
2013-01-23T09:25:02 debug: 09:25:01,742 DEBUG : isys.py:mount()- going to mount proc on /mnt/sysimage/proc as proc with options defaults
2013-01-23T09:25:02 info: 09:25:01,746 INFO : failed to get default SELinux context for /proc: [Errno 2] No such file or directory
2013-01-23T09:25:02 debug: 09:25:01,755 DEBUG : isys.py:mount()- going to mount sysfs on /mnt/sysimage/sys as sysfs with options defaults
2013-01-23T09:25:02 info: 09:25:01,762 INFO : leaving (1) step enablefilesystems
2013-01-23T09:25:02 info: 09:25:01,762 INFO : moving (1) to step bootloadersetup
2013-01-23T09:25:02 debug: 09:25:01,762 DEBUG : bootloadersetup is a direct step
2013-01-23T09:25:02 info: 09:25:01,765 INFO : leaving (1) step bootloadersetup
2013-01-23T09:25:02 info: 09:25:01,765 INFO : moving (1) to step reposetup
2013-01-23T09:25:02 debug: 09:25:01,766 DEBUG : reposetup is a direct step
2013-01-23T09:25:02 err: 09:25:01,779 ERROR : Error downloading treeinfo file: [Errno 14] PYCURL ERROR 22 - "The requested URL returned error: 404"
2013-01-23T09:25:02 err: 09:25:01,917 ERROR : Error downloading treeinfo file: [Errno 14] PYCURL ERROR 22 - "The requested URL returned error: 404"
2013-01-23T09:25:02 err: 09:25:01,921 ERROR : Error downloading treeinfo file: [Errno 14] PYCURL ERROR 22 - "The requested URL returned error: 404"
2013-01-23T09:25:02 info: 09:25:01,922 INFO : added repository Nailgun with URL http://10.0.168.2:8080/centos/6.3/nailgun/x86_64
2013-01-23T09:25:02 debug: 09:25:01,930 DEBUG : Grabbing http://10.0.168.2:8080/centos/6.3/nailgun/x86_64/repodata/repomd.xml
2013-01-23T09:25:02 debug: 09:25:01,937 DEBUG : Grabbing http://10.0.168.2:8080/centos/6.3/nailgun/x86_64/repodata/primary.xml.gz
2013-01-23T09:25:02 debug: 09:25:01,944 DEBUG : Grabbing http://10.0.168.2:8080/centos/6.3/nailgun/x86_64/repodata/comps.xml
2013-01-23T09:25:04 info: 09:25:04,547 INFO : leaving (1) step reposetup
2013-01-23T09:25:04 info: 09:25:04,547 INFO : moving (1) to step basepkgsel
2013-01-23T09:25:04 debug: 09:25:04,547 DEBUG : basepkgsel is a direct step
2013-01-23T09:25:04 warning: 09:25:04,665 WARNING : not adding Base group
2013-01-23T09:25:05 info: 09:25:04,810 INFO : leaving (1) step basepkgsel
2013-01-23T09:25:05 info: 09:25:04,811 INFO : moving (1) to step postselection
2013-01-23T09:25:05 debug: 09:25:04,811 DEBUG : postselection is a direct step
2013-01-23T09:25:05 info: 09:25:04,814 INFO : selected kernel package for kernel
2013-01-23T09:25:05 debug: 09:25:05,546 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/fs/ext4/ext4.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,546 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/fs/mbcache.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,547 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/fs/jbd2/jbd2.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,547 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/scsi/fcoe/fcoe.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,547 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/scsi/fcoe/libfcoe.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,547 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/scsi/libfc/libfc.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,547 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/scsi/scsi_transport_fc.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,547 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/scsi/scsi_tgt.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,547 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/crypto/xts.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,547 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/crypto/lrw.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,547 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/crypto/gf128mul.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,548 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/crypto/sha256_generic.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,548 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/crypto/cbc.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,548 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/md/dm-crypt.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,548 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/md/dm-round-robin.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,548 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/md/dm-multipath.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,548 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/md/dm-snapshot.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,548 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/md/dm-mirror.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,548 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/md/dm-region-hash.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,549 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/md/dm-log.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,549 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/md/dm-zero.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,549 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/md/dm-mod.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,549 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/md/linear.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,549 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/md/raid10.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,549 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/md/raid456.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,549 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/crypto/async_tx/async_raid6_recov.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,549 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/crypto/async_tx/async_pq.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,549 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/lib/raid6/raid6_pq.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,549 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/crypto/async_tx/async_xor.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,549 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/crypto/xor.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,550 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/crypto/async_tx/async_memcpy.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,550 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/crypto/async_tx/async_tx.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,550 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/md/raid1.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,550 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/md/raid0.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,550 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/infiniband/hw/mlx4/mlx4_ib.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,550 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/net/mlx4/mlx4_en.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,550 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/net/mlx4/mlx4_core.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,550 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/infiniband/ulp/ipoib/ib_ipoib.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,550 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/infiniband/core/ib_cm.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,550 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/infiniband/core/ib_sa.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,551 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/infiniband/core/ib_mad.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,551 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/infiniband/core/ib_core.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,551 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/block/virtio_blk.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,551 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/net/virtio_net.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,551 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/ata/pata_acpi.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,551 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/ata/ata_generic.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,551 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/ata/ata_piix.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,551 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/virtio/virtio_pci.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,551 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/virtio/virtio_ring.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,551 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/virtio/virtio.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,551 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/net/ipv6/ipv6.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,551 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/firmware/iscsi_ibft.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,552 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/scsi/iscsi_boot_sysfs.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,552 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/input/misc/pcspkr.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,552 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/firmware/edd.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,552 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/block/floppy.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,552 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/scsi/iscsi_tcp.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,552 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/scsi/libiscsi_tcp.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,552 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/scsi/libiscsi.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,552 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/drivers/scsi/scsi_transport_iscsi.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,553 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/fs/squashfs/squashfs.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,553 DEBUG : Checking for DUD module /lib/modules/2.6.32-279.el6.x86_64/kernel/fs/cramfs/cramfs.ko.gz
2013-01-23T09:25:05 debug: 09:25:05,553 DEBUG : selecting kernel-devel
2013-01-23T09:25:05 debug: 09:25:05,561 DEBUG : no package matching kernel-devel.x86_64
2013-01-23T09:25:05 debug: 09:25:05,571 DEBUG : no package matching authconfig
2013-01-23T09:25:05 debug: 09:25:05,580 DEBUG : no package matching system-config-firewall-base
2013-01-23T09:25:08 info: 09:25:08,036 INFO : leaving (1) step postselection
2013-01-23T09:25:08 info: 09:25:08,037 INFO : moving (1) to step install
2013-01-23T09:25:08 info: 09:25:08,039 INFO : leaving (1) step install
2013-01-23T09:25:08 info: 09:25:08,040 INFO : moving (1) to step preinstallconfig
2013-01-23T09:25:08 debug: 09:25:08,040 DEBUG : preinstallconfig is a direct step
2013-01-23T09:25:08 debug: 09:25:08,045 DEBUG : isys.py:mount()- going to mount /selinux on /mnt/sysimage/selinux as selinuxfs with options defaults
2013-01-23T09:25:08 debug: 09:25:08,055 DEBUG : isys.py:mount()- going to mount /proc/bus/usb on /mnt/sysimage/proc/bus/usb as usbfs with options defaults
2013-01-23T09:25:08 info: 09:25:08,069 INFO : copy_to_sysimage: source '/etc/multipath/wwids' does not exist.
2013-01-23T09:25:08 info: 09:25:08,069 INFO : copy_to_sysimage: source '/etc/multipath/bindings' does not exist.
2013-01-23T09:25:08 info: 09:25:08,081 INFO : copy_to_sysimage: source '/etc/multipath/wwids' does not exist.
2013-01-23T09:25:08 info: 09:25:08,081 INFO : copy_to_sysimage: source '/etc/multipath/bindings' does not exist.
2013-01-23T09:25:08 info: 09:25:08,086 INFO : leaving (1) step preinstallconfig
2013-01-23T09:25:08 info: 09:25:08,086 INFO : moving (1) to step installpackages
2013-01-23T09:25:08 debug: 09:25:08,086 DEBUG : installpackages is a direct step
2013-01-23T09:25:08 info: 09:25:08,087 INFO : Preparing to install packages
2013-01-23T09:25:10 info: Installing libgcc-4.4.6-4.el6.x86_64
2013-01-23T09:25:10 info: warning: libgcc-4.4.6-4.el6.x86_64: Header V3 RSA/SHA1 Signature, key ID c105b9de: NOKEY
2013-01-23T09:25:10 info: Installing setup-2.8.14-16.el6.noarch
2013-01-23T09:25:10 info: Installing filesystem-2.4.30-3.el6.x86_64
2013-01-23T09:25:11 info: Installing basesystem-10.0-4.el6.noarch
2013-01-23T09:25:11 info: Installing kernel-headers-2.6.32-279.19.1.el6.centos.plus.x86_64
2013-01-23T09:25:11 info: Installing ca-certificates-2010.63-3.el6_1.5.noarch
2013-01-23T09:25:11 info: Installing ncurses-base-5.7-3.20090208.el6.x86_64
2013-01-23T09:25:12 info: Installing tzdata-2012i-2.el6.noarch
2013-01-23T09:25:13 info: Installing glibc-common-2.12-1.80.el6_3.6.x86_64
2013-01-23T09:25:25 info: Installing nss-softokn-freebl-3.12.9-11.el6.x86_64
2013-01-23T09:25:25 info: Installing glibc-2.12-1.80.el6_3.6.x86_64
2013-01-23T09:25:28 info: Installing ncurses-libs-5.7-3.20090208.el6.x86_64
2013-01-23T09:25:28 info: Installing bash-4.1.2-9.el6_2.x86_64
2013-01-23T09:25:28 info: Installing libattr-2.4.44-7.el6.x86_64
2013-01-23T09:25:29 info: Installing libcap-2.16-5.5.el6.x86_64
2013-01-23T09:25:29 info: Installing zlib-1.2.3-27.el6.x86_64
2013-01-23T09:25:29 info: Installing info-4.13a-8.el6.x86_64
2013-01-23T09:25:29 info: Installing db4-4.7.25-17.el6.x86_64
2013-01-23T09:25:29 info: Installing libacl-2.2.49-6.el6.x86_64
2013-01-23T09:25:29 info: Installing audit-libs-2.2-2.el6.x86_64
2013-01-23T09:25:29 info: Installing libcom_err-1.41.12-12.el6.x86_64
2013-01-23T09:25:29 info: Installing nspr-4.9.1-2.el6_3.x86_64
2013-01-23T09:25:29 info: Installing popt-1.13-7.el6.x86_64
2013-01-23T09:25:29 info: Installing chkconfig-1.3.49.3-2.el6.x86_64
2013-01-23T09:25:29 info: Installing nss-util-3.13.5-1.el6_3.x86_64
2013-01-23T09:25:30 info: Installing bzip2-libs-1.0.5-7.el6_0.x86_64
2013-01-23T09:25:30 info: Installing libsepol-2.0.41-4.el6.x86_64
2013-01-23T09:25:30 info: Installing libselinux-2.0.94-5.3.el6.x86_64
2013-01-23T09:25:30 info: Installing shadow-utils-4.1.4.2-13.el6.x86_64
2013-01-23T09:25:30 info: Installing sed-4.2.1-10.el6.x86_64
2013-01-23T09:25:30 info: Installing glib2-2.22.5-7.el6.x86_64
2013-01-23T09:25:30 info: Installing gamin-0.1.10-9.el6.x86_64
2013-01-23T09:25:31 info: Installing libstdc++-4.4.6-4.el6.x86_64
2013-01-23T09:25:31 info: Installing gmp-4.3.1-7.el6_2.2.x86_64
2013-01-23T09:25:31 info: Installing readline-6.0-4.el6.x86_64
2013-01-23T09:25:31 info: Installing sqlite-3.6.20-1.el6.x86_64
2013-01-23T09:25:31 info: Installing file-libs-5.04-13.el6.x86_64
2013-01-23T09:25:31 info: Installing dbus-libs-1.2.24-7.el6_3.x86_64
2013-01-23T09:25:32 info: Installing lua-5.1.4-4.1.el6.x86_64
2013-01-23T09:25:32 info: Installing pcre-7.8-4.el6.x86_64
2013-01-23T09:25:32 info: Installing grep-2.6.3-3.el6.x86_64
2013-01-23T09:25:32 info: Installing libidn-1.18-2.el6.x86_64
2013-01-23T09:25:32 info: Installing gawk-3.1.7-9.el6.x86_64
2013-01-23T09:25:32 info: Installing libuuid-2.17.2-12.7.el6_3.x86_64
2013-01-23T09:25:32 info: Installing libblkid-2.17.2-12.7.el6_3.x86_64
2013-01-23T09:25:32 info: Installing xz-libs-4.999.9-0.3.beta.20091007git.el6.x86_64
2013-01-23T09:25:32 info: Installing elfutils-libelf-0.152-1.el6.x86_64
2013-01-23T09:25:32 info: Installing gdbm-1.8.0-36.el6.x86_64
2013-01-23T09:25:32 info: Installing perl-Pod-Escapes-1.04-127.el6.x86_64
2013-01-23T09:25:32 info: Installing perl-libs-5.10.1-127.el6.x86_64
2013-01-23T09:25:32 info: Installing perl-Module-Pluggable-3.90-127.el6.x86_64
2013-01-23T09:25:32 info: Installing perl-version-0.77-127.el6.x86_64
2013-01-23T09:25:33 info: Installing perl-Pod-Simple-3.13-127.el6.x86_64
2013-01-23T09:25:33 info: Installing perl-5.10.1-127.el6.x86_64
2013-01-23T09:25:39 info: Installing libgpg-error-1.7-4.el6.x86_64
2013-01-23T09:25:39 info: Installing findutils-4.4.2-6.el6.x86_64
2013-01-23T09:25:39 info: Installing libselinux-utils-2.0.94-5.3.el6.x86_64
2013-01-23T09:25:39 info: Installing iptables-1.4.7-5.1.el6_2.x86_64
2013-01-23T09:25:39 info: Installing cyrus-sasl-lib-2.1.23-13.el6_3.1.x86_64
2013-01-23T09:25:39 info: Installing cpio-2.10-11.el6_3.x86_64
2013-01-23T09:25:39 info: Installing binutils-2.20.51.0.2-5.34.el6.x86_64
2013-01-23T09:25:40 info: Installing which-2.19-6.el6.x86_64
2013-01-23T09:25:40 info: Installing libedit-2.11-4.20080712cvs.1.el6.x86_64
2013-01-23T09:25:40 info: Installing sysvinit-tools-2.87-4.dsf.el6.x86_64
2013-01-23T09:25:40 info: Installing tcp_wrappers-libs-7.6-57.el6.x86_64
2013-01-23T09:25:40 info: Installing expat-2.0.1-11.el6_2.x86_64
2013-01-23T09:25:40 info: Installing pth-2.0.7-9.3.el6.x86_64
2013-01-23T09:25:41 info: Installing dbus-glib-0.86-5.el6.x86_64
2013-01-23T09:25:41 info: Installing iproute-2.6.32-20.el6.x86_64
2013-01-23T09:25:41 info: Installing libgcrypt-1.4.5-9.el6_2.2.x86_64
2013-01-23T09:25:41 info: Installing grubby-7.0.15-3.el6.x86_64
2013-01-23T09:25:41 info: Installing libnih-1.0.1-7.el6.x86_64
2013-01-23T09:25:41 info: Installing upstart-0.6.5-12.el6.x86_64
2013-01-23T09:25:41 info: Installing file-5.04-13.el6.x86_64
2013-01-23T09:25:41 info: Installing nss-softokn-3.12.9-11.el6.x86_64
2013-01-23T09:25:41 info: Installing ppl-0.10.2-11.el6.x86_64
2013-01-23T09:25:41 info: Installing cloog-ppl-0.15.7-1.2.el6.x86_64
2013-01-23T09:25:42 info: Installing mpfr-2.4.1-6.el6.x86_64
2013-01-23T09:25:42 info: Installing cpp-4.4.6-4.el6.x86_64
2013-01-23T09:25:43 info: Installing libusb-0.1.12-23.el6.x86_64
2013-01-23T09:25:43 info: Installing libutempter-1.1.5-4.1.el6.x86_64
2013-01-23T09:25:43 info: Installing MAKEDEV-3.24-6.el6.x86_64
2013-01-23T09:25:43 info: Installing vim-minimal-7.2.411-1.8.el6.x86_64
2013-01-23T09:25:43 info: Installing procps-3.2.8-23.el6.x86_64
2013-01-23T09:25:43 info: Installing psmisc-22.6-15.el6_0.1.x86_64
2013-01-23T09:25:43 info: Installing net-tools-1.60-110.el6_2.x86_64
2013-01-23T09:25:43 info: Installing checkpolicy-2.0.22-1.el6.x86_64
2013-01-23T09:25:44 info: Installing libselinux-ruby-2.0.94-5.3.el6.x86_64
2013-01-23T09:25:44 info: Installing augeas-libs-0.9.0-4.el6.x86_64
2013-01-23T09:25:44 info: Installing tar-1.23-7.el6.x86_64
2013-01-23T09:25:44 info: Installing bzip2-1.0.5-7.el6_0.x86_64
2013-01-23T09:25:44 info: Installing pinentry-0.7.6-6.el6.x86_64
2013-01-23T09:25:46 info: Installing libss-1.41.12-12.el6.x86_64
2013-01-23T09:25:46 info: Installing e2fsprogs-libs-1.41.12-12.el6.x86_64
2013-01-23T09:25:46 info: Installing db4-utils-4.7.25-17.el6.x86_64
2013-01-23T09:25:46 info: Installing libgomp-4.4.6-4.el6.x86_64
2013-01-23T09:25:46 info: Installing diffutils-2.8.1-28.el6.x86_64
2013-01-23T09:25:46 info: Installing libxml2-2.7.6-8.el6_3.3.x86_64
2013-01-23T09:25:47 info: Installing glibc-headers-2.12-1.80.el6_3.6.x86_64
2013-01-23T09:25:48 info: Installing glibc-devel-2.12-1.80.el6_3.6.x86_64
2013-01-23T09:25:49 info: Installing ncurses-5.7-3.20090208.el6.x86_64
2013-01-23T09:25:49 info: Installing groff-1.18.1.4-21.el6.x86_64
2013-01-23T09:25:50 info: Installing less-436-10.el6.x86_64
2013-01-23T09:25:50 info: Installing coreutils-libs-8.4-19.el6.x86_64
2013-01-23T09:25:50 info: Installing gzip-1.3.12-18.el6.x86_64
2013-01-23T09:25:50 info: Installing cracklib-2.8.16-4.el6.x86_64
2013-01-23T09:25:50 info: Installing cracklib-dicts-2.8.16-4.el6.x86_64
2013-01-23T09:25:51 info: Installing coreutils-8.4-19.el6.x86_64
2013-01-23T09:25:52 info: Installing pam-1.1.1-10.el6_2.1.x86_64
2013-01-23T09:25:54 info: Installing module-init-tools-3.9-20.el6.x86_64
2013-01-23T09:25:55 info: Installing hwdata-0.233-7.8.el6.noarch
2013-01-23T09:25:57 info: Installing redhat-logos-60.0.14-12.el6.centos.noarch
2013-01-23T09:25:59 info: Installing plymouth-scripts-0.8.3-24.el6.centos.x86_64
2013-01-23T09:25:59 info: Installing logrotate-3.7.8-15.el6.x86_64
2013-01-23T09:25:59 info: Installing nss-3.13.5-1.el6_3.x86_64
2013-01-23T09:25:59 info: Installing nss-sysinit-3.13.5-1.el6_3.x86_64
2013-01-23T09:25:59 info: Installing nss-tools-3.13.5-1.el6_3.x86_64
2013-01-23T09:26:00 info: Installing openldap-2.4.23-26.el6_3.2.x86_64
2013-01-23T09:26:00 info: Installing compat-readline5-5.2-17.1.el6.x86_64
2013-01-23T09:26:00 info: Installing libcap-ng-0.6.4-3.el6_0.1.x86_64
2013-01-23T09:26:00 info: Installing ethtool-2.6.33-0.3.el6.x86_64
2013-01-23T09:26:00 info: Installing mingetty-1.08-5.el6.x86_64
2013-01-23T09:26:00 info: Installing vconfig-1.9-8.1.el6.x86_64
2013-01-23T09:26:00 info: Installing dmidecode-2.11-2.el6.x86_64
2013-01-23T09:26:00 info: Installing keyutils-libs-1.4-4.el6.x86_64
2013-01-23T09:26:00 info: Installing krb5-libs-1.9-33.el6_3.3.x86_64
2013-01-23T09:26:01 info: Installing openssl-1.0.0-25.el6_3.1.x86_64
2013-01-23T09:26:01 info: Installing ruby-libs-1.8.7.352-7.el6_2.x86_64
2013-01-23T09:26:03 info: Installing ruby-1.8.7.352-7.el6_2.x86_64
2013-01-23T09:26:03 info: Installing libssh2-1.2.2-11.el6_3.x86_64
2013-01-23T09:26:03 info: Installing libcurl-7.19.7-26.el6_2.4.x86_64
2013-01-23T09:26:03 info: Installing curl-7.19.7-26.el6_2.4.x86_64
2013-01-23T09:26:03 info: Installing rpm-libs-4.8.0-27.el6.x86_64
2013-01-23T09:26:04 info: Installing rpm-4.8.0-27.el6.x86_64
2013-01-23T09:26:04 info: Installing gnupg2-2.0.14-4.el6.x86_64
2013-01-23T09:26:04 info: Installing gpgme-1.1.8-3.el6.x86_64
2013-01-23T09:26:05 info: Installing ruby-irb-1.8.7.352-7.el6_2.x86_64
2013-01-23T09:26:05 info: Installing ruby-rdoc-1.8.7.352-7.el6_2.x86_64
2013-01-23T09:26:06 info: Installing rubygems-1.3.7-1.el6.noarch
2013-01-23T09:26:06 info: Installing rubygem-stomp-1.1.8-1.el6.noarch
2013-01-23T09:26:06 info: warning: rubygem-stomp-1.1.8-1.el6.noarch: Header V3 RSA/SHA256 Signature, key ID 0608b895: NOKEY
2013-01-23T09:26:06 info: Installing mcollective-common-2.2.2-1.el6.noarch
2013-01-23T09:26:06 info: warning: mcollective-common-2.2.2-1.el6.noarch: Header V4 RSA/SHA1 Signature, key ID 4bd6ec30: NOKEY
2013-01-23T09:26:07 info: Installing mcollective-2.2.2-1.el6.noarch
2013-01-23T09:26:07 info: Installing ruby-augeas-0.4.1-1.el6.x86_64
2013-01-23T09:26:07 info: Installing ruby-shadow-1.4.1-13.el6.x86_64
2013-01-23T09:26:07 info: Installing fipscheck-lib-1.2.0-7.el6.x86_64
2013-01-23T09:26:07 info: Installing fipscheck-1.2.0-7.el6.x86_64
2013-01-23T09:26:07 info: Installing ustr-1.0.4-9.1.el6.x86_64
2013-01-23T09:26:07 info: Installing libsemanage-2.0.43-4.1.el6.x86_64
2013-01-23T09:26:07 info: Installing libffi-3.0.5-3.2.el6.x86_64
2013-01-23T09:26:07 info: Installing python-libs-2.6.6-29.el6_3.3.x86_64
2013-01-23T09:26:08 info: Installing python-2.6.6-29.el6_3.3.x86_64
2013-01-23T09:26:12 info: Installing scapy-2.0.0.10-5.el6.noarch
2013-01-23T09:26:13 info: Installing yum-metadata-parser-1.1.2-16.el6.x86_64
2013-01-23T09:26:13 info: Installing pygpgme-0.1-18.20090824bzr68.el6.x86_64
2013-01-23T09:26:13 info: Installing rpm-python-4.8.0-27.el6.x86_64
2013-01-23T09:26:13 info: Installing python-iniparse-0.3.1-2.1.el6.noarch
2013-01-23T09:26:13 info: Installing python-pycurl-7.19.0-8.el6.x86_64
2013-01-23T09:26:13 info: Installing python-urlgrabber-3.9.1-8.el6.noarch
2013-01-23T09:26:13 info: Installing yum-plugin-fastestmirror-1.1.30-14.el6.noarch
2013-01-23T09:26:13 info: Installing yum-3.2.29-30.el6.centos.noarch
2013-01-23T09:26:13 info: Installing dash-0.5.5.1-3.1.el6.x86_64
2013-01-23T09:26:14 info: Installing pciutils-libs-3.1.4-11.el6.x86_64
2013-01-23T09:26:14 info: Installing pciutils-3.1.4-11.el6.x86_64
2013-01-23T09:26:14 info: Installing facter-1.6.17-1.el6.x86_64
2013-01-23T09:26:14 info: Installing plymouth-core-libs-0.8.3-24.el6.centos.x86_64
2013-01-23T09:26:14 info: Installing kbd-misc-1.15-11.el6.noarch
2013-01-23T09:26:14 info: Installing centos-release-6-3.el6.centos.9.x86_64
2013-01-23T09:26:14 info: Installing iputils-20071127-16.el6.x86_64
2013-01-23T09:26:14 info: Installing util-linux-ng-2.17.2-12.7.el6_3.x86_64
2013-01-23T09:26:15 info: Installing initscripts-9.03.31-2.el6.centos.1.x86_64
2013-01-23T09:26:16 info: Installing udev-147-2.42.el6.x86_64
2013-01-23T09:26:16 info: Installing openssh-5.3p1-81.el6_3.x86_64
2013-01-23T09:26:16 info: Installing kbd-1.15-11.el6.x86_64
2013-01-23T09:26:16 info: Installing rsyslog-5.8.10-2.el6.x86_64
2013-01-23T09:26:17 info: Installing exim-4.72-4.el6.x86_64
2013-01-23T09:26:17 info: Installing crontabs-1.10-33.el6.noarch
2013-01-23T09:26:17 info: Installing cronie-anacron-1.4.4-7.el6.x86_64
2013-01-23T09:26:17 info: Installing cronie-1.4.4-7.el6.x86_64
2013-01-23T09:26:17 info: Installing ntpdate-4.2.4p8-2.el6.centos.x86_64
2013-01-23T09:26:17 info: Installing dhcp-common-4.1.1-31.0.1.P1.el6.centos.1.x86_64
2013-01-23T09:26:17 info: Installing kernel-firmware-2.6.32-279.19.1.el6.centos.plus.noarch
2013-01-23T09:26:19 info: Installing libdrm-2.4.25-2.el6.x86_64
2013-01-23T09:26:19 info: Installing plymouth-0.8.3-24.el6.centos.x86_64
2013-01-23T09:26:19 info: Installing dracut-004-284.el6_3.1.noarch
2013-01-23T09:26:19 info: Installing dracut-kernel-004-284.el6_3.1.noarch
2013-01-23T09:26:19 info: Installing kernel-2.6.32-279.19.1.el6.centos.plus.x86_64
2013-01-23T09:26:27 info: Installing dhclient-4.1.1-31.0.1.P1.el6.centos.1.x86_64
2013-01-23T09:26:27 info: Installing ntp-4.2.4p8-2.el6.centos.x86_64
2013-01-23T09:26:27 info: Installing openssh-clients-5.3p1-81.el6_3.x86_64
2013-01-23T09:26:27 info: Installing openssh-server-5.3p1-81.el6_3.x86_64
2013-01-23T09:26:28 info: Installing puppet-2.7.19-1.el6.noarch
2013-01-23T09:26:30 info: Installing policycoreutils-2.0.83-19.24.el6.x86_64
2013-01-23T09:26:31 info: Installing nailgun-net-check-0.0.2-1.x86_64
2013-01-23T09:26:31 info: Installing grub-0.97-77.el6.x86_64
2013-01-23T09:26:31 info: Installing nailgun-mcagents-0.1.0-1.x86_64
2013-01-23T09:26:31 info: Installing ruby-devel-1.8.7.352-7.el6_2.x86_64
2013-01-23T09:26:31 info: Installing wget-1.12-1.4.el6.x86_64
2013-01-23T09:26:31 info: Installing sudo-1.7.4p5-13.el6_3.x86_64
2013-01-23T09:26:31 info: Installing nailgun-agent-0.1.0-1.x86_64
2013-01-23T09:26:31 info: Installing gcc-4.4.6-4.el6.x86_64
2013-01-23T09:26:35 info: Installing e2fsprogs-1.41.12-12.el6.x86_64
2013-01-23T09:26:35 info: Installing iptables-ipv6-1.4.7-5.1.el6_2.x86_64
2013-01-23T09:26:35 info: Installing acl-2.2.49-6.el6.x86_64
2013-01-23T09:26:35 info: Installing make-3.81-20.el6.x86_64
2013-01-23T09:26:35 info: Installing attr-2.4.44-7.el6.x86_64
2013-01-23T09:27:14 info: 09:27:14,602 INFO : leaving (1) step installpackages
2013-01-23T09:27:14 info: 09:27:14,603 INFO : moving (1) to step postinstallconfig
2013-01-23T09:27:14 debug: 09:27:14,604 DEBUG : postinstallconfig is a direct step
2013-01-23T09:27:14 info: 09:27:14,628 INFO : leaving (1) step postinstallconfig
2013-01-23T09:27:14 info: 09:27:14,628 INFO : moving (1) to step writeconfig
2013-01-23T09:27:14 debug: 09:27:14,629 DEBUG : writeconfig is a direct step
2013-01-23T09:27:14 info: 09:27:14,629 INFO : Writing main configuration
2013-01-23T09:27:14 warning: 09:27:14,638 WARNING : '/usr/sbin/authconfig' specified as full path
2013-01-23T09:27:14 err: 09:27:14,661 ERROR : Error running /usr/sbin/authconfig: No such file or directory
2013-01-23T09:27:14 err: 09:27:14,662 ERROR : Error running ['--update', '--nostart', '--enableshadow', '--passalgo=sha512']: Error running /usr/sbin/authconfig: No such file or directory
2013-01-23T09:27:14 warning: 09:27:14,665 WARNING : '/usr/sbin/lokkit' specified as full path
2013-01-23T09:27:14 err: 09:27:14,680 ERROR : Error running /usr/sbin/lokkit: No such file or directory
2013-01-23T09:27:14 err: 09:27:14,681 ERROR : lokkit run failed: Error running /usr/sbin/lokkit: No such file or directory
2013-01-23T09:27:14 warning: 09:27:14,681 WARNING : '/usr/sbin/lokkit' specified as full path
2013-01-23T09:27:14 err: 09:27:14,694 ERROR : Error running /usr/sbin/lokkit: No such file or directory
2013-01-23T09:27:14 err: 09:27:14,695 ERROR : lokkit run failed: Error running /usr/sbin/lokkit: No such file or directory
2013-01-23T09:27:14 info: 09:27:14,798 INFO : removing libuser.conf at /tmp/libuser.JtvFQd
2013-01-23T09:27:14 info: 09:27:14,799 INFO : created new libuser.conf at /tmp/libuser.JtvFQd with instPath="/mnt/sysimage"
2013-01-23T09:27:14 info: 09:27:14,821 INFO : leaving (1) step writeconfig
2013-01-23T09:27:14 info: 09:27:14,821 INFO : moving (1) to step firstboot
2013-01-23T09:27:14 debug: 09:27:14,821 DEBUG : firstboot is a direct step
2013-01-23T09:27:14 info: 09:27:14,821 INFO : leaving (1) step firstboot
2013-01-23T09:27:14 info: 09:27:14,822 INFO : moving (1) to step instbootloader
2013-01-23T09:27:14 debug: 09:27:14,822 DEBUG : instbootloader is a direct step
2013-01-23T09:27:14 info: *** FINISHED INSTALLING PACKAGES ***
2013-01-23T09:27:15 warning: 09:27:14,989 WARNING : '/sbin/grub-install' specified as full path
2013-01-23T09:27:15 warning: 09:27:15,038 WARNING : '/sbin/grub' specified as full path
2013-01-23T09:27:17 info: 09:27:17,176 INFO : leaving (1) step instbootloader
2013-01-23T09:27:17 info: 09:27:17,177 INFO : moving (1) to step reipl
2013-01-23T09:27:17 debug: 09:27:17,177 DEBUG : reipl is a direct step
2013-01-23T09:27:17 info: 09:27:17,177 INFO : leaving (1) step reipl
2013-01-23T09:27:17 info: 09:27:17,177 INFO : moving (1) to step writeksconfig
2013-01-23T09:27:17 debug: 09:27:17,177 DEBUG : writeksconfig is a direct step
2013-01-23T09:27:17 info: 09:27:17,177 INFO : Writing autokickstart file
2013-01-23T09:27:17 info: 09:27:17,183 INFO : leaving (1) step writeksconfig
2013-01-23T09:27:17 info: 09:27:17,183 INFO : moving (1) to step setfilecon
2013-01-23T09:27:17 debug: 09:27:17,183 DEBUG : setfilecon is a direct step
2013-01-23T09:27:17 info: 09:27:17,184 INFO : setting SELinux contexts for anaconda created files
2013-01-23T09:27:19 info: 09:27:18,940 INFO : leaving (1) step setfilecon
2013-01-23T09:27:19 info: 09:27:18,940 INFO : moving (1) to step copylogs
2013-01-23T09:27:19 debug: 09:27:18,941 DEBUG : copylogs is a direct step
2013-01-23T09:27:19 info: 09:27:18,941 INFO : Copying anaconda logs
2013-01-23T09:27:19 info: 09:27:18,943 INFO : leaving (1) step copylogs
2013-01-23T09:27:19 info: 09:27:18,943 INFO : moving (1) to step methodcomplete
2013-01-23T09:27:19 debug: 09:27:18,943 DEBUG : methodcomplete is a direct step
2013-01-23T09:27:19 info: 09:27:18,943 INFO : leaving (1) step methodcomplete
2013-01-23T09:27:19 info: 09:27:18,943 INFO : moving (1) to step postscripts
2013-01-23T09:27:19 debug: 09:27:18,944 DEBUG : postscripts is a direct step
2013-01-23T09:27:19 info: 09:27:18,944 INFO : Running kickstart %%post script(s)
2013-01-23T09:27:19 warning: 09:27:18,946 WARNING : '/bin/sh' specified as full path
2013-01-23T09:28:30 info: 09:28:30,453 INFO : All kickstart %%post script(s) have been run
2013-01-23T09:28:30 info: 09:28:30,454 INFO : leaving (1) step postscripts
2013-01-23T09:28:30 info: 09:28:30,454 INFO : moving (1) to step dopostaction
2013-01-23T09:28:30 debug: 09:28:30,455 DEBUG : dopostaction is a direct step
2013-01-23T09:28:30 info: 09:28:30,455 INFO : leaving (1) step dopostaction

View File

@@ -1,3 +1,43 @@
$LOAD_PATH << File.join(File.dirname(__FILE__),"..","lib")
require 'rspec'
require 'astute'
$LOAD_PATH << File.join(File.dirname(__FILE__),"..","lib")
require 'rspec'
# The following require is needed for rcov to report valid results
require 'rspec/autorun'
require 'yaml'
require 'astute'
RSpec.configure do |config|
config.mock_with :mocha
end
# NOTE(mihgen): waiting for unit tests to complete is painful;
# zeroing the sleep intervals below significantly speeds up the test run
Astute.config.PUPPET_DEPLOY_INTERVAL = 0
Astute.config.PUPPET_FADE_INTERVAL = 0
module SpecHelpers
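# Builds a mocked MCollective rpcclient. If discover_nodes is given, the mock
# expects discovery to be scoped to exactly those node uids; if timeout is
# given, it expects the client timeout to be set to that value.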
def mock_rpcclient(discover_nodes=nil, timeout=nil)
rpcclient = mock('rpcclient') do
stubs(:progress=)
unless timeout.nil?
expects(:timeout=).with(timeout)
end
unless discover_nodes.nil?
expects(:discover).with(:nodes => discover_nodes.map {|x| x['uid'].to_s}).at_least_once
else
stubs(:discover)
end
end
Astute::MClient.any_instance.stubs(:rpcclient).returns(rpcclient)
return rpcclient
end
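# Builds a mocked MCollective reply. Defaults to a successful result
# (statuscode 0, empty data) from sender '1'; pass a hash to override fields.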
def mock_mc_result(result={})
mc_res = {:statuscode => 0, :data => {}, :sender => '1'}
mc_res.merge!(result)
mc_result = mock('mc_result') do
stubs(:results).returns(mc_res)
stubs(:agent).returns('mc_stubbed_agent')
end
return mc_result
end
end

View File

@@ -0,0 +1,105 @@
#!/usr/bin/env rspec
require File.join(File.dirname(__FILE__), "..", "spec_helper")
require 'tempfile'
require 'date'
include Astute
describe LogParser do
context "Pattern-based progress bar calculation (anaconda.log)" do
before :each do
@pattern_spec = {'type' => 'pattern-list', 'chunk_size' => 40000, # Size of the log block read when searching for patterns.
'pattern_list' => [
{'pattern' => 'Running kickstart %%pre script', 'progress' => 0.08},
{'pattern' => 'to step enablefilesystems', 'progress' => 0.09},
{'pattern' => 'to step reposetup', 'progress' => 0.13},
{'pattern' => 'to step installpackages', 'progress' => 0.16},
{'pattern' => 'Installing',
'number' => 210, # 205 packages are currently installed; 5 extra allow for future growth.
'p_min' => 0.16, # min percent
'p_max' => 0.87 # max percent
},
{'pattern' => 'to step postinstallconfig', 'progress' => 0.87},
{'pattern' => 'to step dopostaction', 'progress' => 0.92},
].reverse
}
end
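# Replays the sample anaconda log line by line into a temporary file and
# records the progress reported after each timestamped line. Returns the
# resulting progress table and the elapsed period in seconds.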
def test_supposed_time_parser(pattern_spec)
date_regexp = '^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}'
date_format = '%Y-%m-%dT%H:%M:%S'
fo = Tempfile.new('logparse')
logfile = File.join(File.dirname(__FILE__), "..", "example-logs", "anaconda.log_")
path = fo.path
initial_progress = Astute::LogParser.get_log_progress(path, pattern_spec)
initial_progress.should eql(0)
progress_table = []
File.open(logfile).each do |line|
fo.write(line)
fo.flush
date_string = line.match(date_regexp)
if date_string
date = DateTime.strptime(date_string[0], date_format)
progress = Astute::LogParser.get_log_progress(path, pattern_spec)
progress_table << {'date' => date, 'progress' => progress}
end
end
fo.close!
first_event_date, first_progress = progress_table[0]['date'], progress_table[0]['progress']
last_event_date, last_progress = progress_table[-1]['date'], progress_table[-1]['progress']
period = (last_event_date - first_event_date) / (last_progress - first_progress)
hours, mins, secs, frac = Date::day_fraction_to_time(period)
# FIXME(mihgen): this calculation can likely be simplified considerably; needs refactoring.
# Assume the server spent reboot_time seconds rebooting.
reboot_time = 30
# period will be useful for other test cases
period_in_sec = hours * 60 * 60 + mins * 60 + secs + reboot_time
# Normalize the timestamps in the table relative to the first event.
progress_table.each do |el|
delta = el['date'] - first_event_date
hours, mins, secs, frac = Date::day_fraction_to_time(delta)
delta_in_sec = hours * 60 * 60 + mins * 60 + secs
el['time'] = delta_in_sec + reboot_time
end
return progress_table, period_in_sec
end
it "new progress must be equal or greater than previous" do
progress_table, period_in_sec = test_supposed_time_parser(@pattern_spec)
progress_table.each_cons(2) do |el|
el[1]['progress'].should be >= el[0]['progress']
el[0]['progress'].should be >= 0
el[1]['progress'].should be <= 1
end
end
it "it should move smoothly"
it "it must be updated at least 5 times" do
# Otherwise progress bar has no meaning I guess...
pending('Not yet implemented')
end
end
end
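# An alternative 'supposed_time' pattern spec, kept for reference: it would
# estimate progress from the expected duration between consecutive log patterns.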
#pattern_spec = {'type' => 'supposed_time',
#'chunk_size' => 10000,
#'date_format' => '%Y-%m-%dT%H:%M:%S',
#'date_regexp' => '^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}',
#'pattern_list' => [
#{'pattern' => 'Running anaconda script', 'supposed_time' => 60},
#{'pattern' => 'moving (1) to step enablefilesystems', 'supposed_time' => 3},
#{'pattern' => "notifying kernel of 'change' event on device", 'supposed_time' => 97},
#{'pattern' => 'Preparing to install packages', 'supposed_time' => 8},
#{'pattern' => 'Installing glibc-common-2.12', 'supposed_time' => 9},
#{'pattern' => 'Installing bash-4.1.2', 'supposed_time' => 10},
#{'pattern' => 'Installing coreutils-8.4-19', 'supposed_time' => 20},
#{'pattern' => 'Installing centos-release-6-3', 'supposed_time' => 20},
#{'pattern' => 'Installing attr-2.4.44', 'supposed_time' => 19},
#{'pattern' => 'leaving (1) step installpackages', 'supposed_time' => 51},
#{'pattern' => 'moving (1) to step postscripts', 'supposed_time' => 3},
#{'pattern' => 'leaving (1) step postscripts', 'supposed_time' => 132},
#].reverse,
#}

View File

@@ -0,0 +1,79 @@
#!/usr/bin/env rspec
require File.join(File.dirname(__FILE__), "..", "spec_helper")
include Astute
describe MClient do
include SpecHelpers
before(:each) do
@ctx = mock('context')
@ctx.stubs(:task_id)
@ctx.stubs(:reporter)
end
it "should receive method call and process valid result correctly" do
nodes = [{'uid' => 1}, {'uid' => 2}, {'uid' => 3}]
rpcclient = mock_rpcclient(nodes)
mc_valid_result = mock_mc_result
rpcclient.expects(:echo).with(:msg => 'hello world').once.returns([mc_valid_result]*3)
mclient = MClient.new(@ctx, "faketest", nodes.map {|x| x['uid']})
stats = mclient.echo(:msg => 'hello world')
stats.should eql([mc_valid_result]*3)
end
it "should return even bad result if check_result=false" do
nodes = [{'uid' => 1}, {'uid' => 2}, {'uid' => 3}]
rpcclient = mock_rpcclient(nodes)
mc_valid_result = mock_mc_result
mc_error_result = mock_mc_result({:statuscode => 1, :sender => '2'})
rpcclient.expects(:echo).with(:msg => 'hello world').once.\
returns([mc_valid_result, mc_error_result])
mclient = MClient.new(@ctx, "faketest", nodes.map {|x| x['uid']}, check_result=false)
stats = mclient.echo(:msg => 'hello world')
stats.should eql([mc_valid_result, mc_error_result])
end
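# On each retry, discovery narrows to the uids that have not yet replied,
# so only the silent node '3' ends up in the error message.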
it "should try to retry for non-responded nodes" do
nodes = [{'uid' => 1}, {'uid' => 2}, {'uid' => 3}]
rpcclient = mock('rpcclient') do
stubs(:progress=)
expects(:discover).with(:nodes => ['1','2','3'])
expects(:discover).with(:nodes => ['2','3'])
end
Astute::MClient.any_instance.stubs(:rpcclient).returns(rpcclient)
mc_valid_result = mock_mc_result
mc_valid_result2 = mock_mc_result({:sender => '2'})
rpcclient.stubs(:echo).returns([mc_valid_result]).then.
returns([mc_valid_result2]).then
mclient = MClient.new(@ctx, "faketest", nodes.map {|x| x['uid']})
mclient.retries = 1
expect { mclient.echo(:msg => 'hello world') }.to raise_error(/MCollective agents '3' didn't respond./)
end
it "should raise error if agent returns statuscode != 0" do
nodes = [{'uid' => 1}, {'uid' => 2}, {'uid' => 3}]
rpcclient = mock('rpcclient') do
stubs(:progress=)
expects(:discover).with(:nodes => ['1','2','3'])
expects(:discover).with(:nodes => ['2','3'])
end
Astute::MClient.any_instance.stubs(:rpcclient).returns(rpcclient)
mc_valid_result = mock_mc_result
mc_failed_result = mock_mc_result({:sender => '2', :statuscode => 1})
rpcclient.stubs(:echo).returns([mc_valid_result]).then.
returns([mc_failed_result]).then
mclient = MClient.new(@ctx, "faketest", nodes.map {|x| x['uid']})
mclient.retries = 1
expect { mclient.echo(:msg => 'hello world') }.to \
raise_error(/MCollective agents '3' didn't respond.\n.* failed nodes: 2/)
end
end

View File

@@ -0,0 +1,170 @@
#!/usr/bin/env rspec
require File.join(File.dirname(__FILE__), "..", "spec_helper")
describe "NailyFact DeploymentEngine" do
context "When deploy is called, " do
before(:each) do
@ctx = mock
@ctx.stubs(:task_id)
@ctx.stubs(:deploy_log_parser).returns(Astute::LogParser::NoParsing.new)
reporter = mock
@ctx.stubs(:reporter).returns(reporter)
reporter.stubs(:report)
@deploy_engine = Astute::DeploymentEngine::NailyFact.new(@ctx)
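# Sample deploy message in the format Nailgun sends: cluster-wide attributes
# plus per-node network metadata for one controller and two compute nodes.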
@data = {"args" =>
{"attributes" =>
{"storage_network_range" => "172.16.0.0/24", "auto_assign_floating_ip" => false,
"mysql" => {"root_password" => "Z2EqsZo5"},
"keystone" => {"admin_token" => "5qKy0i63", "db_password" => "HHQ86Rym", "admin_tenant" => "admin"},
"nova" => {"user_password" => "h8RY8SE7", "db_password" => "Xl9I51Cb"},
"glance" => {"user_password" => "nDlUxuJq", "db_password" => "V050pQAn"},
"rabbit" => {"user" => "nova", "password" => "FLF3txKC"},
"management_network_range" => "192.168.0.0/24",
"public_network_range" => "240.0.1.0/24",
"fixed_network_range" => "10.0.0.0/24",
"floating_network_range" => "240.0.0.0/24"},
"task_uuid" => "19d99029-350a-4c9c-819c-1f294cf9e741",
"nodes" => [{"mac" => "52:54:00:0E:B8:F5", "status" => "provisioning",
"uid" => "devnailgun.mirantis.com", "error_type" => nil,
"fqdn" => "devnailgun.mirantis.com",
"network_data" => [{"gateway" => "192.168.0.1",
"name" => "management", "dev" => "eth0",
"brd" => "192.168.0.255", "netmask" => "255.255.255.0",
"vlan" => 102, "ip" => "192.168.0.2/24"},
{"gateway" => "240.0.1.1",
"name" => "public", "dev" => "eth0",
"brd" => "240.0.1.255", "netmask" => "255.255.255.0",
"vlan" => 101, "ip" => "240.0.1.2/24"},
{"name" => "floating", "dev" => "eth0", "vlan" => 120},
{"name" => "fixed", "dev" => "eth0", "vlan" => 103},
{"name" => "storage", "dev" => "eth0", "vlan" => 104}],
"id" => 1,
"ip" => "10.20.0.200",
"role" => "controller"},
{"mac" => "52:54:00:50:91:DD", "status" => "provisioning",
"uid" => 2, "error_type" => nil,
"fqdn" => "slave-2.mirantis.com",
"network_data" => [{"gateway" => "192.168.0.1",
"name" => "management", "dev" => "eth0",
"brd" => "192.168.0.255", "netmask" => "255.255.255.0",
"vlan" => 102, "ip" => "192.168.0.3/24"},
{"gateway" => "240.0.1.1",
"name" => "public", "dev" => "eth0",
"brd" => "240.0.1.255", "netmask" => "255.255.255.0",
"vlan" => 101, "ip" => "240.0.1.3/24"},
{"name" => "floating", "dev" => "eth0", "vlan" => 120},
{"name" => "fixed", "dev" => "eth0", "vlan" => 103},
{"name" => "storage", "dev" => "eth0", "vlan" => 104}],
"id" => 2,
"ip" => "10.20.0.221",
"role" => "compute"},
{"mac" => "52:54:00:C3:2C:28", "status" => "provisioning",
"uid" => 3, "error_type" => nil,
"fqdn" => "slave-3.mirantis.com",
"network_data" => [{"gateway" => "192.168.0.1",
"name" => "management", "dev" => "eth0",
"brd" => "192.168.0.255", "netmask" => "255.255.255.0",
"vlan" => 102, "ip" => "192.168.0.4/24"},
{"gateway" => "240.0.1.1",
"name" => "public", "dev" => "eth0",
"brd" => "240.0.1.255", "netmask" => "255.255.255.0",
"vlan" => 101, "ip" => "240.0.1.4/24"},
{"name" => "floating", "dev" => "eth0", "vlan" => 120},
{"name" => "fixed", "dev" => "eth0", "vlan" => 103},
{"name" => "storage", "dev" => "eth0", "vlan" => 104}],
"id" => 3,
"ip" => "10.20.0.68",
"role" => "compute"}]},
"method" => "deploy",
"respond_to" => "deploy_resp"}
ha_nodes = @data['args']['nodes'] +
[{"mac" => "52:54:00:0E:88:88", "status" => "provisioned",
"uid" => "4", "error_type" => nil,
"fqdn" => "controller-4.mirantis.com",
"network_data" => [{"gateway" => "192.168.0.1",
"name" => "management", "dev" => "eth0",
"brd" => "192.168.0.255", "netmask" => "255.255.255.0",
"vlan" => 102, "ip" => "192.168.0.5/24"},
{"gateway" => "240.0.1.1",
"name" => "public", "dev" => "eth0",
"brd" => "240.0.1.255", "netmask" => "255.255.255.0",
"vlan" => 101, "ip" => "240.0.1.5/24"},
{"name" => "floating", "dev" => "eth0", "vlan" => 120},
{"name" => "fixed", "dev" => "eth0", "vlan" => 103},
{"name" => "storage", "dev" => "eth0", "vlan" => 104}],
"id" => 4,
"ip" => "10.20.0.205",
"role" => "controller"},
{"mac" => "52:54:00:0E:99:99", "status" => "provisioned",
"uid" => "5", "error_type" => nil,
"fqdn" => "controller-5.mirantis.com",
"network_data" => [{"gateway" => "192.168.0.1",
"name" => "management", "dev" => "eth0",
"brd" => "192.168.0.255", "netmask" => "255.255.255.0",
"vlan" => 102, "ip" => "192.168.0.6/24"},
{"gateway" => "240.0.1.1",
"name" => "public", "dev" => "eth0",
"brd" => "240.0.1.255", "netmask" => "255.255.255.0",
"vlan" => 101, "ip" => "240.0.1.6/24"},
{"name" => "floating", "dev" => "eth0", "vlan" => 120},
{"name" => "fixed", "dev" => "eth0", "vlan" => 103},
{"name" => "storage", "dev" => "eth0", "vlan" => 104}],
"id" => 5,
"ip" => "10.20.0.206",
"role" => "controller"}]
@data_ha = Marshal.load(Marshal.dump(@data))
@data_ha['args']['nodes'] = ha_nodes
@data_ha['args']['attributes']['deployment_mode'] = "ha_compute"
# VIPs are required for HA mode and should be passed from Nailgun (only in HA)
@data_ha['args']['attributes']['management_vip'] = "192.168.0.111"
@data_ha['args']['attributes']['public_vip'] = "240.0.1.111"
end
it "it should call valid method depends on attrs" do
nodes = [{'uid' => 1}]
attrs = {'deployment_mode' => 'ha_compute'}
attrs_modified = attrs.merge({'some' => 'somea'})
@deploy_engine.expects(:attrs_ha_compute).with(nodes, attrs).returns(attrs_modified)
@deploy_engine.expects(:deploy_ha_compute).with(nodes, attrs_modified)
# All implementations of deploy_piece live in subclasses.
@deploy_engine.respond_to?(:deploy_piece).should be_true
@deploy_engine.deploy(nodes, attrs)
end
it "it should raise an exception if deployment mode is unsupported" do
nodes = [{'uid' => 1}]
attrs = {'deployment_mode' => 'unknown'}
expect {@deploy_engine.deploy(nodes, attrs)}.to raise_exception(/Method attrs_unknown is not implemented/)
end
it "multinode_compute deploy should not raise any exception" do
@data['args']['attributes']['deployment_mode'] = "multinode_compute"
Astute::Metadata.expects(:publish_facts).times(@data['args']['nodes'].size)
# Two calls are expected: one for the controller and one for all computes.
Astute::PuppetdDeployer.expects(:deploy).twice
@deploy_engine.deploy(@data['args']['nodes'], @data['args']['attributes'])
end
it "ha_compute deploy should not raise any exception" do
Astute::Metadata.expects(:publish_facts).at_least_once
Astute::PuppetdDeployer.expects(:deploy).times(7)
@deploy_engine.deploy(@data_ha['args']['nodes'], @data_ha['args']['attributes'])
end
it "ha_compute deploy should not raise any exception if there are only one controller" do
Astute::Metadata.expects(:publish_facts).at_least_once
Astute::PuppetdDeployer.expects(:deploy).times(4)
ctrl = @data_ha['args']['nodes'].select {|n| n['role'] == 'controller'}[0]
@deploy_engine.deploy([ctrl], @data_ha['args']['attributes'])
end
it "singlenode_compute deploy should not raise any exception" do
@data['args']['attributes']['deployment_mode'] = "singlenode_compute"
@data['args']['nodes'] = [@data['args']['nodes'][0]] # Singlenode mode deploys exactly one node
Astute::Metadata.expects(:publish_facts).times(@data['args']['nodes'].size)
Astute::PuppetdDeployer.expects(:deploy).once # one call for one node
@deploy_engine.deploy(@data['args']['nodes'], @data['args']['attributes'])
end
end
end

View File

@@ -0,0 +1,119 @@
#!/usr/bin/env rspec
require File.join(File.dirname(__FILE__), "..", "spec_helper")
describe Astute::Orchestrator do
include SpecHelpers
before(:each) do
@orchestrator = Astute::Orchestrator.new
@reporter = mock('reporter')
@reporter.stub_everything
end
it "must be able to return node type" do
nodes = [{'uid' => '1'}]
res = {:data => {:node_type => 'target'},
:sender=>"1"}
mc_res = mock_mc_result(res)
mc_timeout = 5
rpcclient = mock_rpcclient(nodes, mc_timeout)
rpcclient.expects(:get_type).once.returns([mc_res])
types = @orchestrator.node_type(@reporter, 'task_uuid', nodes, mc_timeout)
types.should eql([{"node_type"=>"target", "uid"=>"1"}])
end
it "must be able to complete verify_networks" do
nodes = [{'uid' => '1'}, {'uid' => '2'}]
networks = [{'id' => 1, 'vlan_id' => 100, 'cidr' => '10.0.0.0/24'},
{'id' => 2, 'vlan_id' => 101, 'cidr' => '192.168.0.0/24'}]
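# Probing results: :neighbours is a nested hash of
# iface => vlan_id => node_uid => [interfaces the probe frame was seen on].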
res1 = {:data => {:uid=>"1",
:neighbours => {"eth0" => {"100" => {"1" => ["eth0"], "2" => ["eth0"]},
"101" => {"1" => ["eth0"]}
}
}
},
:sender=>"1"
}
res2 = {:data => {:uid=>"2",
:neighbours => {"eth0" => {"100" => {"1" => ["eth0"], "2" => ["eth0"]},
"101" => {"1" => ["eth0"], "2" => ["eth0"]}
}
}
},
:sender=>"2"
}
valid_res = {:statuscode => 0, :sender => '1'}
mc_res1 = mock_mc_result(res1)
mc_res2 = mock_mc_result(res2)
mc_valid_res = mock_mc_result
rpcclient = mock_rpcclient(nodes)
rpcclient.expects(:start_frame_listeners).once.returns([mc_valid_res]*2)
rpcclient.expects(:send_probing_frames).once.returns([mc_valid_res]*2)
rpcclient.expects(:get_probing_info).once.returns([mc_res1, mc_res2])
Astute::MClient.any_instance.stubs(:rpcclient).returns(rpcclient)
res = @orchestrator.verify_networks(@reporter, 'task_uuid', nodes, networks)
expected = {"nodes" => [{"networks" => [{"iface"=>"eth0", "vlans"=>[100]}], "uid"=>"1"},
{"networks"=>[{"iface"=>"eth0", "vlans"=>[100, 101]}], "uid"=>"2"}]}
res.should eql(expected)
end
it "verify_network returns error if nodes list is empty" do
res = @orchestrator.verify_networks(@reporter, 'task_uuid', [], [])
res.should eql({'status' => 'error', 'error' => "Nodes list is empty. Nothing to check."})
end
it "verify_network returns all vlans passed if only one node provided" do
nodes = [{'uid' => '1'}]
networks = [{'id' => 1, 'vlan_id' => 100, 'cidr' => '10.0.0.0/24'},
{'id' => 2, 'vlan_id' => 101, 'cidr' => '192.168.0.0/24'}]
res = @orchestrator.verify_networks(@reporter, 'task_uuid', nodes, networks)
expected = {"nodes" => [{"networks" => [{"iface"=>"eth0", "vlans"=>[100,101]}], "uid"=>"1"}]}
res.should eql(expected)
end
it "in remove_nodes, it returns empty list if nodes are not provided" do
res = @orchestrator.remove_nodes(@reporter, 'task_uuid', [])
res.should eql({'nodes' => []})
end
it "remove_nodes cleans nodes and reboots them" do
removed_hash = {:sender => '1',
:data => {:rebooted => true}}
error_hash = {:sender => '2',
:data => {:rebooted => false, :error_msg => 'Could not reboot'}}
nodes = [{'uid' => 1}, {'uid' => 2}]
rpcclient = mock_rpcclient(nodes)
mc_removed_res = mock_mc_result(removed_hash)
mc_error_res = mock_mc_result(error_hash)
rpcclient.expects(:erase_node).once.with(:reboot => true).returns([mc_removed_res, mc_error_res])
res = @orchestrator.remove_nodes(@reporter, 'task_uuid', nodes)
res.should eql({'nodes' => [{'uid' => '1'}], 'status' => 'error',
'error_nodes' => [{"uid"=>"2", "error"=>"RPC method 'erase_node' failed "\
"with message: Could not reboot"}]})
end
it "it calls deploy method with valid arguments" do
nodes = [{'uid' => 1}]
attrs = {'a' => 'b'}
Astute::DeploymentEngine::NailyFact.any_instance.expects(:deploy).
with([{'uid' => '1'}], attrs)
@orchestrator.deploy(@reporter, 'task_uuid', nodes, attrs)
end
it "deploy method raises error if nodes list is empty" do
expect {@orchestrator.deploy(@reporter, 'task_uuid', [], {})}.
to raise_error(/Nodes to deploy are not provided!/)
end
it "remove_nodes try to call MCAgent multiple times"
it "remove_nodes do not fail if any of nodes failed"
end

View File

@ -0,0 +1,126 @@
#!/usr/bin/env rspec
require File.join(File.dirname(__FILE__), "..", "spec_helper")
include Astute
describe "Puppetd" do
include SpecHelpers
context "PuppetdDeployer" do
before :each do
@ctx = mock
@ctx.stubs(:task_id)
@reporter = mock('reporter')
@ctx.stubs(:reporter).returns(ProxyReporter.new(@reporter))
@ctx.stubs(:deploy_log_parser).returns(Astute::LogParser::NoParsing.new)
end
it "reports ready status for node if puppet deploy finished successfully" do
@reporter.expects(:report).with('nodes' => [{'uid' => '1', 'status' => 'ready'}])
last_run_result = {:data=>
{:time=>{"last_run"=>1358425701},
:status => "running", :resources => {'failed' => 0},
:running => 1, :idling => 0},
:sender=>"1"}
last_run_result_new = Marshal.load(Marshal.dump(last_run_result))
last_run_result_new[:data][:time]['last_run'] = 1358426000
last_run_result_finished = Marshal.load(Marshal.dump(last_run_result))
last_run_result_finished[:data][:status] = 'stopped'
last_run_result_finished[:data][:time]['last_run'] = 1358427000
nodes = [{'uid' => '1'}]
rpcclient = mock_rpcclient(nodes)
rpcclient_valid_result = mock_mc_result(last_run_result)
rpcclient_new_res = mock_mc_result(last_run_result_new)
rpcclient_finished_res = mock_mc_result(last_run_result_finished)
rpcclient.stubs(:last_run_summary).returns([rpcclient_valid_result]).then.
returns([rpcclient_valid_result]).then.
returns([rpcclient_new_res]).then.
returns([rpcclient_finished_res])
rpcclient.expects(:runonce).at_least_once.returns([rpcclient_valid_result])
Astute::PuppetdDeployer.deploy(@ctx, nodes, retries=0)
end
it "publishes error status for node if puppet failed" do
@reporter.expects(:report).with('nodes' => [{'status' => 'error', 'error_type' => 'deploy', 'uid' => '1'}])
last_run_result = {:statuscode=>0, :data=>
{:changes=>{"total"=>1}, :time=>{"last_run"=>1358425701},
:resources=>{"failed"=>0}, :status => "running",
:running => 1, :idling => 0, :runtime => 100},
:sender=>"1"}
last_run_result_new = Marshal.load(Marshal.dump(last_run_result))
last_run_result_new[:data][:time]['last_run'] = 1358426000
last_run_result_new[:data][:resources]['failed'] = 1
nodes = [{'uid' => '1'}]
last_run_result_finished = Marshal.load(Marshal.dump(last_run_result))
last_run_result_finished[:data][:status] = 'stopped'
last_run_result_finished[:data][:time]['last_run'] = 1358427000
last_run_result_finished[:data][:resources]['failed'] = 1
rpcclient = mock_rpcclient(nodes)
rpcclient_valid_result = mock_mc_result(last_run_result)
rpcclient_new_res = mock_mc_result(last_run_result_new)
rpcclient_finished_res = mock_mc_result(last_run_result_finished)
rpcclient.stubs(:last_run_summary).returns([rpcclient_valid_result]).then.
returns([rpcclient_valid_result]).then.
returns([rpcclient_new_res]).then.
returns([rpcclient_finished_res])
rpcclient.expects(:runonce).at_least_once.returns([rpcclient_valid_result])
MClient.any_instance.stubs(:rpcclient).returns(rpcclient)
Astute::PuppetdDeployer.deploy(@ctx, nodes, retries=0)
end
it "retries to run puppet if it fails" do
@reporter.expects(:report).with('nodes' => [{'uid' => '1', 'status' => 'ready'}])
last_run_result = {:statuscode=>0, :data=>
{:changes=>{"total"=>1}, :time=>{"last_run"=>1358425701},
:resources=>{"failed"=>0}, :status => "running",
:running => 1, :idling => 0, :runtime => 100},
:sender=>"1"}
last_run_failed = Marshal.load(Marshal.dump(last_run_result))
last_run_failed[:data][:time]['last_run'] = 1358426000
last_run_failed[:data][:resources]['failed'] = 1
last_run_failed[:data][:status] = 'stopped'
last_run_fixing = Marshal.load(Marshal.dump(last_run_result))
last_run_fixing[:data][:time]['last_run'] = 1358426000
last_run_fixing[:data][:resources]['failed'] = 1
last_run_fixing[:data][:status] = 'running'
last_run_success = Marshal.load(Marshal.dump(last_run_result))
last_run_success[:data][:time]['last_run'] = 1358428000
last_run_success[:data][:status] = 'stopped'
nodes = [{'uid' => '1'}]
rpcclient = mock_rpcclient(nodes)
rpcclient_valid_result = mock_mc_result(last_run_result)
rpcclient_failed = mock_mc_result(last_run_failed)
rpcclient_fixing = mock_mc_result(last_run_fixing)
rpcclient_succeed = mock_mc_result(last_run_success)
rpcclient.stubs(:last_run_summary).returns([rpcclient_valid_result]).then.
returns([rpcclient_valid_result]).then.
returns([rpcclient_failed]).then.
returns([rpcclient_failed]).then.
returns([rpcclient_fixing]).then.
returns([rpcclient_succeed])
rpcclient.expects(:runonce).at_least_once.returns([rpcclient_valid_result])
MClient.any_instance.stubs(:rpcclient).returns(rpcclient)
Astute::PuppetdDeployer.deploy(@ctx, nodes, retries=1)
end
end
end

View File

@ -0,0 +1,129 @@
#!/usr/bin/env rspec
require File.join(File.dirname(__FILE__), "..", "spec_helper")
include Astute
describe "ProxyReporter" do
context "Instance of ProxyReporter class" do
before :each do
@msg = {'nodes' => [{'status' => 'ready', 'uid' => '1'}]}
@msg_pr = {'nodes' => [@msg['nodes'][0],
{'status' => 'deploying', 'uid' => '2',
'progress' => 54}]}
@up_reporter = mock('up_reporter')
@reporter = ProxyReporter.new(@up_reporter)
end
it "reports first-come data" do
@up_reporter.expects(:report).with(@msg)
@reporter.report(@msg)
end
it "does not report the same message" do
@up_reporter.expects(:report).with(@msg).once
5.times { @reporter.report(@msg) }
end
it "reports only updated node" do
updated_node = @msg_pr['nodes'][1]
expected_msg = {'nodes' => [updated_node]}
@up_reporter.expects(:report).with(@msg)
@up_reporter.expects(:report).with(expected_msg)
@reporter.report(@msg)
@reporter.report(@msg_pr)
end
it "reports only if progress value is greater" do
msg1 = {'nodes' => [{'status' => 'deploying', 'uid' => '1', 'progress' => 54},
{'status' => 'deploying', 'uid' => '2', 'progress' => 54}]}
msg2 = Marshal.load(Marshal.dump(msg1))
msg2['nodes'][1]['progress'] = 100
msg2['nodes'][1]['status'] = 'ready'
updated_node = msg2['nodes'][1]
expected_msg = {'nodes' => [updated_node]}
@up_reporter.expects(:report).with(msg1)
@up_reporter.expects(:report).with(expected_msg)
@reporter.report(msg1)
@reporter.report(msg2)
end
it "raises exception if wrong key passed" do
@msg['nodes'][0]['ups'] = 'some_value'
lambda {@reporter.report(@msg)}.should raise_error
end
it "adjusts progress to 100 if passed greater" do
expected_msg = Marshal.load(Marshal.dump(@msg_pr))
expected_msg['nodes'][1]['progress'] = 100
@msg_pr['nodes'][1]['progress'] = 120
@up_reporter.expects(:report).with(expected_msg)
@reporter.report(@msg_pr)
end
it "adjusts progress to 100 if status ready" do
expected_msg = Marshal.load(Marshal.dump(@msg_pr))
expected_msg['nodes'][1]['progress'] = 100
expected_msg['nodes'][1]['status'] = 'ready'
@msg_pr['nodes'][1]['status'] = 'ready'
@up_reporter.expects(:report).with(expected_msg)
@reporter.report(@msg_pr)
end
it "does not report if node was in ready, and trying to set is deploying" do
msg1 = {'nodes' => [{'uid' => 1, 'status' => 'ready'}]}
msg2 = {'nodes' => [{'uid' => 2, 'status' => 'ready'}]}
msg3 = {'nodes' => [{'uid' => 1, 'status' => 'deploying', 'progress' => 100}]}
@up_reporter.expects(:report).with(msg1)
@up_reporter.expects(:report).with(msg2)
@up_reporter.expects(:report).never
@reporter.report(msg1)
@reporter.report(msg2)
5.times { @reporter.report(msg3) }
end
it "reports even not all keys provided" do
msg1 = {'nodes' => [{'uid' => 1, 'status' => 'deploying'}]}
msg2 = {'nodes' => [{'uid' => 2, 'status' => 'ready'}]}
@up_reporter.expects(:report).with(msg1)
@up_reporter.expects(:report).with(msg2)
@reporter.report(msg1)
@reporter.report(msg2)
end
it "raises exception if progress provided and no status" do
msg1 = {'nodes' => [{'uid' => 1, 'status' => 'ready'}]}
msg2 = {'nodes' => [{'uid' => 1, 'progress' => 100}]}
@up_reporter.expects(:report).with(msg1)
@up_reporter.expects(:report).never
@reporter.report(msg1)
lambda {@reporter.report(msg2)}.should raise_error
end
it "raises exception if status of node is not supported" do
msg1 = {'nodes' => [{'uid' => 1, 'status' => 'hah'}]}
@up_reporter.expects(:report).never
lambda {@reporter.report(msg1)}.should raise_error
end
it "some other attrs are valid and passed" do
msg1 = {'nodes' => [{'uid' => 1, 'status' => 'deploying'}]}
msg2 = {'status' => 'error', 'error_type' => 'deploy',
'nodes' => [{'uid' => 2, 'status' => 'error', 'message' => 'deploy'}]}
@up_reporter.expects(:report).with(msg1)
@up_reporter.expects(:report).with(msg2)
@reporter.report(msg1)
@reporter.report(msg2)
end
it "reports if status is greater" do
msgs = [{'nodes' => [{'uid' => 1, 'status' => 'provisioned'}]},
{'nodes' => [{'uid' => 1, 'status' => 'provisioning'}]},
{'nodes' => [{'uid' => 1, 'status' => 'ready'}]},
{'nodes' => [{'uid' => 1, 'status' => 'error'}]}]
@up_reporter.expects(:report).with(msgs[0])
@up_reporter.expects(:report).with(msgs[2])
@up_reporter.expects(:report).with(msgs[3])
msgs.each {|msg| @reporter.report(msg)}
end
end
end
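A short usage sketch of the deduplication these examples pin down (the
upstream reporter is a stand-in; any object responding to #report works):

up = Object.new
def up.report(msg); puts msg.inspect; end
proxy = Astute::ProxyReporter.new(up)
msg = {'nodes' => [{'uid' => '1', 'status' => 'deploying', 'progress' => 10}]}
proxy.report(msg) # forwarded upstream
proxy.report(msg) # identical message: suppressed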

View File

@ -0,0 +1,57 @@
#!/usr/bin/env rspec
require File.join(File.dirname(__FILE__), "..", "spec_helper")
describe "SimplePuppet DeploymentEngine" do
context "When deploy is called, " do
before(:each) do
@ctx = mock
@ctx.stubs(:task_id)
@ctx.stubs(:deploy_log_parser).returns(Astute::LogParser::NoParsing.new)
@reporter = mock('reporter')
@reporter.stub_everything
@ctx.stubs(:reporter).returns(Astute::ProxyReporter.new(@reporter))
@deploy_engine = Astute::DeploymentEngine::SimplePuppet.new(@ctx)
@env = YAML.load_file(File.join(File.dirname(__FILE__), "..", "..", "examples", "no_attrs.yaml"))
end
it "it should call valid method depends on attrs" do
nodes = [{'uid' => 1}]
attrs = {'deployment_mode' => 'ha_compute'}
@deploy_engine.expects(:attrs_ha_compute).never # It is not supported in SimplePuppet
@deploy_engine.expects(:deploy_ha_compute).with(nodes, attrs)
# All implementations of deploy_piece go to subclasses
@deploy_engine.respond_to?(:deploy_piece).should be_true
@deploy_engine.deploy(nodes, attrs)
end
it "it should raise an exception if deployment mode is unsupported" do
nodes = [{'uid' => 1}]
attrs = {'deployment_mode' => 'unknown'}
expect {@deploy_engine.deploy(nodes, attrs)}.to raise_exception(
/Method deploy_unknown is not implemented/)
end
it "multinode_compute deploy should not raise any exception" do
@env['attributes']['deployment_mode'] = "multinode_compute"
Astute::Metadata.expects(:publish_facts).never # It is not supported in SimplePuppet
# We expect two calls: one for the controller and another for all computes
Astute::PuppetdDeployer.expects(:deploy).twice
@deploy_engine.deploy(@env['nodes'], @env['attributes'])
end
it "ha_compute deploy should not raise any exception" do
@env['attributes']['deployment_mode'] = "ha_compute"
Astute::Metadata.expects(:publish_facts).never
Astute::PuppetdDeployer.expects(:deploy).times(5)
@deploy_engine.deploy(@env['nodes'], @env['attributes'])
end
it "singlenode_compute deploy should not raise any exception" do
@env['attributes']['deployment_mode'] = "singlenode_compute"
@env['nodes'] = [@env['nodes'][0]] # We have only one node in singlenode
Astute::Metadata.expects(:publish_facts).never
Astute::PuppetdDeployer.expects(:deploy).once # one call for one node
@deploy_engine.deploy(@env['nodes'], @env['attributes'])
end
end
end
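For reference, an environment of the shape these specs load looks roughly
like this (illustrative values, not the contents of examples/no_attrs.yaml):

env = {
  'task_uuid'  => 'deployment_task',
  'attributes' => {'deployment_mode' => 'multinode_compute'},
  'nodes'      => [{'uid' => 1, 'role' => 'controller'},
                   {'uid' => 2, 'role' => 'compute'}]
}
env['attributes']['deployment_mode'] # => "multinode_compute"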

View File

@ -149,6 +149,7 @@ module MCollective
action "last_run_summary" do
last_run_summary
set_status
end
action "enable" do
@ -169,9 +170,20 @@ module MCollective
private
def last_run_summary
summary = YAML.load_file(@last_summary)
# Wrap in begin..rescue: fixes PRD-252 (missing or unparsable summary file)
begin
summary = YAML.load_file(@last_summary)
rescue
summary = {}
end
reply[:resources] = {"failed"=>0, "changed"=>0, "total"=>0, "restarted"=>0, "out_of_sync"=>0}.merge(summary["resources"])
# Default to an empty hash if the 'resources' key is not defined; otherwise the merge below fails with TypeError
summary["resources"] ||= {}
# Astute relies on last_run, so we must set last_run
summary["time"] ||= {}
summary["time"]["last_run"] ||= 0
# If 'failed' is not provided, something went wrong, so the default is 1.
reply[:resources] = {"failed"=>1, "changed"=>0, "total"=>0, "restarted"=>0, "out_of_sync"=>0}.merge(summary["resources"])
["time", "events", "changes", "version"].each do |dat|
reply[dat.to_sym] = summary[dat]
@ -186,7 +198,8 @@ module MCollective
reply[:stopped] = reply[:status] == 'stopped' ? 1 : 0
reply[:lastrun] = 0
reply[:lastrun] = File.stat(@statefile).mtime.to_i if File.exists?(@statefile)
reply[:output] = "Currently #{reply[:status]}; last completed run #{Time.now.to_i - reply[:lastrun]} seconds ago"
reply[:runtime] = Time.now.to_i - reply[:lastrun]
reply[:output] = "Currently #{reply[:status]}; last completed run #{reply[:runtime]} seconds ago"
end
def puppet_daemon_status
@ -241,7 +254,7 @@ module MCollective
end
def runonce_background
cmd = [@puppetd, "--test"]
cmd = [@puppetd, "--onetime", "--logdest", 'syslog']
unless request[:forcerun]
if @splaytime && @splaytime > 0

View File

@ -154,6 +154,7 @@ module MCollective
action "last_run_summary" do
last_run_summary
set_status
end
action "enable" do
@ -174,9 +175,20 @@ module MCollective
private
def last_run_summary
summary = YAML.load_file(@last_summary)
# Wrap in begin..rescue: fixes PRD-252 (missing or unparsable summary file)
begin
summary = YAML.load_file(@last_summary)
rescue
summary = {}
end
reply[:resources] = {"failed"=>0, "changed"=>0, "total"=>0, "restarted"=>0, "out_of_sync"=>0}.merge(summary["resources"])
# Default to an empty hash if the 'resources' key is not defined; otherwise the merge below fails with TypeError
summary["resources"] ||= {}
# Astute relies on last_run, so we must set last_run
summary["time"] ||= {}
summary["time"]["last_run"] ||= 0
# If 'failed' is not provided, something went wrong, so the default is 1.
reply[:resources] = {"failed"=>1, "changed"=>0, "total"=>0, "restarted"=>0, "out_of_sync"=>0}.merge(summary["resources"])
["time", "events", "changes", "version"].each do |dat|
reply[dat.to_sym] = summary[dat]
@ -191,7 +203,8 @@ module MCollective
reply[:stopped] = reply[:status] == 'stopped' ? 1 : 0
reply[:lastrun] = 0
reply[:lastrun] = File.stat(@statefile).mtime.to_i if File.exists?(@statefile)
reply[:output] = "Currently #{reply[:status]}; last completed run #{Time.now.to_i - reply[:lastrun]} seconds ago"
reply[:runtime] = Time.now.to_i - reply[:lastrun]
reply[:output] = "Currently #{reply[:status]}; last completed run #{reply[:runtime]} seconds ago"
end
def puppet_daemon_status
@ -246,7 +259,7 @@ module MCollective
end
def runonce_background
cmd = [@puppetd, "--test"]
cmd = [@puppetd, "--onetime", "--logdest", 'syslog']
unless request[:forcerun]
if @splaytime && @splaytime > 0

View File

@ -1,172 +1,185 @@
module MCollective
module Agent
# An agent to manage the Puppet Daemon
#
# Configuration Options:
# puppetd.splaytime - Number of seconds within which to splay; no splay
# by default
# puppetd.statefile - Where to find the state.yaml file; defaults to
# /var/lib/puppet/state/state.yaml
# puppetd.lockfile - Where to find the lock file; defaults to
# /var/lib/puppet/state/puppetdlock
# puppetd.puppetd - Where to find the puppet agent binary; defaults to
# /usr/bin/puppet agent
# puppetd.summary - Where to find the summary file written by Puppet
# 2.6.8 and newer; defaults to
# /var/lib/puppet/state/last_run_summary.yaml
# puppetd.pidfile - Where to find puppet agent's pid file; defaults to
# /var/run/puppet/agent.pid
class Puppetd<RPC::Agent
def startup_hook
@splaytime = @config.pluginconf["puppetd.splaytime"].to_i || 0
@lockfile = @config.pluginconf["puppetd.lockfile"] || "/var/lib/puppet/state/puppetdlock"
@statefile = @config.pluginconf["puppetd.statefile"] || "/var/lib/puppet/state/state.yaml"
@pidfile = @config.pluginconf["puppet.pidfile"] || "/var/run/puppet/agent.pid"
@puppetd = @config.pluginconf["puppetd.puppetd"] || "/usr/bin/puppet agent"
@last_summary = @config.pluginconf["puppet.summary"] || "/var/lib/puppet/state/last_run_summary.yaml"
end
action "last_run_summary" do
last_run_summary
end
action "enable" do
enable
end
action "disable" do
disable
end
action "runonce" do
runonce
end
action "status" do
set_status
end
private
def last_run_summary
summary = YAML.load_file(@last_summary)
reply[:resources] = {"failed"=>0, "changed"=>0, "total"=>0, "restarted"=>0, "out_of_sync"=>0}.merge(summary["resources"])
["time", "events", "changes", "version"].each do |dat|
reply[dat.to_sym] = summary[dat]
end
end
def set_status
reply[:status] = puppet_daemon_status
reply[:running] = reply[:status] == 'running' ? 1 : 0
reply[:enabled] = reply[:status] == 'disabled' ? 0 : 1
reply[:idling] = reply[:status] == 'idling' ? 1 : 0
reply[:stopped] = reply[:status] == 'stopped' ? 1 : 0
reply[:lastrun] = 0
reply[:lastrun] = File.stat(@statefile).mtime.to_i if File.exists?(@statefile)
reply[:output] = "Currently #{reply[:status]}; last completed run #{Time.now.to_i - reply[:lastrun]} seconds ago"
end
def puppet_daemon_status
locked = File.exists?(@lockfile)
disabled = locked && File::Stat.new(@lockfile).zero?
has_pid = File.exists?(@pidfile)
return 'disabled' if disabled
return 'running' if locked && has_pid
return 'idling' if ! locked && has_pid
return 'stopped' if ! has_pid
end
def runonce
set_status
case (reply[:status])
when 'disabled' then # can't run
reply.fail "Empty Lock file exists; puppet agent is disabled."
when 'running' then # can't run two simultaneously
reply.fail "Lock file and PID file exist; puppet agent is running."
when 'idling' then # signal daemon
pid = File.read(@pidfile)
if pid !~ /^\d+$/
reply.fail "PID file does not contain a PID; got #{pid.inspect}"
else
begin
::Process.kill(0, Integer(pid)) # check that pid is alive
# REVISIT: Should we add an extra round of security here, and
# ensure that the PID file is securely owned, or that the target
# process looks like Puppet? Otherwise a malicious user could
# theoretically signal arbitrary processes with this...
begin
::Process.kill("USR1", Integer(pid))
reply[:output] = "Signalled daemonized puppet agent to run (process #{Integer(pid)}); " + (reply[:output] || '')
rescue Exception => e
reply.fail "Failed to signal the puppet agent daemon (process #{pid}): #{e}"
end
rescue Errno::ESRCH => e
# PID is invalid, run puppet onetime as usual
runonce_background
end
end
when 'stopped' then # just run
runonce_background
else
reply.fail "Unknown puppet agent status: #{reply[:status]}"
end
end
def runonce_background
cmd = [@puppetd, "--test"]
unless request[:forcerun]
if @splaytime && @splaytime > 0
cmd << "--splaylimit" << @splaytime << "--splay"
end
end
cmd = cmd.join(" ")
output = reply[:output] || ''
run(cmd, :stdout => :output, :chomp => true)
reply[:output] = "Called #{cmd}, " + output + (reply[:output] || '')
end
def enable
if File.exists?(@lockfile)
stat = File::Stat.new(@lockfile)
if stat.zero?
File.unlink(@lockfile)
reply[:output] = "Lock removed"
else
reply[:output] = "Currently running; can't remove lock"
end
else
reply.fail "Already enabled"
end
end
def disable
if File.exists?(@lockfile)
stat = File::Stat.new(@lockfile)
stat.zero? ? reply.fail("Already disabled") : reply.fail("Currently running; can't remove lock")
else
begin
File.open(@lockfile, "w") { |file| }
reply[:output] = "Lock created"
rescue Exception => e
reply.fail "Could not create lock: #{e}"
end
end
end
end
end
end
# vi:tabstop=2:expandtab:ai:filetype=ruby
module MCollective
module Agent
# An agent to manage the Puppet Daemon
#
# Configuration Options:
# puppetd.splaytime - Number of seconds within which to splay; no splay
# by default
# puppetd.statefile - Where to find the state.yaml file; defaults to
# /var/lib/puppet/state/state.yaml
# puppetd.lockfile - Where to find the lock file; defaults to
# /var/lib/puppet/state/puppetdlock
# puppetd.puppetd - Where to find the puppet agent binary; defaults to
# /usr/bin/puppet agent
# puppetd.summary - Where to find the summary file written by Puppet
# 2.6.8 and newer; defaults to
# /var/lib/puppet/state/last_run_summary.yaml
# puppetd.pidfile - Where to find puppet agent's pid file; defaults to
# /var/run/puppet/agent.pid
class Puppetd<RPC::Agent
def startup_hook
@splaytime = @config.pluginconf["puppetd.splaytime"].to_i || 0
@lockfile = @config.pluginconf["puppetd.lockfile"] || "/var/lib/puppet/state/puppetdlock"
@statefile = @config.pluginconf["puppetd.statefile"] || "/var/lib/puppet/state/state.yaml"
@pidfile = @config.pluginconf["puppet.pidfile"] || "/var/run/puppet/agent.pid"
@puppetd = @config.pluginconf["puppetd.puppetd"] || "/usr/bin/puppet agent"
@last_summary = @config.pluginconf["puppet.summary"] || "/var/lib/puppet/state/last_run_summary.yaml"
end
action "last_run_summary" do
last_run_summary
set_status
end
action "enable" do
enable
end
action "disable" do
disable
end
action "runonce" do
runonce
end
action "status" do
set_status
end
private
def last_run_summary
# Wrap in begin..rescue: fixes PRD-252 (missing or unparsable summary file)
begin
summary = YAML.load_file(@last_summary)
rescue
summary = {}
end
# Default to an empty hash if the 'resources' key is not defined; otherwise the merge below fails with TypeError
summary["resources"] ||= {}
# Astute relies on last_run, so we must set last_run
summary["time"] ||= {}
summary["time"]["last_run"] ||= 0
# If 'failed' is not provided, something went wrong, so the default is 1.
reply[:resources] = {"failed"=>1, "changed"=>0, "total"=>0, "restarted"=>0, "out_of_sync"=>0}.merge(summary["resources"])
["time", "events", "changes", "version"].each do |dat|
reply[dat.to_sym] = summary[dat]
end
end
def set_status
reply[:status] = puppet_daemon_status
reply[:running] = reply[:status] == 'running' ? 1 : 0
reply[:enabled] = reply[:status] == 'disabled' ? 0 : 1
reply[:idling] = reply[:status] == 'idling' ? 1 : 0
reply[:stopped] = reply[:status] == 'stopped' ? 1 : 0
reply[:lastrun] = 0
reply[:lastrun] = File.stat(@statefile).mtime.to_i if File.exists?(@statefile)
reply[:runtime] = Time.now.to_i - reply[:lastrun]
reply[:output] = "Currently #{reply[:status]}; last completed run #{reply[:runtime]} seconds ago"
end
def puppet_daemon_status
locked = File.exists?(@lockfile)
disabled = locked && File::Stat.new(@lockfile).zero?
has_pid = File.exists?(@pidfile)
return 'disabled' if disabled
return 'running' if locked && has_pid
return 'idling' if ! locked && has_pid
return 'stopped' if ! has_pid
end
def runonce
set_status
case (reply[:status])
when 'disabled' then # can't run
reply.fail "Empty Lock file exists; puppet agent is disabled."
when 'running' then # can't run two simultaneously
reply.fail "Lock file and PID file exist; puppet agent is running."
when 'idling' then # signal daemon
pid = File.read(@pidfile)
if pid !~ /^\d+$/
reply.fail "PID file does not contain a PID; got #{pid.inspect}"
else
begin
::Process.kill(0, Integer(pid)) # check that pid is alive
# REVISIT: Should we add an extra round of security here, and
# ensure that the PID file is securely owned, or that the target
# process looks like Puppet? Otherwise a malicious user could
# theoretically signal arbitrary processes with this...
begin
::Process.kill("USR1", Integer(pid))
reply[:output] = "Signalled daemonized puppet agent to run (process #{Integer(pid)}); " + (reply[:output] || '')
rescue Exception => e
reply.fail "Failed to signal the puppet agent daemon (process #{pid}): #{e}"
end
rescue Errno::ESRCH => e
# PID is invalid, run puppet onetime as usual
runonce_background
end
end
when 'stopped' then # just run
runonce_background
else
reply.fail "Unknown puppet agent status: #{reply[:status]}"
end
end
def runonce_background
cmd = [@puppetd, "--onetime", "--logdest", 'syslog']
unless request[:forcerun]
if @splaytime && @splaytime > 0
cmd << "--splaylimit" << @splaytime << "--splay"
end
end
cmd = cmd.join(" ")
output = reply[:output] || ''
run(cmd, :stdout => :output, :chomp => true)
reply[:output] = "Called #{cmd}, " + output + (reply[:output] || '')
end
def enable
if File.exists?(@lockfile)
stat = File::Stat.new(@lockfile)
if stat.zero?
File.unlink(@lockfile)
reply[:output] = "Lock removed"
else
reply[:output] = "Currently running; can't remove lock"
end
else
reply.fail "Already enabled"
end
end
def disable
if File.exists?(@lockfile)
stat = File::Stat.new(@lockfile)
stat.zero? ? reply.fail("Already disabled") : reply.fail("Currently running; can't remove lock")
else
begin
File.open(@lockfile, "w") { |file| }
reply[:output] = "Lock created"
rescue Exception => e
reply.fail "Could not create lock: #{e}"
end
end
end
end
end
end
# vi:tabstop=2:expandtab:ai:filetype=ruby
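A hedged sketch of exercising this agent from Ruby via the standard
MCollective RPC client API (the identity filter value is a placeholder):

require 'mcollective'
include MCollective::RPC

mc = rpcclient('puppetd')
mc.identity_filter 'node-1.example.com' # placeholder node identity
printrpc mc.last_run_summary            # hardened action: tolerates a bad summary file
printrpc mc.runonce                     # triggers puppet --onetime --logdest syslog
mc.disconnect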

View File

@ -18,7 +18,7 @@ class CobblerCase(CobblerTestCase):
remote = self.nodes().stomps[0].remote('public', login='root',
password='r00tme')
write_config(remote, '/tmp/nodes.yaml', config_text)
remote.check_stderr('astute_run /tmp/nodes.yaml')
remote.check_stderr('astute_run -v -f /tmp/nodes.yaml')
def test_orchestrating_simple(self):
Manifest().write_openstack_simple_manifest(
@ -30,7 +30,7 @@ class CobblerCase(CobblerTestCase):
remote = self.nodes().stomps[0].remote('public', login='root',
password='r00tme')
write_config(remote, '/tmp/nodes.yaml', config_text)
remote.check_stderr('astute_run /tmp/nodes.yaml')
remote.check_stderr('astute_run -v -f /tmp/nodes.yaml')
def test_orchestrating_compact(self):
Manifest().write_openstack_manifest(
@ -44,7 +44,7 @@ class CobblerCase(CobblerTestCase):
remote = self.nodes().stomps[0].remote('public', login='root',
password='r00tme')
write_config(remote, '/tmp/nodes.yaml', config_text)
remote.check_stderr('astute_run /tmp/nodes.yaml')
remote.check_stderr('astute_run -v -f /tmp/nodes.yaml')
def test_orchestrating_full(self):
Manifest().write_openstack_manifest(
@ -62,7 +62,7 @@ class CobblerCase(CobblerTestCase):
remote = self.nodes().stomps[0].remote('public', login='root',
password='r00tme')
write_config(remote, '/tmp/nodes.yaml', config_text)
remote.check_stderr('astute_run /tmp/nodes.yaml')
remote.check_stderr('astute_run -v -f /tmp/nodes.yaml')
if __name__ == '__main__':