Update makesystem for fuel-main repo

Dmitry Pyzhov 2013-08-29 21:26:17 +04:00
parent 8634dd38e9
commit 1e80bfc7bd
378 changed files with 650 additions and 71166 deletions

6
.gitmodules vendored

@@ -1,6 +0,0 @@
[submodule "fuel"]
path = fuel
url = https://github.com/Mirantis/fuel.git
[submodule "astute"]
path = astute
url = https://github.com/Mirantis/astute.git


@@ -72,6 +72,7 @@ include $(SOURCE_DIR)/rules.mk
include $(SOURCE_DIR)/sandbox.mk
# Modules
include $(SOURCE_DIR)/repos.mk
include $(SOURCE_DIR)/mirror/module.mk
include $(SOURCE_DIR)/packages/module.mk
include $(SOURCE_DIR)/bootstrap/module.mk

80
Vagrantfile vendored

@@ -1,80 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
ENVIRONMENT_SETUP_SCRIPT = <<-EOS
# To use this script, you must fetch fuel submodule:
# git submodule update
#!/bin/bash
grep -q devnailgun /etc/hosts || echo "10.0.2.15 devnailgun.mirantis.com devnailgun" >> /etc/hosts
sed 's/HOSTNAME=.*/HOSTNAME=devnailgun.mirantis.com/' -i /etc/sysconfig/network
echo "Installing puppet..."
rpm -Uhv http://fedora-mirror02.rbc.ru/pub/epel/6/i386/epel-release-6-8.noarch.rpm
rpm -ivh http://yum.puppetlabs.com/el/6/products/i386/puppetlabs-release-6-6.noarch.rpm
for pkg in `grep puppet /vagrant/requirements-rpm.txt`; do yum -y install $pkg; done
echo "Configuring puppet..."
grep -q devnailgun /etc/puppet/puppet.conf || echo " server = devnailgun.mirantis.com" >> /etc/puppet/puppet.conf
grep -q autosign /etc/puppet/puppet.conf || echo "\n[master]\n autosign = true" >> /etc/puppet/puppet.conf
chkconfig puppetmaster on; service puppetmaster restart
echo "Use fuel puppet modules to install mcollective&rabbitmq"
rm -f /etc/puppet/modules.old || :
mv /etc/puppet/modules /etc/puppet/modules.old || :
ln -sfT /fuel/deployment/puppet /etc/puppet/modules
mv /etc/puppet/manifests/site.pp /etc/puppet/manifests/site.pp.old || :
cat > /etc/puppet/manifests/site.pp << EOF
node default {
Exec {path => '/usr/bin:/bin:/usr/sbin:/sbin'}
class { mcollective::rabbitmq:
user => "mcollective",
password => "marionette",
}
class { mcollective::client:
pskey => "unset",
user => "mcollective",
password => "marionette",
host => "127.0.0.1",
port => "61613"
}
}
EOF
puppet agent --test
echo "Restoring site.pp and modules to previously set.."
mv /etc/puppet/modules.old /etc/puppet/modules || :
mv /etc/puppet/manifests/site.pp.old /etc/puppet/manifests/site.pp || :
echo "Installing mcollective..."
for pkg in `grep mcollective /vagrant/requirements-rpm.txt`; do yum -y install $pkg; done
chkconfig mcollective on
service mcollective start
# Debug tools
yum -y install strace bind-utils
yum -y install httpd
EOS
Vagrant::Config.run do |config|
config.vm.define :centos63 do |vm_config|
vm_config.vm.box = "centos63"
vm_config.vm.box_url = "http://srv08-srt.srt.mirantis.net/CentOS-6.3-x86_64-minimal.box"
vm_config.vm.customize ["modifyvm", :id, "--memory", 1024]
# Boot with a GUI so you can see the screen. (Default is headless)
#config.vm.boot_mode = :gui
config.vm.share_folder "v-data", "/fuel", "./fuel"
# extra network for testing
vm_config.vm.network :hostonly, '10.1.1.2', :adapter => 2
vm_config.vm.provision :shell, :inline => ENVIRONMENT_SETUP_SCRIPT
end
end

1
astute

@@ -1 +0,0 @@
Subproject commit 861af62ba2c56a389bda9893e30d2a084c10d746

463
bin/agent

@@ -1,463 +0,0 @@
#!/usr/bin/env ruby
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
begin
require 'rubygems'
rescue LoadError
end
require 'ohai/system'
require 'json'
require 'httpclient'
require 'logger'
require 'optparse'
require 'yaml'
require 'ipaddr'
require 'rethtool'
unless Process.euid == 0
puts "You must be root"
exit 1
end
ENV['PATH'] = "/bin:/usr/bin:/sbin:/usr/sbin:/usr/local/bin:/usr/local/sbin"
AGENT_CONFIG = "/etc/nailgun-agent/config.yaml"
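# When present, the config file is expected to be YAML with a single 'url' key
# pointing at the Nailgun API (see the YAML.load_file call near the bottom of
# this script). A purely illustrative example:
#   url: http://10.20.0.2:8000/api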
class McollectiveConfig
def initialize(logger)
@logger = logger
@configfile = '/etc/mcollective/server.cfg'
end
def get_config_by_key(find_key)
found_key = nil
found_value = nil
# This code is from mcollective's sources
File.open(@configfile, "r").each do |line|
# strip blank spaces, tabs etc off the end of all lines
line.gsub!(/\s*$/, "")
unless line =~ /^#|^$/
if line =~ /(.+?)\s*=\s*(.+)/
key = $1
val = $2
if key == find_key
found_key = key
found_value = val
end
end
end
end
found_value if found_key
end
def replace_identity(new_id)
# check that the id complies with the requirements
raise 'Identities can only match /\w\.\-/' unless new_id.to_s.match(/^[\w\.\-]+$/)
value_from_config = get_config_by_key('identity')
if value_from_config == new_id.to_s
@logger.info "MCollective is up to date with identity = #{new_id}"
else
config = File.open(@configfile, "rb").read
if value_from_config
# Key found, but it has other value
@logger.info "Replacing identity in mcollective server.cfg to new value = '#{new_id}'"
config.gsub!(/^identity[ =].*$/, "identity = #{new_id}")
File.open(@configfile, "w") { |f| f.write(config) }
else # if key was not found
config += "\nidentity = #{new_id}\n"
@logger.info "Identity in mcollective server.cfg has not been found. Setting to '#{new_id}'"
File.open(@configfile, "w") { |f| f.write(config) }
end
# The generic init script for mcollective is broken
# at least on Ubuntu, so restarting it this way seems
# better than shipping our own package.
puts `/etc/init.d/mcollective stop; /etc/init.d/mcollective start`
end
end
end
class NodeAgent
def initialize(logger, url=nil)
@logger = logger
@api_default_address = "localhost"
@api_default_port = "8000"
@api_url = url
if @api_url
@api_url.chomp!('/')
@api_ip = @api_url.match(/\bhttp:\/\/((\d{1,3}\.){3}\d{1,3})/)[1]
else
begin
cmdline = ::File.read("/proc/cmdline")
@api_ip = cmdline.match(/\burl=http:\/\/((\d{1,3}\.){3}\d{1,3})/)[1]
@logger.info("Found admin node IP address in kernel cmdline: #{@api_ip}")
rescue
@logger.info("Can't get API url from /proc/cmdline. Will use localhost.")
@api_ip = "127.0.0.1"
end
@api_url = "http://#{@api_ip}:#{@api_default_port}/api"
end
@os = Ohai::System.new()
@os.all_plugins
end
def put
headers = {"Content-Type" => "application/json"}
@logger.debug("Trying to put host info into #{@api_url}")
res = htclient.put("#{@api_url}/nodes/", [_data].to_json, headers)
if res.status < 200 or res.status >= 300
@logger.error("HTTP PUT failed: #{res.inspect}")
end
res
end
def post
headers = {"Content-Type" => "application/json"}
@logger.debug("Trying to create host using #{@api_url}")
res = htclient.post("#{@api_url}/nodes/", _data.to_json, headers)
res
end
def htclient
client = HTTPClient.new
client.connect_timeout = 10
client.send_timeout = 10
client.receive_timeout = 10 # (mihgen): Nailgun may hang for a while, but 10sec should be enough for it to respond
client
end
def _interfaces
interfaces = @os[:network][:interfaces].inject([]) do |result, elm|
result << { :name => elm[0], :addresses => elm[1]["addresses"] }
end
interfaces << { "default_interface" => @os["network"]["default_interface"] }
interfaces << { "default_gateway" => @os["network"]["default_gateway"] }
interfaces
end
def _detailed
detailed_meta = {
:system => _system_info,
:interfaces => [],
:cpu => {
:total => (@os[:cpu][:total].to_i rescue nil),
:real => (@os[:cpu][:real].to_i rescue nil),
:spec => [],
},
:disks => [],
:memory => (_dmi_memory or _ohai_memory),
}
begin
(@os[:network][:interfaces] or {} rescue {}).each do |int, intinfo|
next if int =~ /^lo[:]?/
next if int =~ /^vir/
next if int =~ /^vnet/
next if int =~ /^br/
next if int =~ /\d+\.\d+$/
int_meta = {:name => int}
(intinfo[:addresses] or {} rescue {}).each do |addr, addrinfo|
if (addrinfo[:family] rescue nil) =~ /lladdr/
int_meta[:mac] = addr
begin
int_info = Rethtool::InterfaceSettings.new(int)
int_meta[:max_speed] = int_info.best_mode.speed
if int_info.current_mode.speed == :unknown
int_meta[:current_speed] = nil
else
int_meta[:current_speed] = int_info.current_mode.speed
end
rescue
int_meta[:current_speed] = nil
end
elsif (addrinfo[:family] rescue nil) =~ /^inet$/
int_meta[:ip] = addr
int_meta[:netmask] = addrinfo[:netmask] if addrinfo[:netmask]
end
end
detailed_meta[:interfaces] << int_meta
end
rescue Exception => e
@logger.error("Error '#{e.message}' in gathering interfaces metadata: #{e.backtrace}")
end
begin
(@os[:cpu] or {} rescue {}).each do |cpu, cpuinfo|
if cpu =~ /^[\d]+/ and cpuinfo
frequency = cpuinfo[:mhz].to_i rescue nil
begin
# ohai returns current frequency, try to get max if possible
max_frequency = `cat /sys/devices/system/cpu/cpu#{cpu}/cpufreq/cpuinfo_max_freq 2>/dev/null`.to_i / 1000
frequency = max_frequency if max_frequency > 0
rescue
end
detailed_meta[:cpu][:spec] << {
:frequency => frequency,
:model => (cpuinfo[:model_name].gsub(/ +/, " ") rescue nil)
}
end
end
rescue Exception => e
@logger.error("Error '#{e.message}' in gathering cpu metadata: #{e.backtrace}")
end
begin
(@os[:block_device] or {} rescue {}).each do |bname, binfo|
if /^(sd|vd|hd|cciss).+$/ =~ bname and binfo
dname = bname.gsub(/!/, '/')
# 512 bytes is the size of one sector by default
block_size = 512
fn = "/sys/block/#{bname}/queue/logical_block_size"
block_size = File.read(fn).to_i if File.exist? fn
block_size = 512 if block_size == 0
detailed_meta[:disks] << {
:name => dname,
:model => binfo[:model],
:size => (binfo[:size].to_i * block_size),
:disk => _disk_path_by_name(dname) || dname
}
end
end
rescue Exception => e
@logger.error("Error '#{e.message}' in gathering disks metadata: #{e.backtrace}")
end
detailed_meta
end
def _disk_path_by_name(name)
dn = "/dev/disk/by-path"
basepath = Dir["#{dn}/**?"].find{|f| /\/#{name}$/.match(File.readlink(f))}
basepath.split("/")[2..-1].join("/") if basepath
end
def _is_virtualbox
@os[:dmi][:system][:product_name] == "VirtualBox" rescue false
end
def _is_virtual
_is_virtualbox or @os[:virtualization][:role] == "guest" rescue false
end
def _manufacturer
if _is_virtualbox
@os[:dmi][:system][:product_name] rescue nil
elsif _is_virtual
@os[:virtualization][:system].upcase.strip rescue nil
else
@os[:dmi][:system][:manufacturer].strip rescue nil
end
end
def _product_name
unless _is_virtual
@os[:dmi][:system][:product_name].strip rescue nil
end
end
def _serial
@os[:dmi][:system][:serial_number].strip rescue nil
end
def _system_info
{
:manufacturer => _manufacturer,
:serial => _serial,
:product => _product_name,
:family => (@os[:dmi][:system][:family].strip rescue nil),
:version => (@os[:dmi][:system][:version].strip rescue nil),
:fqdn => (@os[:fqdn].strip rescue @os[:hostname].strip rescue nil),
}.delete_if { |key, value| value.nil? or value.empty? or value == "Not Specified" }
end
def _size(size, unit)
case unit
when /^kb$/i
size * 1024
when /^mb$/i
size * 1048576
when /^gb$/i
size * 1073741824
end
end
def _dmi_memory
dmi = `/usr/sbin/dmidecode`
info = {:devices => [], :total => 0, :maximum_capacity => 0, :slots => 0}
return nil if $?.to_i != 0
dmi.split(/\n\n/).each do |group|
if /^Physical Memory Array$/.match(group)
if /^\s*Maximum Capacity:\s+(\d+)\s+(mb|gb|kb)/i.match(group)
info[:maximum_capacity] += _size($1.to_i, $2)
end
if /^\s*Number Of Devices:\s+(\d+)/i.match(group)
info[:slots] += $1.to_i
end
elsif /^Memory Device$/.match(group)
device_info = {}
if /^\s*Size:\s+(\d+)\s+(mb|gb|kb)/i.match(group)
size = _size($1.to_i, $2)
device_info[:size] = size
info[:total] += size
else
next
end
if /^\s*Speed:\s+(\d+)\s+MHz/i.match(group)
device_info[:frequency] = $1.to_i
end
if /^\s*Type:\s+(.*?)$/i.match(group)
device_info[:type] = $1
end
#if /^\s*Locator:\s+(.*?)$/i.match(group)
# device_info[:locator] = $1
#end
info[:devices].push(device_info)
end
end
if info[:total] == 0
nil
else
info
end
end
def _ohai_memory
info = {}
size = @os['memory']['total'].gsub(/(kb|mb|gb)$/i, "").to_i rescue (return nil)
info[:total] = _size(size, $1)
info
end
def _master_ip detailed_data
detailed_data.each do |k, v|
if k.to_s =~ /^interfaces$/
detailed_data[k].each do |i|
begin
net = IPAddr.new "#{i[:ip]}/#{i[:netmask]}"
return i[:ip] if net.include? @api_ip
rescue
end
end
end
end
nil
end
def _data
res = {
:mac => (@os[:macaddress] rescue nil),
:ip => (@os[:ipaddress] rescue nil),
:os_platform => (@os[:platform] rescue nil)
}
begin
detailed_data = _detailed
res.merge!({
:ip => ((_master_ip detailed_data or @os[:ipaddress]) rescue nil),
:manufacturer => _manufacturer,
:platform_name => _product_name,
:meta => detailed_data
})
rescue Exception => e
@logger.error("Error '#{e.message}' in metadata calculation: #{e.backtrace}")
end
res[:status] = @node_state if @node_state
res[:is_agent] = true
res
end
def update_state
@node_state = nil
if File.exist?("/etc/nailgun_systemtype")
fl = File.open("/etc/nailgun_systemtype", "r")
system_type = fl.readline.rstrip
@node_state = "discover" if system_type == "bootstrap"
end
end
def prevent_discover?
File.exist?('/var/run/nodiscover')
end
end
def write_data_to_file(logger, filename, data)
if File.exist?(filename)
# Read the whole file here (not inside a block) so that `text` stays in scope below
text = File.read(filename)
else
text = ''
end
if text != data
begin
File.open(filename, 'w') do |fo|
fo.write(data)
end
logger.info("Wrote data to file '#{filename}'. Data: #{data}")
rescue Exception => e
logger.warning("Can't write data to file '#{filename}'. Reason: #{e.message}")
end
else
logger.info("File '#{filename}' is up to date.")
end
end
logger = Logger.new(STDOUT)
logger.level = Logger::DEBUG
begin
logger.info("Trying to load agent config #{AGENT_CONFIG}")
url = YAML.load_file(AGENT_CONFIG)['url']
logger.info("Obtained service url from config file: '#{url}'")
rescue Exception => e
logger.info("Could not get url from configuration file: #{e.message}, trying other ways..")
end
agent = NodeAgent.new(logger, url)
if agent.prevent_discover?
logger.info("Discover prevented by /var/run/nodiscover presence.")
exit 1
end
agent.update_state
begin
post_res = agent.post
if post_res.status == 409
put_res = agent.put
new_id = JSON.parse(put_res.body)[0]['id']
elsif post_res.status == 201
new_id = JSON.parse(post_res.body)['id']
else
logger.error post_res.body
exit 1
end
mc_config = McollectiveConfig.new(logger)
mc_config.replace_identity(new_id)
write_data_to_file(logger, '/etc/nailgun_uid', new_id.to_s)
rescue => ex
# NOTE(mihgen): There is no need to retry - cron will do it for us
logger.error "#{ex.message}\n#{ex.backtrace}"
end


@@ -1 +0,0 @@
* * * * * root flock -w 0 -o /var/lock/agent.lock -c "/opt/nailgun/bin/agent >> /var/log/nailgun-agent.log 2>&1"


@@ -1,498 +0,0 @@
#!/usr/bin/python
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import signal
import string
import re
import json
import time
import logging
from logging.handlers import SysLogHandler
from optparse import OptionParser, OptionGroup
# Add syslog levels to logging module.
logging.NOTICE = 25
logging.ALERT = 60
logging.EMERG = 70
logging.addLevelName(logging.NOTICE, 'NOTICE')
logging.addLevelName(logging.ALERT, 'ALERT')
logging.addLevelName(logging.EMERG, 'EMERG')
SysLogHandler.priority_map['NOTICE'] = 'notice'
SysLogHandler.priority_map['ALERT'] = 'alert'
SysLogHandler.priority_map['EMERG'] = 'emerg'
# Define data and message format according to RFC 5424.
rfc5424_format = '{version} {timestamp} {hostname} {appname} {procid}'\
' {msgid} {structured_data} {msg}'
date_format = '%Y-%m-%dT%H:%M:%SZ'
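# A message rendered with rfc5424_format/date_format above looks roughly like
# this (all values are illustrative):
#   1 2013-08-29T17:26:17Z bootstrap anaconda - - - Starting installer, one moment...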
# Define global semaphore.
sending_in_progress = 0
# Define file types.
msg_levels = {'ruby': {'regex': '(?P<level>[DIWEF]), \[[0-9-]{10}T',
'levels': {'D': logging.DEBUG,
'I': logging.INFO,
'W': logging.WARNING,
'E': logging.ERROR,
'F': logging.FATAL
}
},
'syslog': {'regex': ('[0-9-]{10}T[0-9:]{8}Z (?P<level>'
'debug|info|notice|warning|err|crit|'
'alert|emerg)'),
'levels': {'debug': logging.DEBUG,
'info': logging.INFO,
'notice': logging.NOTICE,
'warning': logging.WARNING,
'err': logging.ERROR,
'crit': logging.CRITICAL,
'alert': logging.ALERT,
'emerg': logging.EMERG
}
},
'anaconda': {'regex': ('[0-9:]{8},[0-9]+ (?P<level>'
'DEBUG|INFO|WARNING|ERROR|CRITICAL)'),
'levels': {'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL
}
}
}
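# Illustrative lines that the regexes above are intended to match (values are made up):
#   ruby:     I, [2013-08-29T21:26:17.123456 #1234]  INFO -- : starting agent
#   syslog:   2013-08-29T17:26:17Z notice interface eth0 is up
#   anaconda: 17:26:17,123 INFO moving to step installpackages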
relevel_errors = {
'anaconda': [
{
'regex': 'Error downloading \
http://.*/images/(product|updates).img: HTTP response code said error',
'levelfrom': logging.ERROR,
'levelto': logging.WARNING
},
{
'regex': 'got to setupCdrom without a CD device',
'levelfrom': logging.ERROR,
'levelto': logging.WARNING
}
]
}
# Create a main logger.
logging.basicConfig(format='%(levelname)s: %(message)s')
main_logger = logging.getLogger()
main_logger.setLevel(logging.NOTSET)
class WatchedFile:
""" WatchedFile(filename) => Object that read lines from file if exist. """
def __init__(self, name):
self.name = name
self.fo = None
self.where = 0
def reset(self):
if self.fo:
self.fo.close()
self.fo = None
self.where = 0
def _checkRewrite(self):
try:
if os.stat(self.name)[6] < self.where:
self.reset()
except OSError:
self.close()
def readLines(self):
"""Return list of last append lines from file if exist. """
self._checkRewrite()
if not self.fo:
try:
self.fo = open(self.name, 'r')
except IOError:
return ()
lines = self.fo.readlines()
self.where = self.fo.tell()
return lines
def close(self):
self.reset()
class WatchedGroup:
""" Can send data from group of specified files to specified servers. """
def __init__(self, servers, files, name):
self.servers = servers
self.files = files
self.log_type = files.get('log_type', 'syslog')
self.name = name
self._createLogger()
def _createLogger(self):
self.watchedfiles = []
logger = logging.getLogger(self.name)
logger.setLevel(logging.NOTSET)
logger.propagate = False
# Create log formatter.
format_dict = {'version': '1',
'timestamp': '%(asctime)s',
'hostname': config['hostname'],
'appname': self.files['tag'],
'procid': '-',
'msgid': '-',
'structured_data': '-',
'msg': '%(message)s'
}
log_format = rfc5424_format.format(**format_dict)
formatter = logging.Formatter(log_format, date_format)
# Add log handler for each server.
for server in self.servers:
port = 'port' in server and server['port'] or 514
syslog = SysLogHandler((server["host"], port))
syslog.setFormatter(formatter)
logger.addHandler(syslog)
self.logger = logger
# Create WatchedFile objects from list of files.
for name in self.files['files']:
self.watchedfiles.append(WatchedFile(name))
def send(self):
""" Send append data from files to servers. """
for watchedfile in self.watchedfiles:
for line in watchedfile.readLines():
line = line.strip()
level = self._get_msg_level(line, self.log_type)
# Get rid of duplicated information in anaconda logs
line = re.sub(
msg_levels[self.log_type]['regex'] + "\s*:?\s?",
"",
line
)
# Ignore meaningless errors
try:
for r in relevel_errors[self.log_type]:
if level == r['levelfrom'] and \
re.match(r['regex'], line):
level = r['levelto']
except KeyError:
pass
self.logger.log(level, line)
main_logger and main_logger.log(
level,
'From file "%s" send: %s' % (watchedfile.name, line)
)
@staticmethod
def _get_msg_level(line, log_type):
if log_type in msg_levels:
msg_type = msg_levels[log_type]
regex = re.match(msg_type['regex'], line)
if regex:
return msg_type['levels'][regex.group('level')]
return logging.INFO
def sig_handler(signum, frame):
""" Send all new data when signal arrived. """
if not sending_in_progress:
send_all()
exit(signum)
else:
config['run_once'] = True
def send_all():
""" Send any updates. """
sending_in_progress = 1
for group in watchlist:
group.send()
sending_in_progress = 0
def main_loop():
""" Periodicaly call sendlogs() for each group in watchlist. """
signal.signal(signal.SIGINT, sig_handler)
signal.signal(signal.SIGTERM, sig_handler)
while watchlist:
time.sleep(0.5)
send_all()
# If asked to run_once, exit now
if config['run_once']:
break
class Config:
""" Collection of config generation methods.
Usage: config = Config.getConfig()
"""
@classmethod
def getConfig(cls):
""" Generate config from command line arguments and config file. """
# example_config = {
# "daemon": True,
# "run_once": False,
# "debug": False,
# "watchlist": [
# {"servers": [ {"host": "localhost", "port": 514} ],
# "watchfiles": [
# {"tag": "anaconda",
# "log_type": "anaconda",
# "files": ["/tmp/anaconda.log",
# "/mnt/sysimage/root/install.log"]
# }
# ]
# }
# ]
# }
default_config = {"daemon": True,
"run_once": False,
"debug": False,
"hostname": cls._getHostname(),
"watchlist": []
}
# First use default config as running config.
config = dict(default_config)
# Get command line options and validate it.
cmdline = cls.cmdlineParse()[0]
# Check config file source and read it.
if cmdline.config_file or cmdline.stdin_config:
try:
if cmdline.stdin_config is True:
fo = sys.stdin
else:
fo = open(cmdline.config_file, 'r')
parsed_config = json.load(fo)
if cmdline.debug:
print parsed_config
except IOError: # Raised if IO operations failed.
main_logger.error("Can not read config file %s\n" %
cmdline.config_file)
exit(1)
except ValueError as e: # Raised if json parsing failed.
main_logger.error("Can not parse config file. %s\n" %
e.message)
exit(1)
# Validate config from config file.
cls.configValidate(parsed_config)
# Copy gathered config from config file to running config
# structure.
for key, value in parsed_config.items():
config[key] = value
else:
# If no config file specified use watchlist setting from
# command line.
watchlist = {"servers": [{"host": cmdline.host,
"port": cmdline.port}],
"watchfiles": [{"tag": cmdline.tag,
"log_type": cmdline.log_type,
"files": cmdline.watchfiles}]}
config['watchlist'].append(watchlist)
# Apply behavioural command line options to running config.
if cmdline.no_daemon:
config["daemon"] = False
if cmdline.run_once:
config["run_once"] = True
if cmdline.debug:
config["debug"] = True
return config
@staticmethod
def _getHostname():
""" Generate hostname by BOOTIF kernel option or use os.uname()."""
with open('/proc/cmdline') as fo:
cpu_cmdline = fo.readline().strip()
regex = re.search('(?<=BOOTIF=)([0-9a-fA-F-]*)', cpu_cmdline)
if regex:
mac = regex.group(0).upper()
return ''.join(mac.split('-'))
return os.uname()[1]
@staticmethod
def cmdlineParse():
""" Parse command line config options. """
parser = OptionParser()
parser.add_option("-c", "--config", dest="config_file", metavar="FILE",
help="Read config from FILE.")
parser.add_option("-i", "--stdin", dest="stdin_config", default=False,
action="store_true", help="Read config from Stdin.")
# FIXME: Add OptionGroups.
parser.add_option("-r", "--run-once", dest="run_once",
action="store_true", help="Send all data and exit.")
parser.add_option("-n", "--no-daemon", dest="no_daemon",
action="store_true", help="Do not daemonize.")
parser.add_option("-d", "--debug", dest="debug",
action="store_true", help="Print debug messages.")
parser.add_option("-t", "--tag", dest="tag", metavar="TAG",
help="Set tag of sending messages as TAG.")
parser.add_option("-T", "--type", dest="log_type", metavar="TYPE",
default='syslog',
help="Set type of files as TYPE"
"(default: %default).")
parser.add_option("-f", "--watchfile", dest="watchfiles",
action="append",
metavar="FILE", help="Add FILE to watchlist.")
parser.add_option("-s", "--host", dest="host", metavar="HOSTNAME",
help="Set destination as HOSTNAME.")
parser.add_option("-p", "--port", dest="port", type="int", default=514,
metavar="PORT",
help="Set remote port as PORT (default: %default).")
options, args = parser.parse_args()
# Validate gathered options.
if options.config_file and options.stdin_config:
parser.error("You must not set both options --config"
" and --stdin at the same time.")
exit(1)
if ((options.config_file or options.stdin_config) and
(options.tag or options.watchfiles or options.host)):
main_logger.warning("If --config or --stdin is set up options"
" --tag, --watchfile, --type,"
" --host and --port will be ignored.")
if (not (options.config_file or options.stdin_config) and
not (options.tag and options.watchfiles and options.host)):
parser.error("Options --tag, --watchfile and --host"
" must be set up at the same time.")
exit(1)
return options, args
@staticmethod
def _checkType(value, value_type, value_name='', msg=None):
""" Check correctness of type of value and exit if not. """
if not isinstance(value, value_type):
message = msg or "Value %r in config have type %r but"\
" %r is expected." %\
(value_name, type(value).__name__, value_type.__name__)
main_logger.error(message)
exit(1)
@classmethod
def configValidate(cls, config):
""" Validate types and names of data items in config. """
cls._checkType(config, dict, msg='Config must be a dict.')
for key in ("daemon", "run_once", "debug"):
if key in config:
cls._checkType(config[key], bool, key)
key = "hostname"
if key in config:
cls._checkType(config[key], basestring, key)
key = "watchlist"
if key in config:
cls._checkType(config[key], list, key)
else:
main_logger.error("There must be key %r in config." % key)
exit(1)
for item in config["watchlist"]:
cls._checkType(item, dict, "watchlist[n]")
key, name = "servers", "watchlist[n] => servers"
if key in item:
cls._checkType(item[key], list, name)
else:
main_logger.error("There must be key %r in %s in config." %
(key, '"watchlist[n]" item'))
exit(1)
key, name = "watchfiles", "watchlist[n] => watchfiles"
if key in item:
cls._checkType(item[key], list, name)
else:
main_logger.error("There must be key %r in %s in config." %
(key, '"watchlist[n]" item'))
exit(1)
for item2 in item["servers"]:
cls._checkType(item2, dict, "watchlist[n] => servers[n]")
key, name = "host", "watchlist[n] => servers[n] => host"
if key in item2:
cls._checkType(item2[key], basestring, name)
else:
main_logger.error("There must be key %r in %s in config." %
(key,
'"watchlist[n] => servers[n]" item'))
exit(1)
key, name = "port", "watchlist[n] => servers[n] => port"
if key in item2:
cls._checkType(item2[key], int, name)
for item2 in item["watchfiles"]:
cls._checkType(item2, dict, "watchlist[n] => watchfiles[n]")
key, name = "tag", "watchlist[n] => watchfiles[n] => tag"
if key in item2:
cls._checkType(item2[key], basestring, name)
else:
main_logger.error("There must be key %r in %s in config." %
(key,
'"watchlist[n] => watchfiles[n]" item'))
exit(1)
key = "log_type"
name = "watchlist[n] => watchfiles[n] => log_type"
if key in item2:
cls._checkType(item2[key], basestring, name)
key, name = "files", "watchlist[n] => watchfiles[n] => files"
if key in item2:
cls._checkType(item2[key], list, name)
else:
main_logger.error("There must be key %r in %s in config." %
(key,
'"watchlist[n] => watchfiles[n]" item'))
exit(1)
for item3 in item2["files"]:
name = "watchlist[n] => watchfiles[n] => files[n]"
cls._checkType(item3, basestring, name)
# Create global config.
config = Config.getConfig()
# Create list of WatchedGroup objects with different log names.
watchlist = []
i = 0
for item in config["watchlist"]:
for files in item['watchfiles']:
watchlist.append(WatchedGroup(item['servers'], files, str(i)))
i = i + 1
# Fork and loop
if config["daemon"]:
if not os.fork():
# Redirect the standard I/O file descriptors to /dev/null.
main_logger = None
DEVNULL = getattr(os, "devnull", "/dev/null")
os.open(DEVNULL, os.O_RDWR) # standard input (0)
os.dup2(0, 1) # Duplicate standard input to standard output (1)
os.dup2(0, 2) # Duplicate standard input to standard error (2)
main_loop()
sys.exit(1)
sys.exit(0)
else:
if not config['debug']:
main_logger = None
main_loop()


@@ -105,7 +105,8 @@ $(BUILD_DIR)/bootstrap/customize-initram-root.done: \
$(BUILD_DIR)/packages/rpm/build.done \
$(BUILD_DIR)/bootstrap/prepare-initram-root.done \
$(call find-files,$(SOURCE_DIR)/bootstrap/sync) \
$(SOURCE_DIR)/bin/send2syslog.py \
$(BUILD_DIR)/repos/nailgun.done \
$(call find-files,$(BUILD_DIR)/repos/nailgun/bin/send2syslog.py) \
$(SOURCE_DIR)/bootstrap/ssh/id_rsa.pub \
$(BUILD_DIR)/bootstrap/etc/yum.conf \
$(BUILD_DIR)/bootstrap/etc/yum.repos.d/base.repo
@@ -118,7 +119,7 @@ $(BUILD_DIR)/bootstrap/customize-initram-root.done: \
# Copying custom files
sudo rsync -aK $(SOURCE_DIR)/bootstrap/sync/ $(INITRAMROOT)
sudo cp -r $(SOURCE_DIR)/bin/send2syslog.py $(INITRAMROOT)/usr/bin
sudo cp -r $(BUILD_DIR)/repos/nailgun/bin/send2syslog.py $(INITRAMROOT)/usr/bin
# Enabling pre-init boot interface discovery
sudo chroot $(INITRAMROOT) chkconfig setup-bootdev on


@@ -18,9 +18,7 @@ MASTER_DNS?=10.20.0.1
MASTER_NETMASK?=255.255.255.0
MASTER_GW?=10.20.0.1
COMMIT_SHA:=$(shell git rev-parse --verify HEAD)
PRODUCT_VERSION:=3.2
FUEL_COMMIT_SHA:=$(shell cd fuel && git rev-parse --verify HEAD)
CENTOS_MAJOR:=6
CENTOS_MINOR:=4
@@ -40,6 +38,20 @@ NO_UI_OPTIMIZE:=0
# Do not copy RHEL repo to the iso
CACHE_RHEL:=0
# Repos and versions
NAILGUN_COMMIT?=origin/master
ASTUTE_COMMIT?=origin/master
FUELLIB_COMMIT?=origin/master
OSTF_TESTS_COMMIT?=92b4e5e8d10f1a45f7433d06eb3a5936adb4050e
OSTF_PLUGIN_COMMIT?=f1c7870793a3aa724673c30391d3255a0d9465d5
NAILGUN_REPO?=https://github.com/Mirantis/fuelweb.git
ASTUTE_REPO?=https://github.com/Mirantis/astute.git
FUELLIB_REPO?=https://github.com/Mirantis/fuel.git
OSTF_TESTS_REPO?=https://github.com/Mirantis/fuel-ostf-tests.git
OSTF_PLUGIN_REPO?=https://github.com/Mirantis/fuel-ostf-plugin.git
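# All of the variables above are set with ?=, so they can be overridden from the
# environment or the make command line, e.g. (branch and fork names are only examples):
#   make <target> NAILGUN_COMMIT=origin/my-feature NAILGUN_REPO=https://github.com/<fork>/fuelweb.git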
LOCAL_MIRROR_SRC:=$(LOCAL_MIRROR)/src
LOCAL_MIRROR_EGGS:=$(LOCAL_MIRROR)/eggs
LOCAL_MIRROR_GEMS:=$(LOCAL_MIRROR)/gems
@@ -129,18 +141,12 @@ MIRROR_EGGS?=http://pypi.python.org/simple
# NOTE(mihgen): removed gemcutter - it redirects to rubygems.org and has issues w/certificate now
MIRROR_GEMS?=http://rubygems.org
# FYI: For rhel cache we parse fuel/deployment/puppet/rpmcache/files/required-rpms.txt
REQUIRED_RPMS:=$(shell grep -v "^\\s*\#" $(SOURCE_DIR)/requirements-rpm.txt)
REQUIRED_DEBS:=$(shell grep -v "^\\s*\#" $(SOURCE_DIR)/requirements-deb.txt)
# FYI: Also we get eggs for ostf from fuel/deployment/puppet/nailgun/files/venv-ostf.txt file
REQUIRED_EGGS:=$(shell grep -v "^\\s*\#" $(SOURCE_DIR)/requirements-eggs.txt)
OSTF_EGGS:=$(shell grep -v "^\\s*\#" $(SOURCE_DIR)/fuel/deployment/puppet/nailgun/files/venv-ostf.txt)
REQUIRED_SRCS:=$(shell grep -v ^\\s*\# $(SOURCE_DIR)/requirements-src.txt)
REQ_RHEL_RPMS:=$(shell grep -v "^\\s*\#" $(SOURCE_DIR)/fuel/deployment/puppet/rpmcache/files/required-rpms.txt)
REQ_FUEL_RHEL_RPMS:=$(shell grep -v "^\\s*\#" $(SOURCE_DIR)/fuel/deployment/puppet/rpmcache/files/req-fuel-rhel.txt)
OSTF_PLUGIN_SHA?=f1c7870793a3aa724673c30391d3255a0d9465d5
OSTF_PLUGIN_VER?=0.2
OSTF_TESTS_SHA?=92b4e5e8d10f1a45f7433d06eb3a5936adb4050e
OSTF_TESTS_VER?=0.1
# Which repositories to use for making the local centos mirror.
# Possible values can be found in the mirror/centos/yum_repos.mk file.

2
docs/.gitignore vendored

@@ -1,2 +0,0 @@
_build/
plantuml.jar


@@ -1,159 +0,0 @@
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
PLANTUML = plantuml.jar
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
-rm -rf $(BUILDDIR)/*
$(PLANTUML):
wget http://downloads.sourceforge.net/project/plantuml/plantuml.jar -O $(PLANTUML)
$(ACTION.TOUCH)
html: $(PLANTUML)
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/scaffold.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/scaffold.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/scaffold"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/scaffold"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."


@@ -1,4 +0,0 @@
<li class="dropdown">
<a href="{{ pathto(master_doc) }}" class="dropdown-toggle" data-toggle="dropdown">{{ _('Site') }} <b class="caret"></b></a>
<ul class="dropdown-menu globaltoc">{{ toctree(maxdepth=1) }}</ul>
</li>


@@ -1,134 +0,0 @@
{% extends "basic/layout.html" %}
{% set script_files = script_files + ['_static/bootstrap.js'] %}
{% set css_files = ['_static/bootstrap.css', '_static/bootstrap-sphinx.css'] + css_files %}
{# Sidebar: Rework into our Boostrap nav section. #}
{% macro navBar() %}
<div id="navbar" class="navbar navbar-fixed-top">
<div class="navbar-inner">
<div class="container-fluid">
<a class="brand" href="{{ pathto(master_doc) }}">{{ project|e }}</a>
<span class="navbar-text pull-left"><b>{{ version|e }}</b></span>
<ul class="nav">
<li class="divider-vertical"></li>
{% block sidebartoc %}
{% include "globaltoc.html" %}
{% include "localtoc.html" %}
{% endblock %}
{% block sidebarrel %}
{% include "relations.html" %}
{% endblock %}
{% block sidebarsourcelink %}
{% include "sourcelink.html" %}
{% endblock %}
</ul>
{% block sidebarsearch %}
{% include "searchbox.html" %}
{% endblock %}
</ul>
</div>
</div>
</div>
</div>
{% endmacro %}
{%- block extrahead %}
<script type="text/javascript">
(function () {
/**
* Patch TOC list.
*
* Will mutate the underlying span to have a correct ul for nav.
*
* @param $span: Span containing nested UL's to mutate.
* @param minLevel: Starting level for nested lists. (1: global, 2: local).
*/
var patchToc = function ($ul, minLevel) {
var findA;
// Find all a "internal" tags, traversing recursively.
findA = function ($elem, level) {
var level = level || 0,
$items = $elem.find("> li > a.internal, > ul, > li > ul");
// Iterate everything in order.
$items.each(function (index, item) {
var $item = $(item),
tag = item.tagName.toLowerCase(),
pad = 15 + ((level - minLevel) * 10);
if (tag === 'a' && level >= minLevel) {
// Add to existing padding.
$item.css('padding-left', pad + "px");
console.log(level, $item, 'padding-left', pad + "px");
} else if (tag === 'ul') {
// Recurse.
findA($item, level + 1);
}
});
};
console.log("HERE");
findA($ul);
};
$(document).ready(function () {
// Add styling, structure to TOC's.
$(".dropdown-menu").each(function () {
$(this).find("ul").each(function (index, item){
var $item = $(item);
$item.addClass('unstyled');
});
$(this).find("li").each(function () {
$(this).parent().append(this);
});
});
// Patch in level.
patchToc($("ul.globaltoc"), 2);
patchToc($("ul.localtoc"), 2);
// Enable dropdown.
$('.dropdown-toggle').dropdown();
});
}());
</script>
{% endblock %}
{% block header %}{{ navBar() }}{% endblock %}
{# Silence the sidebar's, relbar's #}
{% block sidebar1 %}{% endblock %}
{% block sidebar2 %}{% endblock %}
{% block relbar1 %}{% endblock %}
{% block relbar2 %}{% endblock %}
{%- block content %}
<div class="container">
{% block body %} {% endblock %}
</div>
{%- endblock %}
{%- block footer %}
<footer class="footer">
<div class="container">
<p class="pull-right"><a href="#">Back to top</a></p>
<p>
{%- if show_copyright %}
{%- if hasdoc('copyright') %}
{% trans path=pathto('copyright'), copyright=copyright|e %}&copy; <a href="{{ path }}">Copyright</a> {{ copyright }}.{% endtrans %}<br/>
{%- else %}
{% trans copyright=copyright|e %}&copy; Copyright {{ copyright }}.{% endtrans %}<br/>
{%- endif %}
{%- endif %}
{%- if last_updated %}
{% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}<br/>
{%- endif %}
{%- if show_sphinx %}
{% trans sphinx_version=sphinx_version|e %}Created using <a href="http://sphinx.pocoo.org/">Sphinx</a> {{ sphinx_version }}.{% endtrans %}<br/>
{%- endif %}
</p>
</div>
</footer>
{%- endblock %}


@@ -1,5 +0,0 @@
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown">{{ _('Page') }} <b class="caret"></b></a>
<ul class="dropdown-menu localtoc">{{ toc }}</ul>
<!--<span class="localtoc">{{ toc }}</span>-->
</li>


@@ -1,8 +0,0 @@
{%- if prev %}
<li><a href="{{ prev.link|e }}"
title="{{ _('previous chapter') }}">{{ "&laquo;"|safe }} {{ prev.title }}</a></li>
{%- endif %}
{%- if next %}
<li><a href="{{ next.link|e }}"
title="{{ _('next chapter') }}">{{ next.title }} {{ "&raquo;"|safe }}</a></li>
{%- endif %}


@@ -1,7 +0,0 @@
{%- if pagename != "search" %}
<form class="navbar-search pull-right" style="margin-bottom:-3px;" action="{{ pathto('search') }}" method="get">
<input type="text" name="q" placeholder="Search" />
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form>
{%- endif %}


@@ -1,4 +0,0 @@
{%- if show_source and has_source and sourcename %}
<li><a href="{{ pathto('_sources/' + sourcename, true)|e }}"
rel="nofollow">{{ _('Source') }}</a></li>
{%- endif %}

File diff suppressed because one or more lines are too long


@@ -1,24 +0,0 @@
/*
* bootstrap-sphinx.css
* ~~~~~~~~~~~~~~~~~~~~
*
* Sphinx stylesheet -- Twitter Bootstrap theme.
*/
body {
padding-top: 52px;
}
.navbar .brand {
color: #FFF;
text-shadow: #777 2px 2px 3px;
}
{%- block sidebarlogo %}
{%- if logo %}
.navbar h3 a, .navbar .brand {
background: transparent url("{{ logo }}") no-repeat 22px 3px;
padding-left: 62px;
}
{%- endif %}
{%- endblock %}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

Binary file not shown (before: 8.6 KiB).

Binary file not shown (before: 14 KiB).

File diff suppressed because one or more lines are too long


@@ -1,5 +0,0 @@
# Twitter Bootstrap Theme
[theme]
inherit = basic
stylesheet = basic.css
pygments_style = tango


@@ -1,98 +0,0 @@
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions += ['sphinx.ext.inheritance_diagram', 'sphinxcontrib.blockdiag', 'sphinxcontrib.actdiag', 'sphinxcontrib.seqdiag', 'sphinxcontrib.nwdiag']
# The encoding of source files.
source_encoding = 'utf-8-sig'
#source_encoding = 'shift_jis'
# The language for content autogenerated by Sphinx.
language = 'en'
#language = 'ja'
# The theme to use for HTML and HTML Help pages.
#html_theme = 'default'
#html_theme = 'sphinxdoc'
#html_theme = 'scrolls'
#html_theme = 'agogo'
#html_theme = 'traditional'
#html_theme = 'nature'
#html_theme = 'haiku'
# If this is not the empty string, a 'Last updated on:' timestamp
# is inserted at every page bottom, using the given strftime() format.
# Default is '%b %d, %Y' (or a locale-dependent equivalent).
html_last_updated_fmt = '%Y/%m/%d'
# Enable Antialiasing
blockdiag_antialias = True
actdiag_antialias = True
seqdiag_antialias = True
nwdiag_antialias = True
extensions += ['rst2pdf.pdfbuilder']
pdf_documents = [
(master_doc, project, project, copyright),
]
pdf_stylesheets = ['sphinx','kerning','a4','en']
pdf_language = "en"
# Mode for literal blocks wider than the frame. Can be
# overflow, shrink or truncate
#pdf_fit_mode = "shrink"
# Section level that forces a break page.
# For example: 1 means top-level sections start in a new page
# 0 means disabled
#pdf_break_level = 0
# When a section starts in a new page, force it to be 'even', 'odd',
# or just use 'any'
pdf_breakside = 'any'
# Insert footnotes where they are defined instead of
# at the end.
pdf_inline_footnotes = True
# verbosity level. 0 1 or 2
pdf_verbosity = 0
# If false, no index is generated.
pdf_use_index = True
# If false, no modindex is generated.
pdf_use_modindex = True
# If false, no coverpage is generated.
pdf_use_coverpage = True
# Name of the cover page template to use
#pdf_cover_template = 'sphinxcover.tmpl'
# Documents to append as an appendix to all manuals.
#pdf_appendices = []
# Enable experimental feature to split table cells. Use it
# if you get "DelayedTable too big" errors
#pdf_splittables = False
# Set the default DPI for images
#pdf_default_dpi = 72
# Enable rst2pdf extension modules (default is only vectorpdf)
# you need vectorpdf if you want to use sphinx's graphviz support
#pdf_extensions = ['vectorpdf']
# Page template name for "regular" pages
#pdf_page_template = 'cutePage'
# Show Table Of Contents at the beginning?
pdf_use_toc = True
# How many levels deep should the table of contents be?
pdf_toc_depth = 2
# Add section number to section references
pdf_use_numbered_links = False
# Background images fitting mode
pdf_fit_background_mode = 'scale'


@@ -1,254 +0,0 @@
# -*- coding: utf-8 -*-
#
# scaffold documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 25 14:02:29 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.insert(0, os.path.join(os.path.abspath('.'), "..", "nailgun"))
autodoc_default_flags = ['members']
autodoc_member_order = 'bysource'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'rst2pdf.pdfbuilder',
'sphinxcontrib.plantuml',
'nailgun.autoapidoc'
]
plantuml = ['java','-jar','plantuml.jar']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Fuel'
copyright = u'2012, Mirantis'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0'
# The full version, including alpha/beta/rc tags.
release = '3.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_templates"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'scaffolddoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'scaffold.tex', u'scaffold Documentation',
u'Mike Scherbakov', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'scaffold', u'scaffold Documentation',
[u'Mike Scherbakov'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'scaffold', u'scaffold Documentation',
u'Mike Scherbakov', 'scaffold', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Additional Settings -------------------------------------------------------
execfile('./common_conf.py')


@@ -1,14 +0,0 @@
.. _develop:
Development Documentation
=========================
.. toctree::
:maxdepth: 2
develop/logical_arch
develop/sequence
develop/env
develop/nailgun
develop/alternatives
develop/api_doc


@@ -1,121 +0,0 @@
Alternatives
============
Metadata via Puppet ENC (part of architecture)
----------------------------------------------
This is an alternative possible architecture.
See the corresponding sequence diagram and more information here: :ref:`deploy_via_enc_sequence`.
.. uml::
package "Master Node" {
[Async RPC consumer(Naily)] --> [Orchestrator]
[Orchestrator] --> [MCollective]
[Orchestrator] <-- [YAML data source]
[Puppet Master] --> [ENC Script]
[ENC Script] --> [YAML data source]
}
package "Target Node" {
[MCollective Agent] --> [Puppet]
}
actor CLI_User
CLI_User --> [YAML data source]
CLI_User --> [Orchestrator]
[MCollective] --> [MCollective Agent]
[Puppet] --> [Puppet Master]
.. _deploy_via_enc_sequence:
Alternative Implementation for deployment via ENC
-------------------------------------------------
.. uml::
title Diagram of ALTERNATIVE Implementation of Cluster Deployment
autonumber
actor WebUser
Nailgun -> Naily: Deploy cluster
Naily -> YAML_file: Store configuration
Naily -> Orchestrator: Deploy
Orchestrator -> YAML_file: get data
YAML_file --> Orchestrator: data
Orchestrator -> MC: nodes ready?
MC --> Orchestrator: ready
Orchestrator --> Naily: ready
Naily -> Nailgun: nodes booted
Nailgun --> WebUser: status on UI
|||
Orchestrator -> MC: run puppet
MC -> Puppet: runonce
Puppet -> Puppet_master: get modules,class
Puppet_master -> ENC: get class
ENC -> YAML_file: get class
YAML_file --> ENC: class to deploy
ENC --> Puppet_master: class
Puppet_master --> Puppet: modules, class
Puppet -> Puppet: applies $role
Puppet --> MC: done
MC --> Orchestrator: deploy is done
Orchestrator -> YAML_file: update info
Orchestrator --> Naily: deploy is done
Naily --> Nailgun: deploy is done
Nailgun --> WebUser: deploy is done
The alternative deployment schema differs in the following ways:
* Naily stores all deployment data into a YAML file before the deployment, and then calls the Orchestrator
* The Orchestrator loads node information from the YAML file and calls puppet via MCollective
* Puppet requests data from the Puppet Master
* Puppet uses the `ENC extension <http://docs.puppetlabs.com/guides/external_nodes.html>`_ to find out which
classes should be applied to a particular node. In a few words, ENC is a Puppet Master extension
that calls an external user-defined script
* The ENC script loads all required data from the YAML file (a minimal sketch follows this list)
* The YAML file could be replaced by some NoSQL DB
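For illustration only, a minimal ENC script might look like the sketch below. The data file path and the YAML layout (a top-level ``nodes`` mapping keyed by certname) are assumptions made for this example, not the actual Fuel format::

    #!/usr/bin/env python
    # Hypothetical ENC script: the Puppet Master invokes it with the node's
    # certname as the first argument and expects YAML on stdout.
    import sys

    import yaml

    DATA_FILE = '/var/lib/naily/config.yaml'  # assumed location of the YAML data source

    def main():
        certname = sys.argv[1]
        with open(DATA_FILE) as fo:
            data = yaml.safe_load(fo) or {}
        node = data.get('nodes', {}).get(certname, {})
        # ENC output is itself YAML: the classes to apply and node parameters.
        print(yaml.safe_dump({
            'classes': node.get('classes', []),
            'parameters': node.get('parameters', {}),
        }, default_flow_style=False))

    if __name__ == '__main__':
        main()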
Comparison of deployment approaches
-----------------------------------
Data from Facter
^^^^^^^^^^^^^^^^
Pros:
* Easy. Put a file on the node via MCollective, and we know what will be executed there. It's easy to check what has
been executed last time.
* No additional stateful components, which could otherwise lead to data inconsistency
* Easy to switch to a configuration without Puppet Master, or even to replace it with Chef Solo
* Requires time to place data on nodes before the puppet run, and a synchronous implementation: puppet should not
run before the node receives its role.
Cons:
* Doesn't look like the "Puppet" way, where the desired state of the cluster should be defined beforehand and Puppet
will converge the existing state to the desired state
Data from ENC
^^^^^^^^^^^^^
Pros:
* "Puppet" way, everything what is needed is defined in YAML file
* All information could be found in one place - YAML file
Cons:
* Naily should know the data structure in YAML file to do the merge. (however it can just call Orchestrator with
metadata, and Orchestrator will write data to YAML file)
* Requires additional stateful component - YAML file, what may lead to data inconsistency
* Puppet Master must be installed on the same node as Orchestrator (to access YAML file). Even if YAML file
is replaced to NoSQL DB, ENC script still has to be present on Puppet Master node.
* With increase of deployment complexity and metadata, YAML file will increase in size. It also should contain
information about all clusters and all nodes consequently, which could become a bottleneck for loading data
in case of hundrends nodes and thousand requests. Separation of YAML structure in cluster-based will not help
because there will be need to pass cluster identifier to puppet, what's unclear how to do besides facter
extension.
* More complex code for Naily(or Orchestrator) is required to do merges of existing data in YAML file and new data,
code to prevent concurrency issues. It would be even more complex with Updates feature, when it would require
of a sequence of actions performed in a specific order.
* Let's say we have attribute { 'keystone' => { 'data_dir' => '/var/lib/keystone' } }, and we want to update our
cluster to new version of OpenStack, node by node, where data_dir location is different. In case with NailyFact,
it's easy - just write facts on target node and run puppet on it, other nodes will not be affected (they still
have settings for old data_dir location). In case with data from ENC it's much more complex, because there is
only single DB - YAML file for the whole cluster. It means it would not be possible to run puppet on old nodes
if they should not be updated yet.

View File

@ -1,70 +0,0 @@
REST API Reference
==================
.. contents:: :local:
Releases API
------------
.. automodule:: nailgun.api.handlers.release
Clusters API
------------
.. automodule:: nailgun.api.handlers.cluster
Nodes API
---------
.. automodule:: nailgun.api.handlers.node
Disks API
---------
.. automodule:: nailgun.api.handlers.disks
Network Configuration API
-------------------------
.. automodule:: nailgun.api.handlers.network_configuration
Notifications API
-----------------
.. automodule:: nailgun.api.handlers.notifications
Tasks API
-----------------
.. automodule:: nailgun.api.handlers.tasks
Logs API
-----------------
.. automodule:: nailgun.api.handlers.logs
Plugin API
----------
.. automodule:: nailgun.api.handlers.plugin
Redhat API
-----------------
.. automodule:: nailgun.api.handlers.redhat
Version API
-----------------
.. automodule:: nailgun.api.handlers.version

View File

@ -1,273 +0,0 @@
Fuel Development Environment
============================
The basic OS for Fuel development is Ubuntu Linux. The setup instructions below
assume Ubuntu 13.04; most of them should be applicable to other Ubuntu
and Debian versions, too.
Each subsequent section below assumes that you have followed the steps
described in all preceding sections. By the end of this document, you
should be able to run and test all key components of Fuel, build Fuel
master node installation ISO, and generate documentation.
Getting the Source Code
-----------------------
Clone the Mirantis FuelWeb repository from GitHub::
git clone git@github.com:Mirantis/fuelweb.git
cd fuelweb
git submodule init
git submodule update
All sections below assume you start in your clone of this repository.
Setup for Nailgun Unit Tests
----------------------------
#. Install Python dependencies (fysom has no deb package, and the
jsonschema deb is outdated, so these modules have to be installed
from PyPI)::
sudo apt-get install python-dev python-pip python-psycopg2 python-jinja2
sudo apt-get install python-paste python-yaml python-sqlalchemy python-kombu
sudo apt-get install python-crypto python-simplejson python-webpy
sudo apt-get install python-nose python-mock python-decorator python-flake8
sudo apt-get install python-netaddr python-netifaces
sudo easy_install -U setuptools==1.0
sudo easy_install -U pip==1.2.1
sudo pip install fysom jsonschema hacking==0.7 nose-timer
#. Install and configure PostgreSQL database::
sudo apt-get install postgresql postgresql-server-dev-9.1
sudo -u postgres createuser -SDRP nailgun (enter password nailgun)
sudo -u postgres createdb nailgun
#. Create required folder for log files::
sudo mkdir /var/log/nailgun
sudo chown -R `whoami`.`whoami` /var/log/nailgun
#. Run the Nailgun backend unit tests::
cd nailgun
./run_tests.sh --no-jslint --no-ui-tests
#. Run the Nailgun flake8 test::
cd nailgun
./run_tests.sh --flake8
Setup for Web UI Tests
----------------------
#. Install NodeJS (on Debian, you may need to use 'apt-get install -t
experimental' to get the latest npm; on Ubuntu 12.04, use the nodejs package
instead of nodejs-legacy) and CasperJS::
sudo apt-get install npm nodejs-legacy phantomjs
sudo npm install -g jslint requirejs
cd ~
git clone git://github.com/n1k0/casperjs.git
cd casperjs
git checkout tags/1.0.0-RC4
sudo ln -sf `pwd`/bin/casperjs /usr/local/bin/casperjs
#. Run full Web UI test suite (this will wipe your Nailgun database in
PostgreSQL)::
cd nailgun
./run_tests.sh --jslint
./run_tests.sh --ui-tests
Running Nailgun in Fake Mode
----------------------------
#. Populate the database from fixtures::
cd nailgun
./manage.py syncdb
./manage.py loaddefault # It loads all basic fixtures listed in settings.yaml
./manage.py loaddata nailgun/fixtures/sample_environment.json # Loads fake nodes
#. Start the application in "fake" mode, in which no real calls to the orchestrator
are performed::
python manage.py run -p 8000 --fake-tasks | egrep --line-buffered -v '^$|HTTP' >> /var/log/nailgun.log 2>&1 &
#. (optional) You can also use the --fake-tasks-amqp option if you want the
fake environment to use a real RabbitMQ instead of a fake one::
python manage.py run -p 8000 --fake-tasks-amqp | egrep --line-buffered -v '^$|HTTP' >> /var/log/nailgun.log 2>&1 &
Astute and Naily
----------------
#. Install Ruby dependencies::
sudo apt-get install git curl
\curl -L https://get.rvm.io | bash -s stable
rvm install 1.9.3
#. Install or update dependencies and run unit tests::
cd astute
./run_tests.sh
#. (optional) Run Astute MCollective integration test (you'll need to
have MCollective server running for this to work)::
cd astute
bundle exec rspec spec/integration/mcollective_spec.rb
Building the Fuel ISO
---------------------
#. The following software is required to build the Fuel ISO images on Ubuntu
12.10 or newer (on Ubuntu 12.04, use the nodejs package instead of
nodejs-legacy)::
sudo apt-get install build-essential make git ruby ruby-dev rubygems debootstrap
sudo apt-get install python-setuptools yum yum-utils libmysqlclient-dev isomd5sum
sudo apt-get install python-nose libvirt-bin python-ipaddr python-paramiko python-yaml
sudo apt-get install python-pip kpartx extlinux npm nodejs-legacy unzip genisoimage
sudo gem install bundler -v 1.2.1
sudo gem install builder
sudo pip install xmlbuilder jinja2
sudo npm install -g requirejs
#. (alternative) If you have completed the instructions in the previous
sections of the Fuel development environment setup guide, the list of
additional packages required to build the ISO becomes shorter::
sudo apt-get install ruby-dev ruby-builder bundler libmysqlclient-dev
sudo apt-get install yum-utils kpartx extlinux genisoimage isomd5sum
#. The ISO build process requires sudo permissions; allow yourself to run
commands as the root user without being prompted for a password::
#. If you haven't already done so, get the source code::
git clone https://github.com/Mirantis/fuelweb.git
cd fuelweb
git submodule init
git submodule update
#. Now you can build the Fuel ISO image::
make iso
Running the FuelWeb Integration Test
------------------------------------
#. Install libvirt and Devops library dependencies::
sudo apt-get install libvirt-bin python-libvirt python-ipaddr python-paramiko
sudo pip install xmlbuilder django==1.4.3
#. Configure permissions for libvirt and re-login or restart your X session for
the group changes to take effect (consult /etc/libvirt/libvirtd.conf
for the group name)::
GROUP=`grep unix_sock_group /etc/libvirt/libvirtd.conf|cut -d'"' -f2`
sudo usermod -a -G kvm `whoami`
sudo usermod -a -G $GROUP `whoami`
sudo chgrp $GROUP /var/lib/libvirt/images
sudo chmod g+w /var/lib/libvirt/images
#. Clone the Mirantis Devops virtual environment manipulation library
from GitHub and install it where FuelWeb Integration Test can find
it::
git clone git@github.com:Mirantis/devops.git
cd devops
python setup.py build
sudo python setup.py install
#. Configure and populate the Devops DB::
SETTINGS=/usr/local/lib/python2.7/dist-packages/devops-2.0-py2.7.egg/devops/settings.py
sed -i "s/'postgres'/'devops'/" $SETTINGS
echo "SECRET_KEY = 'secret'" >> $SETTINGS
sudo -u postgres createdb devops
sudo -u postgres createuser -SDR devops
django-admin.py syncdb --settings=devops.settings
#. Run the integration test::
cd fuelweb
make test-integration
#. To save time, you can execute individual test cases from the
integration test suite like this (the nice thing about TestAdminNode
is that it takes you from nothing to a Fuel master with 9 blank nodes
connected to 3 virtual networks)::
cd fuelweb
export ENV_NAME=fuelweb
export PUBLIC_FORWARD=nat
export ISO_PATH=`pwd`/build/iso/fuelweb-centos-6.4-x86_64.iso
nosetests -w fuelweb_test -s fuelweb_test.integration.test_admin_node:TestAdminNode.test_cobbler_alive
#. The test harness creates a snapshot of all nodes called 'empty'
before starting the tests, and creates a new snapshot if a test
fails. You can revert to a specific snapshot with this command::
dos.py revert --snapshot-name <snapshot_name> <env_name>
#. To fully reset your test environment, tell the Devops toolkit to erase it::
dos.py list
dos.py erase <env_name>
Running Fuel Puppet Modules Unit Tests
--------------------------------------
#. Install PuppetLabs RSpec Helper::
cd ~
gem2deb puppetlabs_spec_helper
sudo dpkg -i ruby-puppetlabs-spec-helper_0.4.1-1_all.deb
gem2deb rspec-puppet
sudo dpkg -i ruby-rspec-puppet_0.1.6-1_all.deb
#. Run unit tests for a Puppet module::
cd fuel/deployment/puppet/module
rake spec
Installing Cobbler
------------------
Install Cobbler from GitHub (it can't be installed from PyPI, and the deb
package in Ubuntu is outdated)::
cd ~
git clone git://github.com/cobbler/cobbler.git
cd cobbler
git checkout release24
sudo make install
Building Documentation
----------------------
#. You will need the following software to build documentation::
sudo apt-get install librsvg2-bin rst2pdf python-sphinx
sudo pip install sphinxcontrib-plantuml
sudo apt-get install python-sphinxcontrib.blockdiag # on Ubuntu 12.10 or higher
sudo pip install sphinxcontrib-blockdiag # on Ubuntu 12.04
#. Look at the list of available formats and generate the one you need::
cd docs
make help
make html
You will also need to install Java and PlantUML to automatically
generate UML diagrams from the source. You can also use `PlantUML Server
<http://www.plantuml.com/plantuml/>`_ for a quick preview of your
diagrams.

View File

@ -1,87 +0,0 @@
Logical Architecture
====================
The current architecture uses the so-called "Metadata via Facter Extension"
approach, inspired by the blog posts
`self-classifying puppet nodes <http://nuknad.com/2011/02/11/self-classifying-puppet-nodes/>`_,
`pulling a list of hosts from mcollective for puppet <http://nuknad.com/2011/01/07/pulling-a-list-of-hosts-from-mcollective-for-puppet/>`_,
`A Simple Puppet Function to Retrieve Information From the Stored Config
DB <http://blog.thesilentpenguin.com/blog/2012/02/22/a-simple-puppet-function-to-retrieve-information-from-the-stored-config-db/>`_,
`nodeless-puppet example <https://github.com/jordansissel/puppet-examples/tree/master/nodeless-puppet>`_.
In a nutshell, the Fuel deployment orchestration engine `Astute
<https://github.com/Mirantis/astute>`_ manages OS provisioning via
Cobbler, and uses an MCollective plugin to distribute a Facter facts
file that defines the node's role and other deployment variables for Puppet.
You can find a detailed breakdown of how this works in the
:doc:`Sequence Diagrams </develop/sequence>`.
The following components are involved in managing this process:
- Astute: deployment orchestrator, manages the Puppet cluster (via
MCollective) and the Cobbler provisioning service (over XML-RPC)
- Naily: RPC consumer implementing communication between Nailgun and
Astute over AMQP protocol
- Nailgun [#fn1]_: Web UI backend based on the web.py framework,
includes following sub-components:
- Nailgun DB: a relational database holding the current state of all
OpenStack clusters and provisioning tasks
- Data Model (api/models.py, fixtures/): the definition of NailgunDB
using SQLAlchemy ORM
- REST API (api/handlers/): controller layer of the Web UI, receives
REST requests from the JavaScript UI and routes them to other
Nailgun components
- RPC Receiver (rpc/): handles AMQP messages from Astute
- Task Manager (task/): creates and tracks background tasks
- JavaScript UI (static/js/): Web UI frontend based on the Twitter Bootstrap
framework, communicates with Nailgun using the REST API
In the current implementation the deployment business logic is spread
between Nailgun (primarily in Task, Network, and Volume Manager
components) and Astute. Going forward, all logic should be moved from
Astute to Nailgun, and Astute should become a simple executor of tasks
defined by Nailgun.
Communication paths between these components are illustrated on the
Logical Architecture Diagram:
.. uml::
package "Master Node" {
[JavaScript UI]
package "Nailgun Backend" {
package "SQL Database" <<Database>> {
[Nailgun DB]
}
[Nailgun DB] --> [Data Model]
[Data Model] <-- [REST API]
[RPC Receiver] --> [Nailgun DB]
}
[Provisioner (Cobbler)] --> [DHCP, DNS, TFTP]
[RPC Consumer (Naily)] --> [RPC Receiver] : AMQP
[RPC Consumer (Naily)] --> [Orchestrator (Astute)] : AMQP
[Orchestrator (Astute)] --> [MCollective]
[Puppet Master]
}
package "Target Node" {
[MCollective Agent] --> [Puppet]
}
actor Web_User
actor CLI_User
Web_User --> [JavaScript UI]
CLI_User --> [REST API]
[JavaScript UI] --> [REST API]
[Orchestrator (Astute)] --> [Provisioner (Cobbler)] : XML-RPC
[MCollective] --> [MCollective Agent]
[Puppet] --> [Puppet Master]
.. CLI User --> [Provisioner(cobbler)]
.. rubric:: Footnotes
.. [#fn1] Not to be confused with Nailgun the Java CLI accelerator

View File

@ -1,9 +0,0 @@
.. _nailgun:
Nailgun Development Instructions
================================
.. toctree::
nailgun/partitions
nailgun/reliability

View File

@ -1,176 +0,0 @@
Creating Partitions on Nodes
============================
Fuel uses Cobbler and Anaconda Kickstart to partition block devices on
new nodes. Most of the work is done in the *pmanager.py* Cobbler script
(found under templates/scripts/ in the Cobbler Puppet module) using the
data from the "ks_spaces" Cobbler variable generated by the *Nailgun
VolumeManager* class (nailgun/volumes/manager.py) based on the volumes
metadata defined in the *openstack.json* release fixture
(nailgun/fixtures/).
The preferred type of a volume for a Fuel slave node is "vg": an LVM
volume group that can contain one or more volumes with type set to "lv".
In cases where LVM is not suitable, it is also possible to create plain
partitions by setting volume type to "partition".
A typical slave node will always have an "os" volume group and one or more
volumes of other types, depending on the roles assigned to that node and
the role-to-volumes mapping defined in the "volumes_roles_mapping"
section of openstack.json.
There are a few different ways to add another volume to a slave node:
#. Add a new logical volume definition to one of the existing LVM volume
groups.
#. Create a new volume group containing your new logical volumes.
#. Create a new plain partition.
Adding an LV to an Existing Volume Group
----------------------------------------
If you need to add a new volume to an existing volume group, for
example "os", your volume definition in openstack.json will look like
this::
{
"id": "os",
"type": "vg",
"min_size": {"generator": "calc_min_os_size"},
"label": "Base System",
"volumes": [
{
"mount": "/",
"type": "lv",
"name": "root",
"size": {"generator": "calc_total_root_vg"},
"file_system": "ext4"
},
{
"mount": "swap",
"type": "lv",
"name": "swap",
"size": {"generator": "calc_swap_size"},
"file_system": "swap"
},
{
"mount": "/mnt/some/path",
"type": "lv",
"name": "LOGICAL_VOLUME_NAME",
"size": {
"generator": "calc_LOGICAL_VOLUME_size",
"generator_args": ["arg1", "arg2"]
},
"file_system": "ext4"
}
]
}
Make sure that your logical volume name ("LOGICAL_VOLUME_NAME" in the
example above) is not the same as the volume group name ("os").
Adding Generators to Nailgun VolumeManager
------------------------------------------
The "size" field in a volume definition can be defined either directly
as an integer number in megabytes, or indirectly via a so called
generator. Generator is a Python lambda that can be called to calculate
logical volume size dynamically. In the json example above size is
defined as a dictionary with two keys: "generator" is the name of the
generator lambda and "generator_args" is the list of arguments that will
be passed to the generator lambda.
There is a method in the VolumeManager class where generators are
defined. The new volume generator 'NEW_GENERATOR_TO_CALCULATE_SIZE' needs to
be added to the generators dictionary inside this method.
.. code-block:: python
    class VolumeManager(object):
        ...
        def call_generator(self, generator, *args):
            generators = {
                ...
                'NEW_GENERATOR_TO_CALCULATE_SIZE': lambda: 1000,
                ...
            }
Creating a New Volume Group
---------------------------
Another way to add a new volume to slave nodes is to create a new volume
group and to define one or more logical volumes inside the volume group
definition::
{
"id": "NEW_VOLUME_GROUP_NAME",
"type": "vg",
"min_size": {"generator": "calc_NEW_VOLUME_NAME_size"},
"label": "Label for NEW VOLUME GROUP as it will be shown on UI",
"volumes": [
{
"mount": "/path/to/mount/point",
"type": "lv",
"name": "LOGICAL_VOLUME_NAME",
"size": {
"generator": "another_generator_to_calc_LOGICAL_VOLUME_size",
"generator_args": ["arg"]
},
"file_system": "xfs"
}
]
}
Creating a New Plain Partition
------------------------------
Some node roles may be incompatible with LVM and would require plain
partitions. If that's the case, you may have to define a standalone
volume with type "partition" instead of "vg"::
{
"id": "NEW_PARTITION_NAME",
"type": "partition",
"min_size": {"generator": "calc_NEW_PARTITION_NAME_size"},
"label": "Label for NEW PARTITION as it will be shown on UI",
"mount": "none",
"disk_label": "LABEL",
"file_system": "xfs"
}
Note how you can set the mount point to "none" and define a disk label to
identify the partition instead. It is only possible to set a disk label on
a formatted partition, so you have to set the "file_system" parameter to use
disk labels.
Updating the Node Role to Volumes Mapping
-----------------------------------------
Unlike a new logical volume added to a pre-existing logical volume
group, a new logical volume group or partition will not be allocated on
the node unless it is included in the role-to-volumes mapping
corresponding to one of the node's roles, like this::
{
"volumes_roles_mapping": {
"controller": ["os", "image"],
"compute": ["os", "vm", "VOLUME_GROUP_NAME"],
"cinder": ["os", "cinder"]
}
}
Setting Volume Parameters from Nailgun Settings
-----------------------------------------------
In addition to VolumeManager generators, it is also possible to define
sizes (or any other volume parameters) in the nailgun configuration file
(/etc/nailgun/settings.yaml). All fixture files are templated using the
Jinja2 templating engine just before being loaded into the nailgun database.
For example, we can define the mount point for a new volume as follows::
"mount": "{{settings.NEW_LOGICAL_VOLUME_MOUNT_POINT}}"
Of course, *NEW_LOGICAL_VOLUME_MOUNT_POINT* must be defined in the
settings file.
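As a rough illustration of how this templating works, the fixture text can be rendered with Jinja2 against a
settings object before being loaded. The settings class and the mount point value below are made up for the
example and are not taken from the real settings module.

.. code-block:: python

    # Minimal sketch: render a fixture snippet with Jinja2 against a settings object.
    from jinja2 import Template


    class Settings(object):
        # Hypothetical settings key, used only for this example.
        NEW_LOGICAL_VOLUME_MOUNT_POINT = '/mnt/some/path'


    fixture_text = '{"mount": "{{ settings.NEW_LOGICAL_VOLUME_MOUNT_POINT }}"}'
    rendered = Template(fixture_text).render(settings=Settings())
    print(rendered)  # -> {"mount": "/mnt/some/path"}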

View File

@ -1,66 +0,0 @@
Nailgun is the core of FuelWeb.
To allow enterprise features to be easily connected,
and the open source community to extend it as well, Nailgun must
have a simple, very well defined and documented core,
with great pluggable capabilities.
Reliability
___________
All software contains bugs and may fail, and Nailgun is not an exception to this rule.
In reality, it is not possible to cover all failure scenarios,
or even to come close to 100%.
The question is how we can design the system so that bugs in one module do not damage
the whole system.
An example from Nailgun's past:
the agent collected hardware information, including the current_speed param of the interfaces.
One of the interfaces had current_speed=0. At the registration attempt, Nailgun's validator
checked that current_speed > 0, and the validator raised an InvalidData exception,
which declined node discovery.
current_speed is one of the attributes which we can easily skip; it is not even
used for deployment in any way at the moment and is only shown as information to the user.
But it prevented node discovery, and that made the server unusable.
Another example: due to the coincidence of a bug and wrong metadata on one of the nodes,
a GET request on that node would return 500 Internal Server Error.
It looks like it should affect only that one node, and logically we could remove such a
failing node from the environment to get it discovered again.
However, the UI + API handlers were written in the following way:
* the UI calls /api/nodes to fetch info about all nodes just to show how many nodes are allocated, and how many are not
* NodesCollectionHandler would return 500 if any of the nodes raises an exception
It is simple to guess that the whole UI was completely broken by just one
failed node. It was impossible to do any action in the UI.
These two examples give us the starting point to rethink how to avoid
crashing Nailgun just because one of the meta attributes is wrong.
First, we must divide the meta attributes discovered by the agent into two categories:
* absolutely required for node discovery (e.g. MAC address)
* not required for discovery:
  * required for deployment (e.g. disks)
  * not required for deployment (e.g. current_speed)
Second, we must refactor the UI to fetch only the information required,
not the whole DB just to show two numbers. To be more specific,
we have to make sure that issues in one environment do not
affect other environments. Such a refactoring will require additional
handlers in Nailgun, as well as some additions, such as pagination, etc.
From the Nailgun side, it is a bad idea to fail the whole CollectionHandler if one
of the objects fails to calculate some attribute. My (mihgen) idea is to simply set the
attribute to Null if it failed to calculate, and program the UI to handle it properly
(see the sketch below). Unit tests must help in testing this.
Another idea is to limit the /api/nodes,
/api/networks and other calls
to work only if the cluster_id param is provided, whether set to None or to one of the cluster ids.
In this way we can be sure that one environment will not be able to break the whole UI.
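A minimal sketch of the "set the attribute to Null instead of failing" idea could look like the code below.
The function and field names are made up for illustration and do not reflect the actual Nailgun handlers.

.. code-block:: python

    # Illustrative sketch: compute an optional attribute defensively so that one
    # broken node cannot take down the whole collection handler.
    import logging

    logger = logging.getLogger(__name__)


    def safe_attribute(node, calculate):
        """Return the calculated attribute, or None if the calculation fails."""
        try:
            return calculate(node)
        except Exception:
            logger.exception("Failed to calculate attribute for node %s", node.get("id"))
            return None  # the UI is expected to render None gracefully


    def render_node(node):
        # current_speed is optional: a missing or broken value becomes None
        # instead of raising and breaking the whole /api/nodes response.
        return {
            "id": node.get("id"),
            "current_speed": safe_attribute(
                node, lambda n: n["meta"]["interfaces"][0]["current_speed"]),
        }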

View File

@ -1,119 +0,0 @@
Sequence Diagrams
=================
OS Provisioning
---------------
.. uml::
title Nodes Provisioning
actor WebUser
box "Physical Server"
participant NodePXE
participant NodeAgent
end box
NodePXE -> Cobbler: PXE discovery
Cobbler --> NodePXE: bootstrap OS image
NodePXE -> Cobbler: network settings request
Cobbler --> NodePXE: IP, DNS response
NodePXE -> NodePXE: OS installation
NodePXE -> NodeAgent: starts agent
NodePXE -> MC: starts MCollective
NodeAgent -> Ohai: get info
Ohai --> NodeAgent: info
NodeAgent -> NodePXE: get admin node IP
NodePXE --> NodeAgent: admin node IP
NodeAgent -> Nailgun: Registration
|||
WebUser -> Nailgun: create cluster
WebUser -> Nailgun: add nodes to cluster
WebUser -> Nailgun: deploy cluster
|||
Nailgun -> Naily: Provision CentOS
Naily -> Astute: Provision CentOS
Astute -> Cobbler: Provision CentOS
Cobbler -> NodePXE: ssh to reboot
Cobbler --> NodePXE: CentOS image
NodePXE -> NodeAgent: starts agent
NodePXE -> MC: starts MC agent
NodeAgent -> Nailgun: Node metadata
Networks Verification
---------------------
.. uml::
title Network Verification
actor WebUser
WebUser -> Nailgun: verify networks (cluster #1)
Nailgun -> Naily: verify nets (100-120 vlans)
Naily -> Astute: verify nets
Astute -> MC: start listeners
MC -> net_probe.py: forks to listen
MC --> Astute: listening
Astute -> MC: send frames
MC -> net_probe.py: send frames
net_probe.py --> MC: sent
MC --> Astute: sent
Astute -> MC: get result
MC -> net_probe.py: stop listeners
net_probe.py --> MC: result
MC --> Astute: result graph
Astute --> Naily: vlans Ok
Naily --> Nailgun: response
Nailgun --> WebUser: response
Details on Cluster Provisioning & Deployment (via Facter extension)
-------------------------------------------------------------------
.. uml::
title Cluster Deployment
actor WebUser
Nailgun -> Naily: Provision,Deploy
Naily -> Astute: Provision,Deploy
Astute -> MC: Type of nodes?
MC -> Astute: bootstrap
Astute -> Cobbler: create system,reboot
Astute -> MC: Type of nodes?
MC --> Astute: booted in target OS
Astute --> Naily: provisioned
Naily --> Nailgun: provisioned
Nailgun --> WebUser: status on UI
Astute -> MC: Create /etc/naily.facts
Astute -> MC: run puppet
MC -> Puppet: runonce
Puppet -> Puppet_master: get modules,class
Puppet_master --> Puppet: modules, class
Puppet -> Facter: get facts
Facter --> Puppet: set of facts
Puppet -> Puppet: applies $role
Puppet --> MC: done
MC --> Astute: deploy is done
Astute --> Naily: deploy is done
Naily --> Nailgun: deploy is done
Nailgun --> WebUser: deploy is done
Once the deploy and provisioning messages are accepted by Naily, the provisioning method is called in Astute.
The provisioning part creates a system in Cobbler and calls reboot over Cobbler. Then
Astute uses `MCollective direct addressing mode <http://www.devco.net/archives/2012/06/19/mcollective-direct-addressing-mode.php>`_
to check whether all required nodes are available, including the puppet agent on them.
If some nodes are not ready yet, Astute waits for a few seconds and makes the request again
(a simple sketch of this polling loop is shown below).
When the nodes are booted in the target OS,
Astute uses the naily_fact MCollective plugin to post data to a special file /etc/naily.fact on the target system.
The data includes the role and all other variables needed for deployment. Then Astute calls the puppetd MCollective plugin
to start the deployment. Puppet is started on the nodes and requests modules and manifests from the Puppet master.
site.pp on the Master node defines one common class for every node.
Accordingly, the puppet agent starts its run. The modules contain a facter extension, which runs before deployment. The extension
reads the facts from /etc/naily.fact placed by MCollective and extends the Facter data with these facts, which can be
easily used in Puppet modules. A case structure in the running class chooses the appropriate class to import, based on the $role
variable received from /etc/naily.fact. That class is loaded and starts to execute. All variables from the file are available
like ordinary facts from Facter.
It is possible to use the system without Nailgun and Naily: the user creates a YAML file with all required
data and calls the Astute binary script. The script loads the data from YAML and instantiates an Astute instance
the same way as it is instantiated from Naily.
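The "wait until the required nodes are available" step mentioned above can be sketched as a simple polling
loop. This is an illustrative Python sketch, not Astute's actual (Ruby) code; the check_ready callable, the
delay and the attempt count are assumptions made only for the example.

.. code-block:: python

    # Illustrative retry loop: keep asking which of the required nodes are ready
    # and wait a few seconds between attempts, as described above.
    import time


    def wait_for_nodes(required_nodes, check_ready, delay=5, attempts=60):
        """check_ready(nodes) is assumed to return the subset of nodes that are ready."""
        pending = set(required_nodes)
        for _ in range(attempts):
            pending -= set(check_ready(pending))
            if not pending:
                return True
            time.sleep(delay)
        raise RuntimeError("Nodes never became ready: %s" % sorted(pending))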

View File

@ -1,11 +0,0 @@
.. _contents:
Table of contents
=================
.. toctree::
:maxdepth: 2
:numbered:
develop
user

View File

@ -1,199 +0,0 @@
@ECHO OFF
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set BUILDDIR=_build
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
set I18NSPHINXOPTS=%SPHINXOPTS% .
if NOT "%PAPER%" == "" (
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
)
if "%1" == "" goto help
if "%1" == "help" (
:help
echo.Please use `make ^<target^>` where ^<target^> is one of
echo. html to make standalone HTML files
echo. dirhtml to make HTML files named index.html in directories
echo. singlehtml to make a single large HTML file
echo. pickle to make pickle files
echo. json to make JSON files
echo. htmlhelp to make HTML files and a HTML help project
echo. qthelp to make HTML files and a qthelp project
echo. devhelp to make HTML files and a Devhelp project
echo. epub to make an epub
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
echo. pdf to make PDF files
echo. text to make text files
echo. man to make manual pages
echo. texinfo to make Texinfo files
echo. gettext to make PO message catalogs
echo. changes to make an overview over all changed/added/deprecated items
echo. linkcheck to check all external links for integrity
echo. doctest to run all doctests embedded in the documentation if enabled
goto end
)
if "%1" == "clean" (
for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
del /q /s %BUILDDIR%\*
goto end
)
if "%1" == "html" (
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/html.
goto end
)
if "%1" == "dirhtml" (
%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
goto end
)
if "%1" == "singlehtml" (
%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
goto end
)
if "%1" == "pickle" (
%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can process the pickle files.
goto end
)
if "%1" == "json" (
%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can process the JSON files.
goto end
)
if "%1" == "htmlhelp" (
%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can run HTML Help Workshop with the ^
.hhp project file in %BUILDDIR%/htmlhelp.
goto end
)
if "%1" == "qthelp" (
%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %BUILDDIR%/qthelp, like this:
echo.^> qcollectiongenerator %BUILDDIR%\qthelp\scaffold.qhcp
echo.To view the help file:
echo.^> assistant -collectionFile %BUILDDIR%\qthelp\scaffold.qhc
goto end
)
if "%1" == "devhelp" (
%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished.
goto end
)
if "%1" == "epub" (
%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The epub file is in %BUILDDIR%/epub.
goto end
)
if "%1" == "latex" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
if errorlevel 1 exit /b 1
echo.
echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "pdf" (
%SPHINXBUILD% -bpdf %ALLSPHINXOPTS% %BUILDDIR%/pdf
if errorlevel 1 exit /b 1
echo.
echo.Build finished; the PDF files are in %BUILDDIR%/pdf.
goto end
)
if "%1" == "text" (
%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The text files are in %BUILDDIR%/text.
goto end
)
if "%1" == "man" (
%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The manual pages are in %BUILDDIR%/man.
goto end
)
if "%1" == "texinfo" (
%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
goto end
)
if "%1" == "gettext" (
%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
goto end
)
if "%1" == "changes" (
%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
if errorlevel 1 exit /b 1
echo.
echo.The overview file is in %BUILDDIR%/changes.
goto end
)
if "%1" == "linkcheck" (
%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
if errorlevel 1 exit /b 1
echo.
echo.Link check complete; look for any errors in the above output ^
or in %BUILDDIR%/linkcheck/output.txt.
goto end
)
if "%1" == "doctest" (
%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
if errorlevel 1 exit /b 1
echo.
echo.Testing of doctests in the sources finished, look at the ^
results in %BUILDDIR%/doctest/output.txt.
goto end
)
:end

View File

@ -1,4 +0,0 @@
.. _user:
The user guide has been moved to `docs.mirantis.com <http://docs.mirantis.com/>`_.
If you want to contribute, check out the sources from `github <https://github.com/Mirantis/fuel-docs.git>`_.

1
fuel

@ -1 +0,0 @@
Subproject commit e79dd4a6b93c88040bb513efb73126d0112c3eaa

View File

@ -1 +0,0 @@
naMu7aej

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -82,19 +82,19 @@ $(ISOROOT)/ks.cfg: $(SOURCE_DIR)/iso/ks.template $(SOURCE_DIR)/iso/ks.py $(KSYAM
python $(SOURCE_DIR)/iso/ks.py -t $(SOURCE_DIR)/iso/ks.template -c $(KSYAML) -o $@
$(ISOROOT)/bootstrap_admin_node.sh: $(SOURCE_DIR)/iso/bootstrap_admin_node.sh ; $(ACTION.COPY)
$(ISOROOT)/bootstrap_admin_node.conf: $(SOURCE_DIR)/iso/bootstrap_admin_node.conf ; $(ACTION.COPY)
$(ISOROOT)/send2syslog.py: $(SOURCE_DIR)/bin/send2syslog.py ; $(ACTION.COPY)
$(ISOROOT)/version.yaml: $(call depv,COMMIT_SHA)
$(ISOROOT)/send2syslog.py: $(BUILD_DIR)/repos/nailgun/bin/send2syslog.py ; $(ACTION.COPY)
$(BUILD_DIR)/repos/nailgun/bin/send2syslog.py: $(BUILD_DIR)/repos/nailgun.done
$(ISOROOT)/version.yaml: $(call depv,PRODUCT_VERSION)
$(ISOROOT)/version.yaml: $(call depv,FUEL_COMMIT_SHA)
$(ISOROOT)/version.yaml:
echo "COMMIT_SHA: $(COMMIT_SHA)" > $@
echo "PRODUCT_VERSION: $(PRODUCT_VERSION)" >> $@
echo "FUEL_COMMIT_SHA: $(FUEL_COMMIT_SHA)" >> $@
$(ISOROOT)/version.yaml: $(BUILD_DIR)/repos/repos.done
echo "VERSION:" > $@
echo " release: $(PRODUCT_VERSION)" >> $@
cat $(BUILD_DIR)/repos/version.yaml >> $@
$(ISOROOT)/puppet-slave.tgz: \
$(call find-files,$(SOURCE_DIR)/fuel/deployment/puppet)
(cd $(SOURCE_DIR)/fuel/deployment/puppet && tar rf $(ISOROOT)/puppet-slave.tar ./*)
$(BUILD_DIR)/repos/fuellib.done \
$(call find-files,$(BUILD_DIR)/repos/fuellib/deployment/puppet)
(cd $(BUILD_DIR)/repos/fuellib/deployment/puppet && tar rf $(ISOROOT)/puppet-slave.tar ./*)
gzip -c -9 $(ISOROOT)/puppet-slave.tar > $@ && \
rm $(ISOROOT)/puppet-slave.tar

View File

@ -11,7 +11,8 @@ clean-mirror-eggs:
$(BUILD_DIR)/mirror/eggs/build.done: $(call depv,LOCAL_MIRROR_EGGS)
$(BUILD_DIR)/mirror/eggs/build.done: $(call depv,REQUIRED_EGGS)
$(BUILD_DIR)/mirror/eggs/build.done: $(call depv,OSTF_EGGS)
$(BUILD_DIR)/mirror/eggs/build.done: $(BUILD_DIR)/repos/fuellib.done
$(BUILD_DIR)/mirror/eggs/build.done: $(call find-files,$(BUILD_DIR)/repos/fuellib/deployment/puppet/nailgun/files/venv-ostf.txt)
$(BUILD_DIR)/mirror/eggs/build.done: $(call depv,SANDBOX_PACKAGES)
$(BUILD_DIR)/mirror/eggs/build.done: SANDBOX:=$(BUILD_DIR)/mirror/eggs/SANDBOX
$(BUILD_DIR)/mirror/eggs/build.done: export SANDBOX_UP:=$(SANDBOX_UP)
@ -47,12 +48,12 @@ $(BUILD_DIR)/mirror/eggs/build.done: \
--find-links $(MIRROR_EGGS) \
--download /tmp/$(notdir $(LOCAL_MIRROR_EGGS)) \
$(REQUIRED_EGGS)
grep -v "^\\s*\#" $(BUILD_DIR)/repos/fuellib/deployment/puppet/nailgun/files/venv-ostf.txt | xargs \
sudo chroot $(SANDBOX) pip install \
--exists-action=i \
--index-url $(MIRROR_EGGS) \
--find-links $(MIRROR_EGGS) \
--download /tmp/$(notdir $(LOCAL_MIRROR_EGGS)) \
$(OSTF_EGGS)
--download /tmp/$(notdir $(LOCAL_MIRROR_EGGS))
# # Copying downloaded eggs into eggs mirror
rsync -a $(SANDBOX)/tmp/$(notdir $(LOCAL_MIRROR_EGGS))/ $(LOCAL_MIRROR_EGGS)

View File

@ -1,34 +1,11 @@
$(BUILD_DIR)/mirror/gems/gems-bundle/Gemfile: $(call depv,MIRROR_GEMS)
$(BUILD_DIR)/mirror/gems/gems-bundle/Gemfile:
mkdir -p $(@D)
echo -n > $@
for i in $(MIRROR_GEMS); do \
echo "source \"$$i\"" >> $@; \
done
$(BUILD_DIR)/mirror/gems/gems-bundle/naily/Gemfile: $(call depv,MIRROR_GEMS)
$(BUILD_DIR)/mirror/gems/gems-bundle/naily/Gemfile: \
$(BUILD_DIR)/mirror/gems/gems-bundle/naily/Gemfile.lock \
$(BUILD_DIR)/packages/gems/build.done \
$(BUILD_DIR)/packages/rpm/build.done
echo -n > $@
for i in $(MIRROR_GEMS); do \
echo "source \"$$i\"" >> $@; \
done
echo "source \"file://$(BUILD_MIRROR_GEMS)\"" >> $@
echo "gemspec :path => \"$(SOURCE_DIR)/naily\"" >> $@
$(ACTION.TOUCH)
$(BUILD_DIR)/mirror/gems/gems-bundle/naily/Gemfile.lock: \
$(SOURCE_DIR)/naily/Gemfile.lock
mkdir -p $(@D)
cp $(SOURCE_DIR)/naily/Gemfile.lock $@
$(BUILD_DIR)/mirror/gems/gems-bundle-gemfile.done: $(call depv,MIRROR_GEMS)
$(BUILD_DIR)/mirror/gems/gems-bundle-gemfile.done: \
$(SOURCE_DIR)/requirements-gems.txt \
$(BUILD_DIR)/mirror/gems/gems-bundle/Gemfile \
$(BUILD_DIR)/mirror/gems/gems-bundle/naily/Gemfile
$(SOURCE_DIR)/requirements-gems.txt
mkdir -p $(BUILD_DIR)/mirror/gems/gems-bundle
/bin/echo -n > $(BUILD_DIR)/mirror/gems/gems-bundle/Gemfile
for i in $(MIRROR_GEMS); do \
echo "source \"$$i\"" >> $(BUILD_DIR)/mirror/gems/gems-bundle/Gemfile; \
done
cat $(SOURCE_DIR)/requirements-gems.txt | while read gem ver; do \
echo "gem \"$${gem}\", \"$${ver}\"" >> $(BUILD_DIR)/mirror/gems/gems-bundle/Gemfile; \
done
@ -36,8 +13,6 @@ $(BUILD_DIR)/mirror/gems/gems-bundle-gemfile.done: \
$(BUILD_DIR)/mirror/gems/gems-bundle.done: $(BUILD_DIR)/mirror/gems/gems-bundle-gemfile.done
( cd $(BUILD_DIR)/mirror/gems/gems-bundle && bundle install --path=. && bundle package )
find $(BUILD_DIR)/mirror/gems/gems-bundle/naily \( -name "astute*.gem*" \) -exec rm '{}' \+
( cd $(BUILD_DIR)/mirror/gems/gems-bundle/naily && bundle install --path=. && bundle package )
( cd $(BUILD_DIR)/mirror/gems/gems-bundle/vendor/cache/ && \
gem fetch `for i in $(MIRROR_GEMS); do echo -n "--source $$i "; done` -v 1.3.4 bundler )
$(ACTION.TOUCH)
@ -47,8 +22,5 @@ $(BUILD_DIR)/mirror/gems/build.done: $(call depv,BUILD_MIRROR_GEMS)
$(BUILD_DIR)/mirror/gems/build.done: $(BUILD_DIR)/mirror/gems/gems-bundle.done
@mkdir -p $(LOCAL_MIRROR_GEMS)/gems
cp $(BUILD_DIR)/mirror/gems/gems-bundle/vendor/cache/*.gem $(LOCAL_MIRROR_GEMS)/gems
find $(BUILD_DIR)/mirror/gems/gems-bundle/naily/vendor/cache/ \
\( -name "*.gem" -a ! -name "astute*" -a ! -name "mcollective*" -a ! -name "raemon*" \) \
-exec cp '{}' $(LOCAL_MIRROR_GEMS)/gems \;
(cd $(LOCAL_MIRROR_GEMS) && gem generate_index gems)
$(ACTION.TOUCH)

View File

@ -31,26 +31,28 @@ $(BUILD_DIR)/mirror/rhel/yum-config.done: \
$(BUILD_DIR)/mirror/rhel/etc/yum/pluginconf.d/priorities.conf
$(ACTION.TOUCH)
$(BUILD_DIR)/mirror/rhel/yum.done: $(call depv,REQ_RHEL_RPMS)
$(BUILD_DIR)/mirror/rhel/yum.done: \
$(BUILD_DIR)/repos/fuellib.done \
$(call find-files,$(BUILD_DIR)/repos/fuellib/deployment/puppet/rpmcache/files/required-rpms.txt \
$(BUILD_DIR)/mirror/rhel/yum-config.done
yum -c $(BUILD_DIR)/mirror/rhel/etc/yum.conf clean all
rm -rf /var/tmp/yum-$$USER-*/
grep -v "^\\s*\#" $(BUILD_DIR)/repos/fuellib/deployment/puppet/rpmcache/files/required-rpms.txt | /bin/sed 's/-[0-9][0-9\.a-zA-Z_-]\+//g' | xargs \
yumdownloader -q --resolve --archlist=$(CENTOS_ARCH) \
-c $(BUILD_DIR)/mirror/rhel/etc/yum.conf \
--destdir=$(LOCAL_MIRROR_RHEL)/Packages \
`echo $(REQ_RHEL_RPMS) | /bin/sed 's/-[0-9][0-9\.a-zA-Z_-]\+//g'`
--destdir=$(LOCAL_MIRROR_RHEL)/Packages
$(ACTION.TOUCH)
show-yum-urls-rhel: $(call depv,REQ_RHEL_RPMS)
show-yum-urls-rhel: \
$(BUILD_DIR)/repos/fuellib.done \
$(call find-files,$(BUILD_DIR)/repos/fuellib/deployment/puppet/rpmcache/files/required-rpms.txt \
$(BUILD_DIR)/mirror/rhel/yum-config.done
yum -c $(BUILD_DIR)/mirror/rhel/etc/yum.conf clean all
rm -rf /var/tmp/yum-$$USER-*/
grep -v "^\\s*\#" $(BUILD_DIR)/repos/fuellib/deployment/puppet/rpmcache/files/required-rpms.txt | /bin/sed 's/-[0-9][0-9\.a-zA-Z_-]\+//g' | xargs \
yumdownloader --urls -q --resolve --archlist=$(CENTOS_ARCH) \
-c $(BUILD_DIR)/mirror/rhel/etc/yum.conf \
--destdir=$(LOCAL_MIRROR_RHEL)/Packages \
`echo $(REQ_RHEL_RPMS) | /bin/sed 's/-[0-9][0-9\.a-zA-Z_-]\+//g'`
--destdir=$(LOCAL_MIRROR_RHEL)/Packages
$(LOCAL_MIRROR_RHEL)/comps.xml: \
export COMPSXML=$(shell wget -qO- $(MIRROR_RHEL)/repodata/repomd.xml | grep -m 1 '$(@F)' | awk -F'"' '{ print $$2 }')

View File

@ -6,17 +6,6 @@ else
wget --no-use-server-timestamps -c -P $(LOCAL_MIRROR_SRC) $(MIRROR_SRC)/$(notdir $@)
endif
#
# Download ostf packages directly from the github, because
# it updates often and we don't want to update main mirrors
# on each commit
#
$(LOCAL_MIRROR_SRC)/$(OSTF_TESTS_SHA).zip:
wget --no-use-server-timestamps -c -P $(LOCAL_MIRROR_SRC) https://github.com/Mirantis/fuel-ostf-tests/archive/$(OSTF_TESTS_SHA).zip
$(LOCAL_MIRROR_SRC)/$(OSTF_PLUGIN_SHA).zip:
wget --no-use-server-timestamps -c -P $(LOCAL_MIRROR_SRC) https://github.com/Mirantis/fuel-ostf-plugin/archive/$(OSTF_PLUGIN_SHA).zip
$(BUILD_DIR)/mirror/src/build.done: $(SOURCE_DIR)/requirements-src.txt \
| $(addprefix $(LOCAL_MIRROR_SRC)/, $(notdir $(REQUIRED_SRCS)) $(OSTF_TESTS_SHA).zip $(OSTF_PLUGIN_SHA).zip)
| $(addprefix $(LOCAL_MIRROR_SRC)/, $(notdir $(REQUIRED_SRCS)))
$(ACTION.TOUCH)

View File

@ -1,4 +0,0 @@
include manage.py
include fuel-cli/fuel
recursive-include nailgun *
recursive-include static *

View File

@ -1,24 +0,0 @@
/*
* Copyright 2013 Mirantis, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
**/
({
baseUrl: ".",
appDir: "static",
dir: "/tmp/static",
mainConfigFile: "static/js/main.js",
modules: [{name: "js/main"}],
waitSeconds: 60,
optimize: "uglify2"
})

View File

@ -1,2 +0,0 @@
You should copy the file fake-target-mcollective.log to /var/tmp to emulate node logs when FAKE-TASKS is enabled.

View File

@ -1,309 +0,0 @@
2013-01-16T12:26:36 info: # Logfile created on Wed Jan 16 12:26:36 +0000 2013 by logger.rb/1.2.6
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.492877 #834] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading Mcollective::Facts::Yaml_facts from mcollective/facts/yaml_facts.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.503049 #834] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin facts_plugin with class MCollective::Facts::Yaml_facts single_instance: true
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.503142 #834] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading Mcollective::Connector::Stomp from mcollective/connector/stomp.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.503832 #834] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin connector_plugin with class MCollective::Connector::Stomp single_instance: true
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.503940 #834] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading Mcollective::Security::Psk from mcollective/security/psk.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.504627 #834] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin security_plugin with class MCollective::Security::Psk single_instance: true
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.504751 #834] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading Mcollective::Registration::Agentlist from mcollective/registration/agentlist.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.505116 #834] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin registration_plugin with class MCollective::Registration::Agentlist single_instance: true
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.505348 #834] DEBUG -- : pluginmanager.rb:47:in `<<' Registering plugin global_stats with class MCollective::RunnerStats single_instance: true
2013-01-16T12:26:36 info: I, [2013-01-16T12:26:36.505395 #834] INFO -- : mcollectived:31 The Marionette Collective 2.2.1 started logging at debug level
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.505433 #834] DEBUG -- : mcollectived:34 Starting in the background (true)
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.512411 #838] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin global_stats with class MCollective::RunnerStats
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.512524 #838] DEBUG -- : pluginmanager.rb:80:in `[]' Returning new plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.512598 #838] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin global_stats with class MCollective::RunnerStats
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.512662 #838] DEBUG -- : pluginmanager.rb:80:in `[]' Returning new plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.512756 #838] DEBUG -- : stomp.rb:150:in `connect' Connecting to 10.20.0.2:61613
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.518718 #838] DEBUG -- : agents.rb:26:in `loadagents' Reloading all agents from disk
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.518854 #838] DEBUG -- : agents.rb:104:in `findagentfile' Found erase_node at /usr/libexec/mcollective/mcollective/agent/erase_node.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.518923 #838] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Erase_node from mcollective/agent/erase_node.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.539956 #838] DEBUG -- : agent.rb:138:in `activate?' Starting default activation checks for erase_node
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.540066 #838] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin erase_node_agent with class MCollective::Agent::Erase_node single_instance: false
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.540121 #838] DEBUG -- : pluginmanager.rb:88:in `[]' Returning new plugin erase_node_agent with class MCollective::Agent::Erase_node
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.540416 #838] DEBUG -- : cache.rb:117:in `ttl' Cache miss on 'ddl' key 'agent/erase_node'
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.541162 #838] DEBUG -- : base.rb:93:in `findddlfile' Found erase_node ddl at /usr/libexec/mcollective/mcollective/agent/erase_node.ddl
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.541332 #838] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.541417 #838] DEBUG -- : stomp.rb:241:in `subscribe' Subscribing to /topic/mcollective.erase_node.command
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.541706 #838] DEBUG -- : agents.rb:104:in `findagentfile' Found discovery at /usr/libexec/mcollective/mcollective/agent/discovery.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.541796 #838] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Discovery from mcollective/agent/discovery.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.545131 #838] DEBUG -- : agents.rb:91:in `activate_agent?' MCollective::Agent::Discovery does not have an activate? method, activating as default
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.545221 #838] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin discovery_agent with class MCollective::Agent::Discovery single_instance: true
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.545274 #838] DEBUG -- : pluginmanager.rb:80:in `[]' Returning new plugin discovery_agent with class MCollective::Agent::Discovery
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.545353 #838] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.545422 #838] DEBUG -- : stomp.rb:241:in `subscribe' Subscribing to /topic/mcollective.discovery.command
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.545540 #838] DEBUG -- : agents.rb:104:in `findagentfile' Found systemtype at /usr/libexec/mcollective/mcollective/agent/systemtype.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.545602 #838] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Systemtype from mcollective/agent/systemtype.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.545788 #838] DEBUG -- : agent.rb:138:in `activate?' Starting default activation checks for systemtype
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.545846 #838] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin systemtype_agent with class MCollective::Agent::Systemtype single_instance: false
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.545894 #838] DEBUG -- : pluginmanager.rb:88:in `[]' Returning new plugin systemtype_agent with class MCollective::Agent::Systemtype
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.545998 #838] DEBUG -- : cache.rb:117:in `ttl' Cache miss on 'ddl' key 'agent/systemtype'
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.546101 #838] DEBUG -- : base.rb:93:in `findddlfile' Found systemtype ddl at /usr/libexec/mcollective/mcollective/agent/systemtype.ddl
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.546248 #838] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.546315 #838] DEBUG -- : stomp.rb:241:in `subscribe' Subscribing to /topic/mcollective.systemtype.command
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.546400 #838] DEBUG -- : agents.rb:104:in `findagentfile' Found net_probe at /usr/libexec/mcollective/mcollective/agent/net_probe.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.546456 #838] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Net_probe from mcollective/agent/net_probe.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.546846 #838] DEBUG -- : agent.rb:138:in `activate?' Starting default activation checks for net_probe
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.546903 #838] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin net_probe_agent with class MCollective::Agent::Net_probe single_instance: false
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.546958 #838] DEBUG -- : pluginmanager.rb:88:in `[]' Returning new plugin net_probe_agent with class MCollective::Agent::Net_probe
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.547054 #838] DEBUG -- : cache.rb:117:in `ttl' Cache miss on 'ddl' key 'agent/net_probe'
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.547148 #838] DEBUG -- : base.rb:93:in `findddlfile' Found net_probe ddl at /usr/libexec/mcollective/mcollective/agent/net_probe.ddl
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.547339 #838] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.547402 #838] DEBUG -- : stomp.rb:241:in `subscribe' Subscribing to /topic/mcollective.net_probe.command
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.547492 #838] DEBUG -- : agents.rb:104:in `findagentfile' Found rpuppet at /usr/libexec/mcollective/mcollective/agent/rpuppet.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.547549 #838] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Rpuppet from mcollective/agent/rpuppet.rb
2013-01-16T12:26:36 err: E, [2013-01-16T12:26:36.551471 #838] ERROR -- : pluginmanager.rb:171:in `loadclass' Failed to load MCollective::Agent::Rpuppet: no such file to load -- puppet/util/command_line
2013-01-16T12:26:36 err: E, [2013-01-16T12:26:36.551553 #838] ERROR -- : agents.rb:71:in `loadagent' Loading agent rpuppet failed: no such file to load -- puppet/util/command_line
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.551626 #838] DEBUG -- : agents.rb:104:in `findagentfile' Found rapply at /usr/libexec/mcollective/mcollective/agent/rapply.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.551688 #838] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Rapply from mcollective/agent/rapply.rb
2013-01-16T12:26:36 err: E, [2013-01-16T12:26:36.555693 #838] ERROR -- : pluginmanager.rb:171:in `loadclass' Failed to load MCollective::Agent::Rapply: no such file to load -- puppet/application
2013-01-16T12:26:36 err: E, [2013-01-16T12:26:36.555757 #838] ERROR -- : agents.rb:71:in `loadagent' Loading agent rapply failed: no such file to load -- puppet/application
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.555823 #838] DEBUG -- : agents.rb:104:in `findagentfile' Found puppetd at /usr/libexec/mcollective/mcollective/agent/puppetd.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.555882 #838] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Puppetd from mcollective/agent/puppetd.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.556361 #838] DEBUG -- : agent.rb:138:in `activate?' Starting default activation checks for puppetd
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.556425 #838] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin puppetd_agent with class MCollective::Agent::Puppetd single_instance: false
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.556474 #838] DEBUG -- : pluginmanager.rb:88:in `[]' Returning new plugin puppetd_agent with class MCollective::Agent::Puppetd
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.556562 #838] DEBUG -- : cache.rb:117:in `ttl' Cache miss on 'ddl' key 'agent/puppetd'
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.556661 #838] DEBUG -- : base.rb:93:in `findddlfile' Found puppetd ddl at /usr/libexec/mcollective/mcollective/agent/puppetd.ddl
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.556942 #838] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.557042 #838] DEBUG -- : stomp.rb:241:in `subscribe' Subscribing to /topic/mcollective.puppetd.command
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.557142 #838] DEBUG -- : agents.rb:104:in `findagentfile' Found rpcutil at /usr/libexec/mcollective/mcollective/agent/rpcutil.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.557197 #838] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Rpcutil from mcollective/agent/rpcutil.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.557546 #838] DEBUG -- : agent.rb:138:in `activate?' Starting default activation checks for rpcutil
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.557603 #838] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin rpcutil_agent with class MCollective::Agent::Rpcutil single_instance: false
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.557650 #838] DEBUG -- : pluginmanager.rb:88:in `[]' Returning new plugin rpcutil_agent with class MCollective::Agent::Rpcutil
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.557725 #838] DEBUG -- : cache.rb:117:in `ttl' Cache miss on 'ddl' key 'agent/rpcutil'
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.557815 #838] DEBUG -- : base.rb:93:in `findddlfile' Found rpcutil ddl at /usr/libexec/mcollective/mcollective/agent/rpcutil.ddl
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.558307 #838] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.558369 #838] DEBUG -- : stomp.rb:241:in `subscribe' Subscribing to /topic/mcollective.rpcutil.command
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.558456 #838] DEBUG -- : agents.rb:104:in `findagentfile' Found nailyfact at /usr/libexec/mcollective/mcollective/agent/nailyfact.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.558512 #838] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Nailyfact from mcollective/agent/nailyfact.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.558816 #838] DEBUG -- : agent.rb:138:in `activate?' Starting default activation checks for nailyfact
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.558872 #838] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin nailyfact_agent with class MCollective::Agent::Nailyfact single_instance: false
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.558919 #838] DEBUG -- : pluginmanager.rb:88:in `[]' Returning new plugin nailyfact_agent with class MCollective::Agent::Nailyfact
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.559015 #838] DEBUG -- : cache.rb:117:in `ttl' Cache miss on 'ddl' key 'agent/nailyfact'
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.559106 #838] DEBUG -- : base.rb:93:in `findddlfile' Found nailyfact ddl at /usr/libexec/mcollective/mcollective/agent/nailyfact.ddl
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.559285 #838] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.559348 #838] DEBUG -- : stomp.rb:241:in `subscribe' Subscribing to /topic/mcollective.nailyfact.command
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.559435 #838] DEBUG -- : agents.rb:104:in `findagentfile' Found node_indirector at /usr/libexec/mcollective/mcollective/agent/node_indirector.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.559490 #838] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Node_indirector from mcollective/agent/node_indirector.rb
2013-01-16T12:26:36 err: E, [2013-01-16T12:26:36.564361 #838] ERROR -- : pluginmanager.rb:171:in `loadclass' Failed to load MCollective::Agent::Node_indirector: no such file to load -- puppet/node
2013-01-16T12:26:36 err: E, [2013-01-16T12:26:36.564424 #838] ERROR -- : agents.rb:71:in `loadagent' Loading agent node_indirector failed: no such file to load -- puppet/node
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.564490 #838] DEBUG -- : agents.rb:104:in `findagentfile' Found fake at /usr/libexec/mcollective/mcollective/agent/fake.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.564549 #838] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Fake from mcollective/agent/fake.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.564715 #838] DEBUG -- : agent.rb:138:in `activate?' Starting default activation checks for fake
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.564775 #838] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin fake_agent with class MCollective::Agent::Fake single_instance: false
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.564824 #838] DEBUG -- : pluginmanager.rb:88:in `[]' Returning new plugin fake_agent with class MCollective::Agent::Fake
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.564907 #838] DEBUG -- : cache.rb:117:in `ttl' Cache miss on 'ddl' key 'agent/fake'
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.565043 #838] DEBUG -- : base.rb:93:in `findddlfile' Found fake ddl at /usr/libexec/mcollective/mcollective/agent/fake.ddl
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.565181 #838] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.565245 #838] DEBUG -- : stomp.rb:241:in `subscribe' Subscribing to /topic/mcollective.fake.command
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.565666 #838] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Data::Agent_data from mcollective/data/agent_data.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.566017 #838] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin agent_data with class MCollective::Data::Agent_data single_instance: false
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.566082 #838] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Data::Fstat_data from mcollective/data/fstat_data.rb
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.566294 #838] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin fstat_data with class MCollective::Data::Fstat_data single_instance: false
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.566365 #838] DEBUG -- : pluginmanager.rb:88:in `[]' Returning new plugin fstat_data with class MCollective::Data::Fstat_data
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.566440 #838] DEBUG -- : cache.rb:117:in `ttl' Cache miss on 'ddl' key 'data/fstat_data'
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.566660 #838] DEBUG -- : base.rb:93:in `findddlfile' Found fstat_data ddl at /usr/libexec/mcollective/mcollective/data/fstat_data.ddl
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.567041 #838] DEBUG -- : pluginmanager.rb:88:in `[]' Returning new plugin agent_data with class MCollective::Data::Agent_data
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.567117 #838] DEBUG -- : cache.rb:117:in `ttl' Cache miss on 'ddl' key 'data/agent_data'
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.567201 #838] DEBUG -- : base.rb:93:in `findddlfile' Found agent_data ddl at /usr/libexec/mcollective/mcollective/data/agent_data.ddl
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.567366 #838] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.567427 #838] DEBUG -- : stomp.rb:241:in `subscribe' Subscribing to /topic/mcollective.mcollective.command
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.567507 #838] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.567603 #838] DEBUG -- : stomp.rb:241:in `subscribe' Subscribing to /queue/mcollective.ca4c50b905dc21ea17a10549a6f5944f
2013-01-16T12:26:36 debug: D, [2013-01-16T12:26:36.567676 #838] DEBUG -- : stomp.rb:197:in `receive' Waiting for a message from Stomp
2013-01-16T12:26:37 warning: W, [2013-01-16T12:26:37.452256 #838] WARN -- : runner.rb:60:in `run' Exiting after signal: SIGTERM
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.452333 #838] DEBUG -- : stomp.rb:270:in `disconnect' Disconnecting from Stomp
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.609291 #958] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading Mcollective::Facts::Yaml_facts from mcollective/facts/yaml_facts.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.619522 #958] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin facts_plugin with class MCollective::Facts::Yaml_facts single_instance: true
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.619617 #958] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading Mcollective::Connector::Stomp from mcollective/connector/stomp.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.620322 #958] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin connector_plugin with class MCollective::Connector::Stomp single_instance: true
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.620433 #958] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading Mcollective::Security::Psk from mcollective/security/psk.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.621145 #958] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin security_plugin with class MCollective::Security::Psk single_instance: true
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.621264 #958] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading Mcollective::Registration::Agentlist from mcollective/registration/agentlist.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.621654 #958] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin registration_plugin with class MCollective::Registration::Agentlist single_instance: true
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.621894 #958] DEBUG -- : pluginmanager.rb:47:in `<<' Registering plugin global_stats with class MCollective::RunnerStats single_instance: true
2013-01-16T12:26:37 info: I, [2013-01-16T12:26:37.621939 #958] INFO -- : mcollectived:31 The Marionette Collective 2.2.1 started logging at debug level
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.621974 #958] DEBUG -- : mcollectived:34 Starting in the background (true)
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.627841 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin global_stats with class MCollective::RunnerStats
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.627954 #962] DEBUG -- : pluginmanager.rb:80:in `[]' Returning new plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.628056 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin global_stats with class MCollective::RunnerStats
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.628129 #962] DEBUG -- : pluginmanager.rb:80:in `[]' Returning new plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.628224 #962] DEBUG -- : stomp.rb:150:in `connect' Connecting to 10.20.0.2:61613
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.636559 #962] DEBUG -- : agents.rb:26:in `loadagents' Reloading all agents from disk
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.636706 #962] DEBUG -- : agents.rb:104:in `findagentfile' Found erase_node at /usr/libexec/mcollective/mcollective/agent/erase_node.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.636773 #962] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Erase_node from mcollective/agent/erase_node.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.652064 #962] DEBUG -- : agent.rb:138:in `activate?' Starting default activation checks for erase_node
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.652159 #962] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin erase_node_agent with class MCollective::Agent::Erase_node single_instance: false
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.652207 #962] DEBUG -- : pluginmanager.rb:88:in `[]' Returning new plugin erase_node_agent with class MCollective::Agent::Erase_node
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.652505 #962] DEBUG -- : cache.rb:117:in `ttl' Cache miss on 'ddl' key 'agent/erase_node'
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.653250 #962] DEBUG -- : base.rb:93:in `findddlfile' Found erase_node ddl at /usr/libexec/mcollective/mcollective/agent/erase_node.ddl
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.653417 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.653487 #962] DEBUG -- : stomp.rb:241:in `subscribe' Subscribing to /topic/mcollective.erase_node.command
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.653736 #962] DEBUG -- : agents.rb:104:in `findagentfile' Found discovery at /usr/libexec/mcollective/mcollective/agent/discovery.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.653795 #962] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Discovery from mcollective/agent/discovery.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.653972 #962] DEBUG -- : agents.rb:91:in `activate_agent?' MCollective::Agent::Discovery does not have an activate? method, activating as default
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.656743 #962] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin discovery_agent with class MCollective::Agent::Discovery single_instance: true
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.656800 #962] DEBUG -- : pluginmanager.rb:80:in `[]' Returning new plugin discovery_agent with class MCollective::Agent::Discovery
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.656877 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.656941 #962] DEBUG -- : stomp.rb:241:in `subscribe' Subscribing to /topic/mcollective.discovery.command
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.657062 #962] DEBUG -- : agents.rb:104:in `findagentfile' Found systemtype at /usr/libexec/mcollective/mcollective/agent/systemtype.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.657124 #962] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Systemtype from mcollective/agent/systemtype.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.657296 #962] DEBUG -- : agent.rb:138:in `activate?' Starting default activation checks for systemtype
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.657353 #962] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin systemtype_agent with class MCollective::Agent::Systemtype single_instance: false
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.657400 #962] DEBUG -- : pluginmanager.rb:88:in `[]' Returning new plugin systemtype_agent with class MCollective::Agent::Systemtype
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.657499 #962] DEBUG -- : cache.rb:117:in `ttl' Cache miss on 'ddl' key 'agent/systemtype'
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.657601 #962] DEBUG -- : base.rb:93:in `findddlfile' Found systemtype ddl at /usr/libexec/mcollective/mcollective/agent/systemtype.ddl
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.657741 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.657801 #962] DEBUG -- : stomp.rb:241:in `subscribe' Subscribing to /topic/mcollective.systemtype.command
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.657900 #962] DEBUG -- : agents.rb:104:in `findagentfile' Found net_probe at /usr/libexec/mcollective/mcollective/agent/net_probe.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.657955 #962] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Net_probe from mcollective/agent/net_probe.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.658388 #962] DEBUG -- : agent.rb:138:in `activate?' Starting default activation checks for net_probe
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.658447 #962] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin net_probe_agent with class MCollective::Agent::Net_probe single_instance: false
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.658496 #962] DEBUG -- : pluginmanager.rb:88:in `[]' Returning new plugin net_probe_agent with class MCollective::Agent::Net_probe
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.658572 #962] DEBUG -- : cache.rb:117:in `ttl' Cache miss on 'ddl' key 'agent/net_probe'
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.658664 #962] DEBUG -- : base.rb:93:in `findddlfile' Found net_probe ddl at /usr/libexec/mcollective/mcollective/agent/net_probe.ddl
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.658853 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.658919 #962] DEBUG -- : stomp.rb:241:in `subscribe' Subscribing to /topic/mcollective.net_probe.command
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.664250 #962] DEBUG -- : agents.rb:104:in `findagentfile' Found rpuppet at /usr/libexec/mcollective/mcollective/agent/rpuppet.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.664346 #962] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Rpuppet from mcollective/agent/rpuppet.rb
2013-01-16T12:26:37 err: E, [2013-01-16T12:26:37.665259 #962] ERROR -- : pluginmanager.rb:171:in `loadclass' Failed to load MCollective::Agent::Rpuppet: no such file to load -- puppet/util/command_line
2013-01-16T12:26:37 err: E, [2013-01-16T12:26:37.665313 #962] ERROR -- : agents.rb:71:in `loadagent' Loading agent rpuppet failed: no such file to load -- puppet/util/command_line
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.665388 #962] DEBUG -- : agents.rb:104:in `findagentfile' Found rapply at /usr/libexec/mcollective/mcollective/agent/rapply.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.665444 #962] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Rapply from mcollective/agent/rapply.rb
2013-01-16T12:26:37 err: E, [2013-01-16T12:26:37.668623 #962] ERROR -- : pluginmanager.rb:171:in `loadclass' Failed to load MCollective::Agent::Rapply: no such file to load -- puppet/application
2013-01-16T12:26:37 err: E, [2013-01-16T12:26:37.668684 #962] ERROR -- : agents.rb:71:in `loadagent' Loading agent rapply failed: no such file to load -- puppet/application
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.668751 #962] DEBUG -- : agents.rb:104:in `findagentfile' Found puppetd at /usr/libexec/mcollective/mcollective/agent/puppetd.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.668811 #962] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Puppetd from mcollective/agent/puppetd.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.669313 #962] DEBUG -- : agent.rb:138:in `activate?' Starting default activation checks for puppetd
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.669376 #962] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin puppetd_agent with class MCollective::Agent::Puppetd single_instance: false
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.669426 #962] DEBUG -- : pluginmanager.rb:88:in `[]' Returning new plugin puppetd_agent with class MCollective::Agent::Puppetd
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.669547 #962] DEBUG -- : cache.rb:117:in `ttl' Cache miss on 'ddl' key 'agent/puppetd'
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.669647 #962] DEBUG -- : base.rb:93:in `findddlfile' Found puppetd ddl at /usr/libexec/mcollective/mcollective/agent/puppetd.ddl
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.669926 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.670016 #962] DEBUG -- : stomp.rb:241:in `subscribe' Subscribing to /topic/mcollective.puppetd.command
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.670124 #962] DEBUG -- : agents.rb:104:in `findagentfile' Found rpcutil at /usr/libexec/mcollective/mcollective/agent/rpcutil.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.670181 #962] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Rpcutil from mcollective/agent/rpcutil.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.670528 #962] DEBUG -- : agent.rb:138:in `activate?' Starting default activation checks for rpcutil
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.670583 #962] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin rpcutil_agent with class MCollective::Agent::Rpcutil single_instance: false
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.670630 #962] DEBUG -- : pluginmanager.rb:88:in `[]' Returning new plugin rpcutil_agent with class MCollective::Agent::Rpcutil
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.670703 #962] DEBUG -- : cache.rb:117:in `ttl' Cache miss on 'ddl' key 'agent/rpcutil'
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.670792 #962] DEBUG -- : base.rb:93:in `findddlfile' Found rpcutil ddl at /usr/libexec/mcollective/mcollective/agent/rpcutil.ddl
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.674272 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.674374 #962] DEBUG -- : stomp.rb:241:in `subscribe' Subscribing to /topic/mcollective.rpcutil.command
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.674490 #962] DEBUG -- : agents.rb:104:in `findagentfile' Found nailyfact at /usr/libexec/mcollective/mcollective/agent/nailyfact.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.674556 #962] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Nailyfact from mcollective/agent/nailyfact.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.674931 #962] DEBUG -- : agent.rb:138:in `activate?' Starting default activation checks for nailyfact
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.675019 #962] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin nailyfact_agent with class MCollective::Agent::Nailyfact single_instance: false
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.675072 #962] DEBUG -- : pluginmanager.rb:88:in `[]' Returning new plugin nailyfact_agent with class MCollective::Agent::Nailyfact
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.675164 #962] DEBUG -- : cache.rb:117:in `ttl' Cache miss on 'ddl' key 'agent/nailyfact'
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.675272 #962] DEBUG -- : base.rb:93:in `findddlfile' Found nailyfact ddl at /usr/libexec/mcollective/mcollective/agent/nailyfact.ddl
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.675488 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.675556 #962] DEBUG -- : stomp.rb:241:in `subscribe' Subscribing to /topic/mcollective.nailyfact.command
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.675656 #962] DEBUG -- : agents.rb:104:in `findagentfile' Found node_indirector at /usr/libexec/mcollective/mcollective/agent/node_indirector.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.675718 #962] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Node_indirector from mcollective/agent/node_indirector.rb
2013-01-16T12:26:37 err: E, [2013-01-16T12:26:37.676526 #962] ERROR -- : pluginmanager.rb:171:in `loadclass' Failed to load MCollective::Agent::Node_indirector: no such file to load -- puppet/node
2013-01-16T12:26:37 err: E, [2013-01-16T12:26:37.676585 #962] ERROR -- : agents.rb:71:in `loadagent' Loading agent node_indirector failed: no such file to load -- puppet/node
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.676652 #962] DEBUG -- : agents.rb:104:in `findagentfile' Found fake at /usr/libexec/mcollective/mcollective/agent/fake.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.676711 #962] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Agent::Fake from mcollective/agent/fake.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.676898 #962] DEBUG -- : agent.rb:138:in `activate?' Starting default activation checks for fake
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.676960 #962] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin fake_agent with class MCollective::Agent::Fake single_instance: false
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.680570 #962] DEBUG -- : pluginmanager.rb:88:in `[]' Returning new plugin fake_agent with class MCollective::Agent::Fake
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.680716 #962] DEBUG -- : cache.rb:117:in `ttl' Cache miss on 'ddl' key 'agent/fake'
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.680834 #962] DEBUG -- : base.rb:93:in `findddlfile' Found fake ddl at /usr/libexec/mcollective/mcollective/agent/fake.ddl
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.681027 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.681100 #962] DEBUG -- : stomp.rb:241:in `subscribe' Subscribing to /topic/mcollective.fake.command
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.681567 #962] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Data::Agent_data from mcollective/data/agent_data.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.681902 #962] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin agent_data with class MCollective::Data::Agent_data single_instance: false
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.681965 #962] DEBUG -- : pluginmanager.rb:167:in `loadclass' Loading MCollective::Data::Fstat_data from mcollective/data/fstat_data.rb
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.682202 #962] DEBUG -- : pluginmanager.rb:44:in `<<' Registering plugin fstat_data with class MCollective::Data::Fstat_data single_instance: false
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.682275 #962] DEBUG -- : pluginmanager.rb:88:in `[]' Returning new plugin fstat_data with class MCollective::Data::Fstat_data
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.682349 #962] DEBUG -- : cache.rb:117:in `ttl' Cache miss on 'ddl' key 'data/fstat_data'
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.682577 #962] DEBUG -- : base.rb:93:in `findddlfile' Found fstat_data ddl at /usr/libexec/mcollective/mcollective/data/fstat_data.ddl
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.682933 #962] DEBUG -- : pluginmanager.rb:88:in `[]' Returning new plugin agent_data with class MCollective::Data::Agent_data
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.685273 #962] DEBUG -- : cache.rb:117:in `ttl' Cache miss on 'ddl' key 'data/agent_data'
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.685379 #962] DEBUG -- : base.rb:93:in `findddlfile' Found agent_data ddl at /usr/libexec/mcollective/mcollective/data/agent_data.ddl
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.685570 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.685631 #962] DEBUG -- : stomp.rb:241:in `subscribe' Subscribing to /topic/mcollective.mcollective.command
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.685714 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.685809 #962] DEBUG -- : stomp.rb:241:in `subscribe' Subscribing to /queue/mcollective.c4ca4238a0b923820dcc509a6f75849b
2013-01-16T12:26:37 debug: D, [2013-01-16T12:26:37.685883 #962] DEBUG -- : stomp.rb:197:in `receive' Waiting for a message from Stomp
2013-01-16T12:29:47 debug: D, [2013-01-16T12:29:47.222263 #962] DEBUG -- : runnerstats.rb:49:in `received' Incrementing total stat
2013-01-16T12:29:47 debug: D, [2013-01-16T12:29:47.222338 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:29:47 debug: D, [2013-01-16T12:29:47.222419 #962] DEBUG -- : runnerstats.rb:38:in `validated' Incrementing validated stat
2013-01-16T12:29:47 debug: D, [2013-01-16T12:29:47.222469 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:29:47 debug: D, [2013-01-16T12:29:47.222546 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:29:47 debug: D, [2013-01-16T12:29:47.222626 #962] DEBUG -- : base.rb:117:in `validate_filter?' Passing based on agent rpcutil
2013-01-16T12:29:47 debug: D, [2013-01-16T12:29:47.222675 #962] DEBUG -- : base.rb:117:in `validate_filter?' Passing based on agent rpcutil
2013-01-16T12:29:47 debug: D, [2013-01-16T12:29:47.222723 #962] DEBUG -- : base.rb:153:in `validate_filter?' Message passed the filter checks
2013-01-16T12:29:47 debug: D, [2013-01-16T12:29:47.222764 #962] DEBUG -- : runnerstats.rb:26:in `passed' Incrementing passed stat
2013-01-16T12:29:47 debug: D, [2013-01-16T12:29:47.222806 #962] DEBUG -- : runner.rb:80:in `agentmsg' Handling message for agent 'discovery' on collective 'mcollective'
2013-01-16T12:29:47 debug: D, [2013-01-16T12:29:47.222853 #962] DEBUG -- : agents.rb:119:in `dispatch' Dispatching a message to agent discovery
2013-01-16T12:29:47 debug: D, [2013-01-16T12:29:47.222943 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin discovery_agent with class MCollective::Agent::Discovery
2013-01-16T12:29:47 debug: D, [2013-01-16T12:29:47.223018 #962] DEBUG -- : stomp.rb:197:in `receive' Waiting for a message from Stomp
2013-01-16T12:29:47 debug: D, [2013-01-16T12:29:47.223190 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:29:47 debug: D, [2013-01-16T12:29:47.223394 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:29:47 debug: D, [2013-01-16T12:29:47.223463 #962] DEBUG -- : base.rb:168:in `create_reply' Encoded a message for request c4252a6973535a958349627872a4f6b2
2013-01-16T12:29:47 debug: D, [2013-01-16T12:29:47.223580 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:29:47 debug: D, [2013-01-16T12:29:47.223646 #962] DEBUG -- : stomp.rb:230:in `publish' Sending a broadcast message to STOMP target '/topic/mcollective.discovery.reply'
2013-01-16T12:29:47 debug: D, [2013-01-16T12:29:47.223902 #962] DEBUG -- : runnerstats.rb:56:in `sent' Incrementing replies stat
2013-01-16T12:29:49 debug: D, [2013-01-16T12:29:49.207332 #962] DEBUG -- : runnerstats.rb:49:in `received' Incrementing total stat
2013-01-16T12:29:49 debug: D, [2013-01-16T12:29:49.207419 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:29:49 debug: D, [2013-01-16T12:29:49.207502 #962] DEBUG -- : runnerstats.rb:38:in `validated' Incrementing validated stat
2013-01-16T12:29:49 debug: D, [2013-01-16T12:29:49.207558 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:29:49 debug: D, [2013-01-16T12:29:49.207625 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:29:49 debug: D, [2013-01-16T12:29:49.207697 #962] DEBUG -- : base.rb:117:in `validate_filter?' Passing based on agent rpcutil
2013-01-16T12:29:49 debug: D, [2013-01-16T12:29:49.207747 #962] DEBUG -- : base.rb:117:in `validate_filter?' Passing based on agent rpcutil
2013-01-16T12:29:49 debug: D, [2013-01-16T12:29:49.207796 #962] DEBUG -- : base.rb:153:in `validate_filter?' Message passed the filter checks
2013-01-16T12:29:49 debug: D, [2013-01-16T12:29:49.207839 #962] DEBUG -- : runnerstats.rb:26:in `passed' Incrementing passed stat
2013-01-16T12:29:49 debug: D, [2013-01-16T12:29:49.207883 #962] DEBUG -- : runner.rb:80:in `agentmsg' Handling message for agent 'rpcutil' on collective 'mcollective'
2013-01-16T12:29:49 debug: D, [2013-01-16T12:29:49.207926 #962] DEBUG -- : agents.rb:119:in `dispatch' Dispatching a message to agent rpcutil
2013-01-16T12:29:49 debug: D, [2013-01-16T12:29:49.208034 #962] DEBUG -- : pluginmanager.rb:88:in `[]' Returning new plugin rpcutil_agent with class MCollective::Agent::Rpcutil
2013-01-16T12:29:49 debug: D, [2013-01-16T12:29:49.208099 #962] DEBUG -- : stomp.rb:197:in `receive' Waiting for a message from Stomp
2013-01-16T12:29:49 debug: D, [2013-01-16T12:29:49.208265 #962] DEBUG -- : cache.rb:105:in `read' Cache hit on 'ddl' key 'agent/rpcutil'
2013-01-16T12:29:49 debug: D, [2013-01-16T12:29:49.209068 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:29:49 debug: D, [2013-01-16T12:29:49.209135 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:29:49 debug: D, [2013-01-16T12:29:49.209211 #962] DEBUG -- : base.rb:168:in `create_reply' Encoded a message for request 865fb61cf4bc5dd08b2152f494c1d9f8
2013-01-16T12:29:49 debug: D, [2013-01-16T12:29:49.209334 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:29:49 debug: D, [2013-01-16T12:29:49.209411 #962] DEBUG -- : stomp.rb:230:in `publish' Sending a broadcast message to STOMP target '/topic/mcollective.rpcutil.reply'
2013-01-16T12:29:49 debug: D, [2013-01-16T12:29:49.209685 #962] DEBUG -- : runnerstats.rb:56:in `sent' Incrementing replies stat
2013-01-16T12:29:58 debug: D, [2013-01-16T12:29:58.236481 #962] DEBUG -- : runnerstats.rb:49:in `received' Incrementing total stat
2013-01-16T12:29:58 debug: D, [2013-01-16T12:29:58.236566 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:29:58 debug: D, [2013-01-16T12:29:58.236644 #962] DEBUG -- : runnerstats.rb:38:in `validated' Incrementing validated stat
2013-01-16T12:29:58 debug: D, [2013-01-16T12:29:58.236698 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:29:58 debug: D, [2013-01-16T12:29:58.236766 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:29:58 debug: D, [2013-01-16T12:29:58.236835 #962] DEBUG -- : base.rb:117:in `validate_filter?' Passing based on agent rpcutil
2013-01-16T12:29:58 debug: D, [2013-01-16T12:29:58.236883 #962] DEBUG -- : base.rb:117:in `validate_filter?' Passing based on agent rpcutil
2013-01-16T12:29:58 debug: D, [2013-01-16T12:29:58.236932 #962] DEBUG -- : base.rb:153:in `validate_filter?' Message passed the filter checks
2013-01-16T12:29:58 debug: D, [2013-01-16T12:29:58.236973 #962] DEBUG -- : runnerstats.rb:26:in `passed' Incrementing passed stat
2013-01-16T12:29:58 debug: D, [2013-01-16T12:29:58.237040 #962] DEBUG -- : runner.rb:80:in `agentmsg' Handling message for agent 'discovery' on collective 'mcollective'
2013-01-16T12:29:58 debug: D, [2013-01-16T12:29:58.237083 #962] DEBUG -- : agents.rb:119:in `dispatch' Dispatching a message to agent discovery
2013-01-16T12:29:58 debug: D, [2013-01-16T12:29:58.237175 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin discovery_agent with class MCollective::Agent::Discovery
2013-01-16T12:29:58 debug: D, [2013-01-16T12:29:58.237232 #962] DEBUG -- : stomp.rb:197:in `receive' Waiting for a message from Stomp
2013-01-16T12:29:58 debug: D, [2013-01-16T12:29:58.237398 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:29:58 debug: D, [2013-01-16T12:29:58.237608 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:29:58 debug: D, [2013-01-16T12:29:58.237676 #962] DEBUG -- : base.rb:168:in `create_reply' Encoded a message for request 8d7776d9c45b5f56b87570fce2fbcc27
2013-01-16T12:29:58 debug: D, [2013-01-16T12:29:58.237829 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:29:58 debug: D, [2013-01-16T12:29:58.237896 #962] DEBUG -- : stomp.rb:230:in `publish' Sending a broadcast message to STOMP target '/topic/mcollective.discovery.reply'
2013-01-16T12:29:58 debug: D, [2013-01-16T12:29:58.238151 #962] DEBUG -- : runnerstats.rb:56:in `sent' Incrementing replies stat
2013-01-16T12:30:00 debug: D, [2013-01-16T12:30:00.224258 #962] DEBUG -- : runnerstats.rb:49:in `received' Incrementing total stat
2013-01-16T12:30:00 debug: D, [2013-01-16T12:30:00.224345 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:30:00 debug: D, [2013-01-16T12:30:00.224423 #962] DEBUG -- : runnerstats.rb:38:in `validated' Incrementing validated stat
2013-01-16T12:30:00 debug: D, [2013-01-16T12:30:00.224478 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:30:00 debug: D, [2013-01-16T12:30:00.224544 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:30:00 debug: D, [2013-01-16T12:30:00.224614 #962] DEBUG -- : base.rb:117:in `validate_filter?' Passing based on agent rpcutil
2013-01-16T12:30:00 debug: D, [2013-01-16T12:30:00.224663 #962] DEBUG -- : base.rb:117:in `validate_filter?' Passing based on agent rpcutil
2013-01-16T12:30:00 debug: D, [2013-01-16T12:30:00.224711 #962] DEBUG -- : base.rb:153:in `validate_filter?' Message passed the filter checks
2013-01-16T12:30:00 debug: D, [2013-01-16T12:30:00.224753 #962] DEBUG -- : runnerstats.rb:26:in `passed' Incrementing passed stat
2013-01-16T12:30:00 debug: D, [2013-01-16T12:30:00.224802 #962] DEBUG -- : runner.rb:80:in `agentmsg' Handling message for agent 'rpcutil' on collective 'mcollective'
2013-01-16T12:30:00 debug: D, [2013-01-16T12:30:00.224844 #962] DEBUG -- : agents.rb:119:in `dispatch' Dispatching a message to agent rpcutil
2013-01-16T12:30:00 debug: D, [2013-01-16T12:30:00.224933 #962] DEBUG -- : pluginmanager.rb:88:in `[]' Returning new plugin rpcutil_agent with class MCollective::Agent::Rpcutil
2013-01-16T12:30:00 debug: D, [2013-01-16T12:30:00.225004 #962] DEBUG -- : stomp.rb:197:in `receive' Waiting for a message from Stomp
2013-01-16T12:30:00 debug: D, [2013-01-16T12:30:00.225162 #962] DEBUG -- : cache.rb:105:in `read' Cache hit on 'ddl' key 'agent/rpcutil'
2013-01-16T12:30:00 debug: D, [2013-01-16T12:30:00.225500 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:30:00 debug: D, [2013-01-16T12:30:00.225567 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin security_plugin with class MCollective::Security::Psk
2013-01-16T12:30:00 debug: D, [2013-01-16T12:30:00.225644 #962] DEBUG -- : base.rb:168:in `create_reply' Encoded a message for request abe36a613826518fb19cda0e2226dd6a
2013-01-16T12:30:00 debug: D, [2013-01-16T12:30:00.225807 #962] DEBUG -- : pluginmanager.rb:83:in `[]' Returning cached plugin connector_plugin with class MCollective::Connector::Stomp
2013-01-16T12:30:00 debug: D, [2013-01-16T12:30:00.225880 #962] DEBUG -- : stomp.rb:230:in `publish' Sending a broadcast message to STOMP target '/topic/mcollective.rpcutil.reply'
2013-01-16T12:30:00 debug: D, [2013-01-16T12:30:00.226246 #962] DEBUG -- : runnerstats.rb:56:in `sent' Incrementing replies stat

File diff suppressed because it is too large

View File

@ -1,164 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import code
import sys
if __name__ == "__main__":
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(
dest="action", help='actions'
)
run_parser = subparsers.add_parser(
'run', help='run application locally'
)
run_parser.add_argument(
'-p', '--port', dest='port', action='store', type=str,
help='application port', default='8000'
)
run_parser.add_argument(
'-a', '--address', dest='address', action='store', type=str,
help='application address', default='0.0.0.0'
)
run_parser.add_argument(
'--fake-tasks', action='store_true', help='fake tasks'
)
run_parser.add_argument(
'--fake-tasks-amqp', action='store_true',
help='fake tasks with real AMQP'
)
run_parser.add_argument(
'--keepalive',
action='store_true',
help='run keep alive thread'
)
run_parser.add_argument(
'-c', '--config', dest='config_file', action='store', type=str,
help='custom config file', default=None
)
run_parser.add_argument(
'--fake-tasks-tick-count', action='store', type=int,
help='Fake tasks tick count'
)
run_parser.add_argument(
'--fake-tasks-tick-interval', action='store', type=int,
help='Fake tasks tick interval in seconds'
)
test_parser = subparsers.add_parser(
'test', help='run unit tests'
)
syncdb_parser = subparsers.add_parser(
'syncdb', help='sync application database'
)
dropdb_parser = subparsers.add_parser(
'dropdb', help='drop application database'
)
shell_parser = subparsers.add_parser(
'shell', help='open python REPL'
)
shell_parser.add_argument(
'-c', '--config', dest='config_file', action='store', type=str,
help='custom config file', default=None
)
loaddata_parser = subparsers.add_parser(
'loaddata', help='load data from fixture'
)
loaddata_parser.add_argument(
'fixture', action='store', help='json fixture to load'
)
dumpdata_parser = subparsers.add_parser(
'dumpdata', help='dump models as fixture'
)
dumpdata_parser.add_argument(
'model', action='store', help='model name to dump; underscored name '
'should be used, e.g. network_group for NetworkGroup model'
)
loaddefault_parser = subparsers.add_parser(
'loaddefault',
help='load data from default fixtures '
'(settings.FIXTURES_TO_UPLOAD)'
)
dump_settings = subparsers.add_parser(
'dump_settings', help='dump current settings to YAML'
)
params, other_params = parser.parse_known_args()
sys.argv.pop(1)
if params.action == "dumpdata":
import logging
logging.disable(logging.WARNING)
from nailgun.fixtures import fixman
fixman.dump_fixture(params.model)
sys.exit(0)
from nailgun.logger import logger
from nailgun.settings import settings
if params.action == "syncdb":
logger.info("Syncing database...")
from nailgun.db import syncdb
syncdb()
logger.info("Done")
elif params.action == "dropdb":
logger.info("Dropping database...")
from nailgun.db import dropdb
dropdb()
logger.info("Done")
elif params.action == "test":
logger.info("Running tests...")
from nailgun.unit_test import TestRunner
TestRunner.run()
logger.info("Done")
elif params.action == "loaddata":
logger.info("Uploading fixture...")
from nailgun.fixtures import fixman
with open(params.fixture, "r") as fileobj:
fixman.upload_fixture(fileobj)
logger.info("Done")
elif params.action == "loaddefault":
logger.info("Uploading fixture...")
from nailgun.fixtures import fixman
fixman.upload_fixtures()
logger.info("Done")
elif params.action == "dump_settings":
sys.stdout.write(settings.dump())
elif params.action in ("run",):
settings.update({
'LISTEN_PORT': int(params.port),
'LISTEN_ADDRESS': params.address,
})
for attr in ['FAKE_TASKS', 'FAKE_TASKS_TICK_COUNT',
'FAKE_TASKS_TICK_INTERVAL', 'FAKE_TASKS_AMQP']:
param = getattr(params, attr.lower())
if param is not None:
settings.update({attr: param})
if params.config_file:
settings.update_from_file(params.config_file)
from nailgun.wsgi import appstart
appstart(keepalive=params.keepalive)
elif params.action == "shell":
from nailgun.db import db
if params.config_file:
settings.update_from_file(params.config_file)
try:
from IPython import embed
embed()
except ImportError:
code.interact(local={'db': db, 'settings': settings})
else:
parser.print_help()
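Typical invocations of the management script above, as implied by its argparse definitions (the port, fixture path and model name are illustrative only):

python manage.py syncdb                      # create or update the database schema
python manage.py loaddefault                 # upload the default fixtures
python manage.py loaddata some_fixture.json  # upload a specific JSON fixture
python manage.py dumpdata network_group      # dump NetworkGroup objects as a fixture
python manage.py run -p 8000 --fake-tasks    # start the API locally with faked tasks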

View File

@ -1,25 +0,0 @@
NAILGUN_VERSION:=$(shell python -c "import sys; sys.path.insert(0, '$(SOURCE_DIR)/nailgun'); import setup; print setup.version")
$(BUILD_DIR)/packages/eggs/Nailgun-$(NAILGUN_VERSION).tar.gz: $(call depv,NO_UI_OPTIMIZE) \
$(call find-files,$(SOURCE_DIR)/nailgun)
ifeq ($(NO_UI_OPTIMIZE),0)
mkdir -p $(BUILD_DIR)/packages/eggs
cp -r $(SOURCE_DIR)/nailgun $(BUILD_DIR)/packages/eggs
cd $(SOURCE_DIR)/nailgun && \
r.js -o build.js dir=$(BUILD_DIR)/packages/eggs/nailgun/static
rm -rf $(BUILD_DIR)/packages/eggs/nailgun/static/templates
rm -f $(BUILD_DIR)/packages/eggs/nailgun/static/build.txt
find $(BUILD_DIR)/packages/eggs/nailgun/static/css -type f ! -name main.css -delete
find $(BUILD_DIR)/packages/eggs/nailgun/static/js -type f ! -name main.js -and ! -name require.js -delete
cd $(BUILD_DIR)/packages/eggs/nailgun && \
python setup.py sdist --dist-dir $(BUILD_DIR)/packages/eggs
else
cd $(SOURCE_DIR)/nailgun && \
python setup.py sdist --dist-dir $(BUILD_DIR)/packages/eggs
endif
test-unit: test-unit-nailgun
.PHONY: test-unit test-unit-nailgun
test-unit-nailgun:
cd $(SOURCE_DIR)/nailgun && ./run_tests.sh
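A short usage note for the fragment above: test-unit is a phony aggregate target, so the nailgun unit tests can be run on their own, and passing NO_UI_OPTIMIZE=1 makes the egg target skip the r.js static optimization step (the exact egg target path depends on how the top-level makesystem sets SOURCE_DIR and BUILD_DIR):

make test-unit    # runs nailgun/run_tests.sh via the test-unit-nailgun target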

View File

@ -1,13 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,13 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,34 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import sqlalchemy.types as types
class JSON(types.TypeDecorator):
impl = types.Text
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
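The JSON type above simply serializes values to and from a Text column. A minimal usage sketch, assuming the JSON class defined above is in scope; the Example model and its table name are hypothetical and only illustrate the pattern:

from sqlalchemy import Column, Integer
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Example(Base):
    # hypothetical model, for illustration only
    __tablename__ = 'examples'
    id = Column(Integer, primary_key=True)
    # stored as TEXT in the database, exposed to Python as a dict/list
    data = Column(JSON())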

View File

@ -1,16 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.api.handlers.base import check_client_content_type
from nailgun.api.handlers.base import forbid_client_caching

View File

@ -1,140 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
from decorator import decorator
import json
import web
from nailgun.api.serializers.base import BasicSerializer
from nailgun.api.validators.base import BasicValidator
from nailgun.db import db
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun import notifier
def check_client_content_type(handler):
content_type = web.ctx.env.get("CONTENT_TYPE", "application/json")
if web.ctx.path.startswith("/api")\
and not content_type.startswith("application/json"):
raise web.unsupportedmediatype
return handler()
def forbid_client_caching(handler):
if web.ctx.path.startswith("/api"):
web.header('Cache-Control',
'store, no-cache, must-revalidate,'
' post-check=0, pre-check=0')
web.header('Pragma', 'no-cache')
dt = datetime.fromtimestamp(0).strftime(
'%a, %d %b %Y %H:%M:%S GMT'
)
web.header('Expires', dt)
return handler()
@decorator
def content_json(func, *args, **kwargs):
web.header('Content-Type', 'application/json')
data = func(*args, **kwargs)
return build_json_response(data)
def build_json_response(data):
web.header('Content-Type', 'application/json')
if type(data) in (dict, list):
return json.dumps(data, indent=4)
return data
handlers = {}
class HandlerRegistrator(type):
def __init__(cls, name, bases, dct):
super(HandlerRegistrator, cls).__init__(name, bases, dct)
if hasattr(cls, 'model'):
key = cls.model.__name__
if key in handlers:
logger.warning("Handler for %s already registered" % key)
return
handlers[key] = cls
class JSONHandler(object):
__metaclass__ = HandlerRegistrator
validator = BasicValidator
serializer = BasicSerializer
fields = []
def __init__(self):
self.serializer.load_handlers(handlers)
@classmethod
def render(cls, instance, fields=None):
return cls.serializer.serialize(
instance,
fields=fields or cls.fields
)
def checked_data(self, validate_method=None):
try:
if validate_method:
data = validate_method(web.data())
else:
data = self.validator.validate(web.data())
except (
errors.InvalidInterfacesInfo,
errors.InvalidMetadata
) as exc:
notifier.notify("error", str(exc))
raise web.badrequest(message=str(exc))
except (
errors.AlreadyExists
) as exc:
err = web.conflict()
err.message = exc.message
raise err
except (
errors.InvalidData,
Exception
) as exc:
raise web.badrequest(message=str(exc))
return data
def get_object_or_404(self, model, *args, **kwargs):
# should be in ('warning', 'Log message') format
# (loglevel, message)
log_404 = kwargs.pop("log_404") if "log_404" in kwargs else None
log_get = kwargs.pop("log_get") if "log_get" in kwargs else None
if "id" in kwargs:
obj = db().query(model).get(kwargs["id"])
elif len(args) > 0:
obj = db().query(model).get(args[0])
else:
obj = db().query(model).filter(**kwargs).all()
if not obj:
if log_404:
getattr(logger, log_404[0])(log_404[1])
raise web.notfound()
else:
if log_get:
getattr(logger, log_get[0])(log_get[1])
return obj
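To illustrate how the pieces above fit together, here is a minimal handler sketch in the same style; the Foo model is hypothetical, and the body assumes the request carries a JSON object so that checked_data() returns a dict. It only demonstrates the pattern of rendering via class fields, validating via checked_data, and looking up objects via get_object_or_404:

class FooHandler(JSONHandler):
    # "Foo" is a hypothetical model used only to show the pattern
    fields = ("id", "name")

    @content_json
    def GET(self, foo_id):
        # 404s (optionally logging a warning) when the object is missing
        obj = self.get_object_or_404(
            Foo, foo_id,
            log_404=("warning", "Foo with id %s not found" % foo_id)
        )
        return self.render(obj)

    @content_json
    def PUT(self, foo_id):
        obj = self.get_object_or_404(Foo, foo_id)
        data = self.checked_data()  # validated, JSON-decoded request body
        for key, value in data.iteritems():
            setattr(obj, key, value)
        db().commit()
        return self.render(obj)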

View File

@ -1,386 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handlers dealing with clusters
"""
import json
import traceback
import web
from nailgun.api.handlers.base import content_json
from nailgun.api.handlers.base import JSONHandler
from nailgun.api.handlers.tasks import TaskHandler
from nailgun.api.models import Attributes
from nailgun.api.models import Cluster
from nailgun.api.models import Node
from nailgun.api.models import Release
from nailgun.api.serializers.network_configuration \
import NetworkConfigurationSerializer
from nailgun.api.validators.cluster import AttributesValidator
from nailgun.api.validators.cluster import ClusterValidator
from nailgun.db import db
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.network.manager import NetworkManager
from nailgun.task.manager import ClusterDeletionManager
from nailgun.task.manager import DeploymentTaskManager
class ClusterHandler(JSONHandler):
"""Cluster single handler
"""
fields = (
"id",
"name",
"mode",
"status",
"grouping",
("release", "*")
)
model = Cluster
validator = ClusterValidator
@classmethod
def render(cls, instance, fields=None):
json_data = JSONHandler.render(instance, fields=cls.fields)
if instance.changes:
for i in instance.changes:
if not i.node_id:
json_data.setdefault("changes", []).append(i.name)
else:
json_data.setdefault("changes", []).append(
[i.name, i.node_id]
)
else:
json_data["changes"] = []
return json_data
@content_json
def GET(self, cluster_id):
""":returns: JSONized Cluster object.
:http: * 200 (OK)
* 404 (cluster not found in db)
"""
cluster = self.get_object_or_404(Cluster, cluster_id)
return self.render(cluster)
@content_json
def PUT(self, cluster_id):
""":returns: JSONized Cluster object.
:http: * 200 (OK)
* 400 (invalid cluster data specified)
* 404 (cluster not found in db)
"""
cluster = self.get_object_or_404(Cluster, cluster_id)
data = self.checked_data()
network_manager = NetworkManager()
for key, value in data.iteritems():
if key == "nodes":
# TODO(NAME): separate nodes
# for deletion and addition by set().
new_nodes = db().query(Node).filter(
Node.id.in_(value)
)
nodes_to_remove = [n for n in cluster.nodes
if n not in new_nodes]
nodes_to_add = [n for n in new_nodes
if n not in cluster.nodes]
for node in nodes_to_add:
if not node.online:
raise web.badrequest(
"Can not add offline node to cluster")
map(cluster.nodes.remove, nodes_to_remove)
map(cluster.nodes.append, nodes_to_add)
for node in nodes_to_remove:
network_manager.clear_assigned_networks(node.id)
network_manager.clear_all_allowed_networks(node.id)
for node in nodes_to_add:
network_manager.allow_network_assignment_to_all_interfaces(
node.id
)
network_manager.assign_networks_to_main_interface(node.id)
else:
setattr(cluster, key, value)
db().commit()
return self.render(cluster)
@content_json
def DELETE(self, cluster_id):
""":returns: {}
:http: * 202 (cluster deletion process launched)
* 400 (failed to execute cluster deletion process)
* 404 (cluster not found in db)
"""
cluster = self.get_object_or_404(Cluster, cluster_id)
task_manager = ClusterDeletionManager(cluster_id=cluster.id)
try:
logger.debug('Trying to execute cluster deletion task')
task_manager.execute()
except Exception as e:
logger.warn('Error while executing '
'cluster deletion task: %s' % str(e))
logger.warn(traceback.format_exc())
raise web.badrequest(str(e))
raise web.webapi.HTTPError(
status="202 Accepted",
data="{}"
)
class ClusterCollectionHandler(JSONHandler):
"""Cluster collection handler
"""
validator = ClusterValidator
@content_json
def GET(self):
""":returns: Collection of JSONized Cluster objects.
:http: * 200 (OK)
"""
return map(
ClusterHandler.render,
db().query(Cluster).all()
)
@content_json
def POST(self):
""":returns: JSONized Cluster object.
:http: * 201 (cluster successfully created)
* 400 (invalid cluster data specified)
* 409 (cluster with such parameters already exists)
"""
# It's used for cluster creation only.
data = self.checked_data()
cluster = Cluster()
cluster.release = db().query(Release).get(data["release"])
# TODO(NAME): use fields
for field in ('name', 'mode', 'net_manager'):
if data.get(field):
setattr(cluster, field, data.get(field))
db().add(cluster)
db().commit()
attributes = Attributes(
editable=cluster.release.attributes_metadata.get("editable"),
generated=cluster.release.attributes_metadata.get("generated"),
cluster=cluster
)
attributes.generate_fields()
netmanager = NetworkManager()
try:
netmanager.create_network_groups(cluster.id)
cluster.add_pending_changes("attributes")
cluster.add_pending_changes("networks")
if 'nodes' in data and data['nodes']:
nodes = db().query(Node).filter(
Node.id.in_(data['nodes'])
).all()
map(cluster.nodes.append, nodes)
db().commit()
for node in nodes:
netmanager.allow_network_assignment_to_all_interfaces(
node.id
)
netmanager.assign_networks_to_main_interface(node.id)
raise web.webapi.created(json.dumps(
ClusterHandler.render(cluster),
indent=4
))
except (
errors.OutOfVLANs,
errors.OutOfIPs,
errors.NoSuitableCIDR
) as e:
# Cluster was created in this request,
# so we don't need to use ClusterDeletionManager.
# All relations will be cascade-deleted automatically.
# TODO(NAME): investigate transactions
db().delete(cluster)
raise web.badrequest(e.message)
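For clarity, the POST handler above reads a JSON body and uses its keys directly ('release', 'name', 'mode', 'net_manager', 'nodes'); a hypothetical request body, with purely illustrative values, might look like:

{
    "name": "demo-cluster",
    "release": 1,
    "mode": "multinode",
    "net_manager": "FlatDHCPManager",
    "nodes": [1, 2]
}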
class ClusterChangesHandler(JSONHandler):
"""Cluster changes handler
"""
fields = (
"id",
"name",
)
@content_json
def PUT(self, cluster_id):
""":returns: JSONized Task object.
:http: * 200 (task successfully executed)
* 404 (cluster not found in db)
* 400 (failed to execute task)
"""
cluster = self.get_object_or_404(
Cluster,
cluster_id,
log_404=(
"warning",
"Error: there is no cluster "
"with id '{0}' in DB.".format(cluster_id)))
try:
network_info = \
NetworkConfigurationSerializer.serialize_for_cluster(
cluster
)
logger.info(
u"Network info:\n{0}".format(
json.dumps(network_info, indent=4)
)
)
task_manager = DeploymentTaskManager(
cluster_id=cluster.id
)
task = task_manager.execute()
except Exception as exc:
logger.warn(u'ClusterChangesHandler: error while execution'
' deploy task: {0}'.format(str(exc)))
raise web.badrequest(str(exc))
return TaskHandler.render(task)
class ClusterAttributesHandler(JSONHandler):
"""Cluster attributes handler
"""
fields = (
"editable",
)
validator = AttributesValidator
@content_json
def GET(self, cluster_id):
""":returns: JSONized Cluster attributes.
:http: * 200 (OK)
* 404 (cluster not found in db)
* 500 (cluster has no attributes)
"""
cluster = self.get_object_or_404(Cluster, cluster_id)
if not cluster.attributes:
raise web.internalerror("No attributes found!")
return {
"editable": cluster.attributes.editable
}
@content_json
def PUT(self, cluster_id):
""":returns: JSONized Cluster attributes.
:http: * 200 (OK)
* 400 (wrong attributes data specified)
* 404 (cluster not found in db)
* 500 (cluster has no attributes)
"""
cluster = self.get_object_or_404(Cluster, cluster_id)
if not cluster.attributes:
raise web.internalerror("No attributes found!")
data = self.checked_data()
for key, value in data.iteritems():
setattr(cluster.attributes, key, value)
cluster.add_pending_changes("attributes")
db().commit()
return {"editable": cluster.attributes.editable}
class ClusterAttributesDefaultsHandler(JSONHandler):
"""Cluster default attributes handler
"""
fields = (
"editable",
)
@content_json
def GET(self, cluster_id):
""":returns: JSONized default Cluster attributes.
:http: * 200 (OK)
* 404 (cluster not found in db)
* 500 (cluster has no attributes)
"""
cluster = self.get_object_or_404(Cluster, cluster_id)
attrs = cluster.release.attributes_metadata.get("editable")
if not attrs:
raise web.internalerror("No attributes found!")
return {"editable": attrs}
@content_json
def PUT(self, cluster_id):
""":returns: JSONized Cluster attributes.
:http: * 200 (OK)
* 400 (wrong attributes data specified)
* 404 (cluster not found in db)
* 500 (cluster has no attributes)
"""
cluster = self.get_object_or_404(
Cluster,
cluster_id,
log_404=(
"warning",
"Error: there is no cluster "
"with id '{0}' in DB.".format(cluster_id)
)
)
if not cluster.attributes:
logger.error('ClusterAttributesDefaultsHandler: no attributes'
' found for cluster_id %s' % cluster_id)
raise web.internalerror("No attributes found!")
cluster.attributes.editable = cluster.release.attributes_metadata.get(
"editable"
)
db().commit()
cluster.add_pending_changes("attributes")
logger.debug('ClusterAttributesDefaultsHandler:'
' editable attributes for cluster_id %s were reset'
' to default' % cluster_id)
return {"editable": cluster.attributes.editable}
class ClusterGeneratedData(JSONHandler):
"""Cluster generated data
"""
@content_json
def GET(self, cluster_id):
""":returns: JSONized cluster generated data
:http: * 200 (OK)
* 404 (cluster not found in db)
"""
cluster = self.get_object_or_404(Cluster, cluster_id)
return cluster.attributes.generated
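The nodes branch of ClusterHandler.PUT above reconciles cluster membership and then resets network assignments for the affected nodes. A minimal sketch of the underlying set-difference idea (variable names and ids below are illustrative, not taken from the handler):
current_ids = set([1, 2, 3])                  # ids of nodes already in the cluster
requested_ids = set([2, 3, 4])                # ids supplied by the client
ids_to_remove = current_ids - requested_ids   # set([1])
ids_to_add = requested_ids - current_ids      # set([4])
# In the handler, these two sets drive cluster.nodes.remove()/append() and the
# NetworkManager calls that clear or (re)assign networks for each node.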


@ -1,119 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handlers dealing with disks
"""
import traceback
import web
from nailgun.api.handlers.base import content_json
from nailgun.api.handlers.base import JSONHandler
from nailgun.api.models import Node
from nailgun.api.models import NodeAttributes
from nailgun.api.validators.node import NodeDisksValidator
from nailgun.db import db
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.volumes.manager import DisksFormatConvertor
class NodeDisksHandler(JSONHandler):
"""Node disks handler
"""
validator = NodeDisksValidator
@content_json
def GET(self, node_id):
""":returns: JSONized node disks.
:http: * 200 (OK)
* 404 (node not found in db)
"""
node = self.get_object_or_404(Node, node_id)
node_volumes = node.attributes.volumes
return DisksFormatConvertor.format_disks_to_simple(node_volumes)
@content_json
def PUT(self, node_id):
""":returns: JSONized node disks.
:http: * 200 (OK)
* 400 (invalid disks data specified)
* 404 (node not found in db)
"""
node = self.get_object_or_404(Node, node_id)
data = self.checked_data()
if node.cluster:
node.cluster.add_pending_changes('disks', node_id=node.id)
volumes_data = DisksFormatConvertor.format_disks_to_full(node, data)
# For some reasons if we update node attributes like
# node.attributes.volumes = volumes_data
# after
# db().commit()
# it resets to previous state
db().query(NodeAttributes).filter_by(node_id=node_id).update(
{'volumes': volumes_data})
db().commit()
return DisksFormatConvertor.format_disks_to_simple(
node.attributes.volumes)
class NodeDefaultsDisksHandler(JSONHandler):
"""Node default disks handler
"""
@content_json
def GET(self, node_id):
""":returns: JSONized node disks.
:http: * 200 (OK)
* 404 (node or its attributes not found in db)
"""
node = self.get_object_or_404(Node, node_id)
if not node.attributes:
return web.notfound()
volumes = DisksFormatConvertor.format_disks_to_simple(
node.volume_manager.gen_volumes_info())
return volumes
class NodeVolumesInformationHandler(JSONHandler):
"""Node volumes information handler
"""
@content_json
def GET(self, node_id):
""":returns: JSONized volumes info for node.
:http: * 200 (OK)
* 404 (node not found in db)
"""
node = self.get_object_or_404(Node, node_id)
volumes_info = []
try:
volumes_info = DisksFormatConvertor.get_volumes_info(node)
except errors.CannotFindVolumesInfoForRole:
logger.error(traceback.format_exc())
raise web.notfound(
message='Cannot calculate volumes info. '
'Please, add node to a cluster.')
return volumes_info


@ -1,351 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handlers dealing with logs
"""
from itertools import dropwhile
import json
import logging
import os
import re
import time
import web
from nailgun.api.handlers.base import content_json
from nailgun.api.handlers.base import JSONHandler
from nailgun.api.handlers.tasks import TaskHandler
from nailgun.api.models import Node
from nailgun.api.models import RedHatAccount
from nailgun.db import db
from nailgun.settings import settings
from nailgun.task.manager import DumpTaskManager
logger = logging.getLogger(__name__)
def read_backwards(file, bufsize=4096):
buf = ""
try:
file.seek(-1, 1)
except IOError:
return
trailing_newline = False
if file.read(1) == "\n":
trailing_newline = True
file.seek(-1, 1)
while True:
newline_pos = buf.rfind("\n")
pos = file.tell()
if newline_pos != -1:
line = buf[newline_pos + 1:]
buf = buf[:newline_pos]
if pos or newline_pos or trailing_newline:
line += "\n"
yield line
elif pos:
toread = min(bufsize, pos)
file.seek(-toread, 1)
buf = file.read(toread) + buf
file.seek(-toread, 1)
if pos == toread:
buf = "\n" + buf
else:
return
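A minimal usage sketch for the read_backwards() generator above, assuming a hypothetical log path; the handler below seeks to the end of the file in the same way before iterating:
with open('/var/log/example.log', 'r') as f:   # hypothetical path
    f.seek(0, 2)                               # start at the end of the file
    last_lines = []
    for line in read_backwards(f):
        last_lines.append(line.rstrip('\n'))
        if len(last_lines) >= 10:              # stop after the 10 most recent lines
            break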
class LogEntryCollectionHandler(JSONHandler):
"""Log entry collection handler
"""
@content_json
def GET(self):
"""Receives following parameters:
- *date_before* - get logs before this date
- *date_after* - get logs after this date
- *source* - source of logs
- *node* - node id (for getting node logs)
- *level* - log level (all levels showed by default)
- *to* - byte offset in the log file that the client has already fetched; only entries after it are returned
- *max_entries* - max number of entries to load
:returns: Collection of log entries, log file size
and if there are new entries.
:http: * 200 (OK)
* 400 (invalid *date_before* value)
* 400 (invalid *date_after* value)
* 400 (invalid *source* value)
* 400 (invalid *node* value)
* 400 (invalid *level* value)
* 400 (invalid *to* value)
* 400 (invalid *max_entries* value)
* 404 (log file not found)
* 404 (log files dir not found)
* 404 (node not found)
* 500 (node has no assigned ip)
* 500 (invalid regular expression in config)
"""
user_data = web.input()
date_before = user_data.get('date_before')
if date_before:
try:
date_before = time.strptime(date_before,
settings.UI_LOG_DATE_FORMAT)
except ValueError:
logger.debug("Invalid 'date_before' value: %s", date_before)
raise web.badrequest("Invalid 'date_before' value")
date_after = user_data.get('date_after')
if date_after:
try:
date_after = time.strptime(date_after,
settings.UI_LOG_DATE_FORMAT)
except ValueError:
logger.debug("Invalid 'date_after' value: %s", date_after)
raise web.badrequest("Invalid 'date_after' value")
truncate_log = bool(user_data.get('truncate_log'))
if not user_data.get('source'):
logger.debug("'source' must be specified")
raise web.badrequest("'source' must be specified")
log_config = filter(lambda lc: lc['id'] == user_data.source,
settings.LOGS)
# If the log source was not found, or it is a fake source but we are
# running without fake tasks.
if not log_config or (log_config[0].get('fake') and
not settings.FAKE_TASKS):
logger.debug("Log source %r not found", user_data.source)
return web.notfound("Log source not found")
log_config = log_config[0]
# If it is a 'remote' and not a 'fake' log source, then calculate the log
# file path from the base dir, node IP and relative path to the file.
# Otherwise use the absolute path from the config.
node = None
if log_config['remote'] and not log_config.get('fake'):
if not user_data.get('node'):
raise web.badrequest("'node' must be specified")
node = db().query(Node).get(user_data.node)
if not node:
return web.notfound("Node not found")
if not node.ip:
logger.error('Node %r has no assigned ip', node.id)
raise web.internalerror("Node has no assigned ip")
if node.status == "discover":
ndir = node.ip
else:
ndir = node.fqdn
remote_log_dir = os.path.join(log_config['base'], ndir)
if not os.path.exists(remote_log_dir):
logger.debug("Log files dir %r for node %s not found",
remote_log_dir, node.id)
return web.notfound("Log files dir for node not found")
log_file = os.path.join(remote_log_dir, log_config['path'])
else:
log_file = log_config['path']
if not os.path.exists(log_file):
if node:
logger.debug("Log file %r for node %s not found",
log_file, node.id)
else:
logger.debug("Log file %r not found", log_file)
return web.notfound("Log file not found")
level = user_data.get('level')
allowed_levels = log_config['levels']
if level is not None:
if not (level in log_config['levels']):
raise web.badrequest("Invalid level")
allowed_levels = [l for l in dropwhile(lambda l: l != level,
log_config['levels'])]
try:
regexp = re.compile(log_config['regexp'])
except re.error as e:
logger.error('Invalid regular expression for file %r: %s',
log_config['id'], e)
raise web.internalerror("Invalid regular expression in config")
entries = []
to_byte = None
try:
to_byte = int(user_data.get('to', 0))
except ValueError:
logger.debug("Invalid 'to' value: %d", to_byte)
raise web.badrequest("Invalid 'to' value")
log_file_size = os.stat(log_file).st_size
if to_byte >= log_file_size:
return json.dumps({
'entries': [],
'to': log_file_size,
'has_more': False,
})
try:
max_entries = int(user_data.get('max_entries',
settings.TRUNCATE_LOG_ENTRIES))
except ValueError:
logger.debug("Invalid 'max_entries' value: %d", max_entries)
raise web.badrequest("Invalid 'max_entries' value")
accs = db().query(RedHatAccount).all()
regs = []
if len(accs) > 0:
regs = [
(
re.compile(r"|".join([a.username for a in accs])),
"username"
),
(
re.compile(r"|".join([a.password for a in accs])),
"password"
)
]
has_more = False
with open(log_file, 'r') as f:
f.seek(0, 2)
# we need to calculate current position manually instead of using
# tell() because read_backwards uses buffering
pos = f.tell()
multilinebuf = []
for line in read_backwards(f):
pos -= len(line)
if not truncate_log and pos < to_byte:
has_more = pos > 0
break
entry = line.rstrip('\n')
if not len(entry):
continue
if 'skip_regexp' in log_config and \
re.match(log_config['skip_regexp'], entry):
continue
m = regexp.match(entry)
if m is None:
if log_config.get('multiline'):
# Add the next multiline part to the last entry if it exists.
multilinebuf.append(entry)
else:
logger.debug("Unable to parse log entry '%s' from %s",
entry, log_file)
continue
entry_text = m.group('text')
if len(multilinebuf):
multilinebuf.reverse()
entry_text += '\n' + '\n'.join(multilinebuf)
multilinebuf = []
entry_level = m.group('level').upper() or 'INFO'
if level and not (entry_level in allowed_levels):
continue
try:
entry_date = time.strptime(m.group('date'),
log_config['date_format'])
except ValueError:
logger.debug("Unable to parse date from log entry."
" Date format: %r, date part of entry: %r",
log_config['date_format'],
m.group('date'))
continue
for regex, replace in regs:
entry_text = regex.sub(replace, entry_text)
entries.append([
time.strftime(settings.UI_LOG_DATE_FORMAT, entry_date),
entry_level,
entry_text
])
if truncate_log and len(entries) >= max_entries:
has_more = True
break
return {
'entries': entries,
'to': log_file_size,
'has_more': has_more,
}
class LogPackageHandler(object):
"""Log package handler
"""
@content_json
def PUT(self):
""":returns: JSONized Task object.
:http: * 200 (task successfully executed)
* 400 (failed to execute task)
"""
try:
task_manager = DumpTaskManager()
task = task_manager.execute()
except Exception as exc:
logger.warn(u'DumpTask: error while execution '
'dump environment task: {0}'.format(str(exc)))
raise web.badrequest(str(exc))
return TaskHandler.render(task)
class LogSourceCollectionHandler(JSONHandler):
"""Log source collection handler
"""
@content_json
def GET(self):
""":returns: Collection of log sources (from settings)
:http: * 200 (OK)
"""
return settings.LOGS
class LogSourceByNodeCollectionHandler(JSONHandler):
"""Log source by node collection handler
"""
@content_json
def GET(self, node_id):
""":returns: Collection of log sources by node (from settings)
:http: * 200 (OK)
* 404 (node not found in db)
"""
node = self.get_object_or_404(Node, node_id)
def getpath(x):
if x.get('fake'):
if settings.FAKE_TASKS:
return x['path']
else:
return ''
else:
if node.status == "discover":
ndir = node.ip
else:
ndir = node.fqdn
return os.path.join(x['base'], ndir, x['path'])
f = lambda x: (
x.get('remote') and x.get('path') and x.get('base') and
os.access(getpath(x), os.R_OK) and os.path.isfile(getpath(x))
)
sources = filter(f, settings.LOGS)
return sources
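LogEntryCollectionHandler.GET above masks stored Red Hat credentials in returned log lines by substituting every known username or password with a placeholder. A minimal sketch of that scrubbing step, with hard-coded sample credentials standing in for the RedHatAccount rows:
import re

usernames = ['rh-user']    # illustrative; normally taken from RedHatAccount
passwords = ['s3cret']
regs = [
    (re.compile(r"|".join(usernames)), "username"),
    (re.compile(r"|".join(passwords)), "password"),
]
entry_text = "login rh-user with password s3cret failed"
for regex, replace in regs:
    entry_text = regex.sub(replace, entry_text)
print(entry_text)   # -> login username with password password failed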


@ -1,128 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handlers dealing with network configurations
"""
import json
import traceback
import web
from nailgun.api.handlers.base import build_json_response
from nailgun.api.handlers.base import content_json
from nailgun.api.handlers.base import JSONHandler
from nailgun.api.handlers.tasks import TaskHandler
from nailgun.api.models import Cluster
from nailgun.api.models import NetworkConfiguration
from nailgun.api.models import NetworkGroup
from nailgun.api.models import Task
from nailgun.api.serializers.network_configuration \
import NetworkConfigurationSerializer
from nailgun.api.validators.network import NetworkConfigurationValidator
from nailgun.db import db
from nailgun.logger import logger
from nailgun.task.helpers import TaskHelper
from nailgun.task.manager import CheckNetworksTaskManager
from nailgun.task.manager import VerifyNetworksTaskManager
class NetworkConfigurationVerifyHandler(JSONHandler):
"""Network configuration verify handler
"""
validator = NetworkConfigurationValidator
@content_json
def PUT(self, cluster_id):
""":IMPORTANT: this method should be rewritten to be more RESTful
:returns: JSONized Task object.
:http: * 202 (network checking task failed)
* 200 (network verification task started)
* 404 (cluster not found in db)
"""
cluster = self.get_object_or_404(Cluster, cluster_id)
try:
data = self.validator.validate_networks_update(web.data())
except web.webapi.badrequest as exc:
task = Task(name='check_networks', cluster=cluster)
db().add(task)
db().commit()
TaskHelper.set_error(task.uuid, exc.data)
logger.error(traceback.format_exc())
json_task = build_json_response(TaskHandler.render(task))
raise web.accepted(data=json_task)
vlan_ids = [{
'name': n['name'],
'vlans': NetworkGroup.generate_vlan_ids_list(n)
} for n in data['networks']]
task_manager = VerifyNetworksTaskManager(cluster_id=cluster.id)
task = task_manager.execute(data, vlan_ids)
return TaskHandler.render(task)
class NetworkConfigurationHandler(JSONHandler):
"""Network configuration handler
"""
validator = NetworkConfigurationValidator
serializer = NetworkConfigurationSerializer
@content_json
def GET(self, cluster_id):
""":returns: JSONized network configuration for cluster.
:http: * 200 (OK)
* 404 (cluster not found in db)
"""
cluster = self.get_object_or_404(Cluster, cluster_id)
return self.serializer.serialize_for_cluster(cluster)
def PUT(self, cluster_id):
""":returns: JSONized Task object.
:http: * 202 (network checking task created)
* 404 (cluster not found in db)
"""
data = json.loads(web.data())
cluster = self.get_object_or_404(Cluster, cluster_id)
task_manager = CheckNetworksTaskManager(cluster_id=cluster.id)
task = task_manager.execute(data)
if task.status != 'error':
try:
if 'networks' in data:
self.validator.validate_networks_update(json.dumps(data))
NetworkConfiguration.update(cluster, data)
except web.webapi.badrequest as exc:
TaskHelper.set_error(task.uuid, exc.data)
logger.error(traceback.format_exc())
except Exception as exc:
TaskHelper.set_error(task.uuid, exc)
logger.error(traceback.format_exc())
data = build_json_response(TaskHandler.render(task))
if task.status == 'error':
db().rollback()
else:
db().commit()
raise web.accepted(data=data)


@ -1,567 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handlers dealing with nodes
"""
from datetime import datetime
import json
import traceback
import web
from nailgun.api.handlers.base import content_json
from nailgun.api.handlers.base import JSONHandler
from nailgun.api.models import Cluster
from nailgun.api.models import NetworkGroup
from nailgun.api.models import Node
from nailgun.api.models import NodeAttributes
from nailgun.api.models import NodeNICInterface
from nailgun.api.validators.network import NetAssignmentValidator
from nailgun.api.validators.node import NodeValidator
from nailgun.db import db
from nailgun.logger import logger
from nailgun.network.manager import NetworkManager
from nailgun.network.topology import TopoChecker
from nailgun import notifier
class NodeHandler(JSONHandler):
fields = ('id', 'name', 'meta', 'progress', 'roles', 'pending_roles',
'status', 'mac', 'fqdn', 'ip', 'manufacturer', 'platform_name',
'pending_addition', 'pending_deletion', 'os_platform',
'error_type', 'online', 'cluster')
model = Node
validator = NodeValidator
@classmethod
def render(cls, instance, fields=None):
json_data = None
try:
json_data = JSONHandler.render(instance, fields=cls.fields)
network_manager = NetworkManager()
json_data['network_data'] = network_manager.get_node_networks(
instance.id)
except Exception:
logger.error(traceback.format_exc())
return json_data
@content_json
def GET(self, node_id):
""":returns: JSONized Node object.
:http: * 200 (OK)
* 404 (node not found in db)
"""
node = self.get_object_or_404(Node, node_id)
return self.render(node)
@content_json
def PUT(self, node_id):
""":returns: JSONized Node object.
:http: * 200 (OK)
* 400 (invalid node data specified)
* 404 (node not found in db)
"""
node = self.get_object_or_404(Node, node_id)
if not node.attributes:
node.attributes = NodeAttributes(node_id=node.id)
data = self.checked_data(self.validator.validate_update)
network_manager = NetworkManager()
if "cluster_id" in data:
if data["cluster_id"] is None and node.cluster:
node.cluster.clear_pending_changes(node_id=node.id)
node.roles = node.pending_roles = []
old_cluster_id = node.cluster_id
node.cluster_id = data["cluster_id"]
if node.cluster_id != old_cluster_id:
if old_cluster_id:
network_manager.clear_assigned_networks(node.id)
network_manager.clear_all_allowed_networks(node.id)
if node.cluster_id:
network_manager.allow_network_assignment_to_all_interfaces(
node.id
)
network_manager.assign_networks_to_main_interface(node.id)
for key, value in data.iteritems():
# we don't allow updating "id" explicitly,
# and "cluster_id" has already been handled above, before all other fields
if key in ("id", "cluster_id"):
continue
setattr(node, key, value)
if not node.status in ('provisioning', 'deploying') \
and "roles" in data or "cluster_id" in data:
try:
node.attributes.volumes = \
node.volume_manager.gen_volumes_info()
except Exception as exc:
msg = (
u"Failed to generate volumes "
"info for node '{0}': '{1}'"
).format(
node.name or data.get("mac") or data.get("id"),
str(exc) or "see logs for details"
)
logger.warning(traceback.format_exc())
notifier.notify("error", msg, node_id=node.id)
db().commit()
return self.render(node)
def DELETE(self, node_id):
""":returns: Empty string
:http: * 204 (node successfully deleted)
* 404 (cluster not found in db)
"""
node = self.get_object_or_404(Node, node_id)
db().delete(node)
db().commit()
raise web.webapi.HTTPError(
status="204 No Content",
data=""
)
class NodeCollectionHandler(JSONHandler):
"""Node collection handler
"""
validator = NodeValidator
@content_json
def GET(self):
"""May receive cluster_id parameter to filter list
of nodes
:returns: Collection of JSONized Node objects.
:http: * 200 (OK)
"""
user_data = web.input(cluster_id=None)
if user_data.cluster_id == '':
nodes = db().query(Node).filter_by(
cluster_id=None).all()
elif user_data.cluster_id:
nodes = db().query(Node).filter_by(
cluster_id=user_data.cluster_id).all()
else:
nodes = db().query(Node).all()
return map(NodeHandler.render, nodes)
@content_json
def POST(self):
""":returns: JSONized Node object.
:http: * 201 (cluster successfully created)
* 400 (invalid node data specified)
* 403 (node has incorrect status)
* 409 (node with such parameters already exists)
"""
data = self.checked_data()
if data.get("status", "") != "discover":
error = web.forbidden()
error.data = "Only bootstrap nodes are allowed to be registered."
msg = u"Node with mac '{0}' was not created, " \
u"because request status is '{1}'."\
.format(data[u'mac'], data[u'status'])
logger.warning(msg)
raise error
node = Node()
if "cluster_id" in data:
# FIXME(vk): this part is needed only for tests. Normally,
# nodes are created only by agent and POST requests don't contain
# cluster_id, but our integration and unit tests widely use it.
# We need to assign cluster first
cluster_id = data.pop("cluster_id")
if cluster_id:
node.cluster = db().query(Cluster).get(cluster_id)
for key, value in data.iteritems():
if key == "id":
continue
elif key == "meta":
node.create_meta(value)
else:
setattr(node, key, value)
node.name = "Untitled (%s)" % data['mac'][-5:]
node.timestamp = datetime.now()
db().add(node)
db().commit()
node.attributes = NodeAttributes()
try:
node.attributes.volumes = node.volume_manager.gen_volumes_info()
if node.cluster:
node.cluster.add_pending_changes(
"disks",
node_id=node.id
)
except Exception as exc:
msg = (
u"Failed to generate volumes "
"info for node '{0}': '{1}'"
).format(
node.name or data.get("mac") or data.get("id"),
str(exc) or "see logs for details"
)
logger.warning(traceback.format_exc())
notifier.notify("error", msg, node_id=node.id)
db().add(node)
db().commit()
network_manager = NetworkManager()
# Add interfaces for node from 'meta'.
if node.meta and node.meta.get('interfaces'):
network_manager.update_interfaces_info(node.id)
if node.cluster_id:
network_manager.allow_network_assignment_to_all_interfaces(node.id)
network_manager.assign_networks_to_main_interface(node.id)
try:
# we use a binary multiplier (1024-based, 1 GiB = 1073741824 bytes) for RAM
# because, unlike disks, vendors do not quote RAM sizes in decimal units
ram = str(round(float(
node.meta['memory']['total']) / 1073741824, 1)) + " GB RAM"
except Exception as exc:
logger.warning(traceback.format_exc())
ram = "unknown RAM"
try:
# we use multiplier of 1000 because disk vendors specify HDD size
# in terms of decimal capacity. Sources:
# http://knowledge.seagate.com/articles/en_US/FAQ/172191en
# http://physics.nist.gov/cuu/Units/binary.html
hd_size = round(float(
sum([d["size"] for d in node.meta["disks"]]) / 1000000000), 1)
# if HDD > 100 GB we show its size in TB
if hd_size > 100:
hd_size = str(hd_size / 1000) + " TB HDD"
else:
hd_size = str(hd_size) + " GB HDD"
except Exception as exc:
logger.warning(traceback.format_exc())
hd_size = "unknown HDD"
cores = str(node.meta.get('cpu', {}).get('total', "unknown"))
notifier.notify(
"discover",
"New node is discovered: %s CPUs / %s / %s " %
(cores, ram, hd_size),
node_id=node.id
)
raise web.webapi.created(json.dumps(
NodeHandler.render(node),
indent=4
))
@content_json
def PUT(self):
""":returns: Collection of JSONized Node objects.
:http: * 200 (nodes are successfully updated)
* 400 (invalid nodes data specified)
"""
data = self.checked_data(
self.validator.validate_collection_update
)
network_manager = NetworkManager()
q = db().query(Node)
nodes_updated = []
for nd in data:
is_agent = nd.pop("is_agent") if "is_agent" in nd else False
node = None
if "mac" in nd:
node = q.filter_by(mac=nd["mac"]).first() \
or self.validator.validate_existent_node_mac_update(nd)
else:
node = q.get(nd["id"])
if is_agent:
node.timestamp = datetime.now()
if not node.online:
node.online = True
msg = u"Node '{0}' is back online".format(
node.human_readable_name)
logger.info(msg)
notifier.notify("discover", msg, node_id=node.id)
db().commit()
old_cluster_id = node.cluster_id
if "cluster_id" in nd:
if nd["cluster_id"] is None and node.cluster:
node.cluster.clear_pending_changes(node_id=node.id)
node.roles = node.pending_roles = []
node.cluster_id = nd["cluster_id"]
for key, value in nd.iteritems():
if is_agent and (key, value) == ("status", "discover") \
and node.status == "provisioning":
# We don't update provisioning back to discover
logger.debug(
"Node is already provisioning - "
"status not updated by agent"
)
continue
if key == "meta":
node.update_meta(value)
else:
setattr(node, key, value)
db().commit()
if not node.attributes:
node.attributes = NodeAttributes()
db().commit()
if not node.attributes.volumes:
node.attributes.volumes = \
node.volume_manager.gen_volumes_info()
db().commit()
if not node.status in ('provisioning', 'deploying'):
variants = (
"disks" in node.meta and
len(node.meta["disks"]) != len(
filter(
lambda d: d["type"] == "disk",
node.attributes.volumes
)
),
"roles" in nd,
"cluster_id" in nd
)
if any(variants):
try:
node.attributes.volumes = \
node.volume_manager.gen_volumes_info()
if node.cluster:
node.cluster.add_pending_changes(
"disks",
node_id=node.id
)
except Exception as exc:
msg = (
"Failed to generate volumes "
"info for node '{0}': '{1}'"
).format(
node.name or data.get("mac") or data.get("id"),
str(exc) or "see logs for details"
)
logger.warning(traceback.format_exc())
notifier.notify("error", msg, node_id=node.id)
db().commit()
if is_agent:
# Update node's NICs.
if node.meta and 'interfaces' in node.meta:
# we won't update interfaces if data is invalid
network_manager.update_interfaces_info(node.id)
nodes_updated.append(node)
db().commit()
if 'cluster_id' in nd and nd['cluster_id'] != old_cluster_id:
if old_cluster_id:
network_manager.clear_assigned_networks(node.id)
network_manager.clear_all_allowed_networks(node.id)
if nd['cluster_id']:
network_manager.allow_network_assignment_to_all_interfaces(
node.id
)
network_manager.assign_networks_to_main_interface(node.id)
return map(NodeHandler.render, nodes_updated)
class NodeNICsHandler(JSONHandler):
"""Node network interfaces handler
"""
fields = (
'id', (
'interfaces',
'id',
'mac',
'name',
'current_speed',
'max_speed',
('assigned_networks', 'id', 'name'),
('allowed_networks', 'id', 'name')
)
)
model = NodeNICInterface
validator = NetAssignmentValidator
@content_json
def GET(self, node_id):
""":returns: Collection of JSONized Node interfaces.
:http: * 200 (OK)
* 404 (node not found in db)
"""
node = self.get_object_or_404(Node, node_id)
return self.render(node)['interfaces']
class NodeCollectionNICsHandler(JSONHandler):
"""Node collection network interfaces handler
"""
model = NetworkGroup
validator = NetAssignmentValidator
fields = NodeNICsHandler.fields
@content_json
def PUT(self):
""":returns: Collection of JSONized Node objects.
:http: * 200 (nodes are successfully updated)
* 400 (invalid nodes data specified)
"""
data = self.validator.validate_collection_structure(web.data())
network_manager = NetworkManager()
updated_nodes_ids = []
for node_data in data:
self.validator.verify_data_correctness(node_data)
node_id = network_manager._update_attrs(node_data)
updated_nodes_ids.append(node_id)
updated_nodes = db().query(Node).filter(
Node.id.in_(updated_nodes_ids)
).all()
return map(self.render, updated_nodes)
class NodeNICsDefaultHandler(JSONHandler):
"""Node default network interfaces handler
"""
@content_json
def GET(self, node_id):
""":returns: Collection of default JSONized interfaces for node.
:http: * 200 (OK)
* 404 (node not found in db)
"""
node = self.get_object_or_404(Node, node_id)
default_nets = self.get_default(node)
return default_nets
def get_default(self, node):
nics = []
network_manager = NetworkManager()
for nic in node.interfaces:
nic_dict = {
"id": nic.id,
"name": nic.name,
"mac": nic.mac,
"max_speed": nic.max_speed,
"current_speed": nic.current_speed
}
assigned_ng_ids = network_manager.get_default_nic_networkgroups(
node.id,
nic.id
)
for ng_id in assigned_ng_ids:
ng = db().query(NetworkGroup).get(ng_id)
nic_dict.setdefault("assigned_networks", []).append(
{"id": ng_id, "name": ng.name}
)
allowed_ng_ids = network_manager.get_allowed_nic_networkgroups(
node.id,
nic.id
)
for ng_id in allowed_ng_ids:
ng = db().query(NetworkGroup).get(ng_id)
nic_dict.setdefault("allowed_networks", []).append(
{"id": ng_id, "name": ng.name}
)
nics.append(nic_dict)
return nics
class NodeCollectionNICsDefaultHandler(NodeNICsDefaultHandler):
"""Node collection default network interfaces handler
"""
validator = NetAssignmentValidator
@content_json
def GET(self):
"""May receive cluster_id parameter to filter list
of nodes
:returns: Collection of JSONized Nodes interfaces.
:http: * 200 (OK)
* 404 (node not found in db)
"""
user_data = web.input(cluster_id=None)
if user_data.cluster_id == '':
nodes = self.get_object_or_404(Node, cluster_id=None)
elif user_data.cluster_id:
nodes = self.get_object_or_404(
Node,
cluster_id=user_data.cluster_id
)
else:
nodes = self.get_object_or_404(Node)
def_net_nodes = []
for node in nodes:
rendered_node = self.get_default(self.render(node))
def_net_nodes.append(rendered_node)
return map(self.render, nodes)
class NodeNICsVerifyHandler(JSONHandler):
"""Node NICs verify handler
Class is proof of concept. Not ready for use.
"""
fields = (
'id', (
'interfaces',
'id',
'mac',
'name',
('assigned_networks', 'id', 'name'),
('allowed_networks', 'id', 'name')
)
)
validator = NetAssignmentValidator
@content_json
def POST(self):
""":returns: Collection of JSONized Nodes interfaces.
:http: * 200 (OK)
"""
data = self.validator.validate_structure(web.data())
for node in data:
self.validator.verify_data_correctness(node)
if TopoChecker.is_assignment_allowed(data):
return map(self.render, data)
topo, fields_with_conflicts = TopoChecker.resolve_topo_conflicts(data)
return map(self.render, topo, fields=fields_with_conflicts)
class NodesAllocationStatsHandler(object):
"""Node allocation stats handler
"""
@content_json
def GET(self):
""":returns: Total and unallocated nodes count.
:http: * 200 (OK)
"""
unallocated_nodes = db().query(Node).filter_by(cluster_id=None).count()
total_nodes = \
db().query(Node).count()
return {'total': total_nodes,
'unallocated': unallocated_nodes}
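NodeCollectionHandler.POST above formats RAM with a binary multiplier (1 GiB = 1073741824 bytes) and disk sizes with a decimal one (1 GB = 10**9 bytes), switching to TB above 100 GB, before sending the discovery notification. A standalone sketch of the same arithmetic (sample byte counts are illustrative):
def humanize_node_sizes(ram_bytes, disk_sizes_bytes):
    # RAM: binary multiplier, as in the handler above.
    ram = str(round(float(ram_bytes) / 1073741824, 1)) + " GB RAM"
    # Disks: decimal multiplier, matching vendor-quoted HDD capacities.
    hd_size = round(float(sum(disk_sizes_bytes)) / 1000000000.0, 1)
    if hd_size > 100:
        hd_size = str(hd_size / 1000) + " TB HDD"
    else:
        hd_size = str(hd_size) + " GB HDD"
    return ram, hd_size

print(humanize_node_sizes(8589934592, [500107862016, 500107862016]))
# -> ('8.0 GB RAM', '1.0002 TB HDD')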


@ -1,124 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handlers dealing with notifications
"""
import web
from nailgun.api.handlers.base import content_json
from nailgun.api.handlers.base import JSONHandler
from nailgun.api.models import Notification
from nailgun.api.validators.notification import NotificationValidator
from nailgun.db import db
from nailgun.settings import settings
class NotificationHandler(JSONHandler):
"""Notification single handler
"""
fields = (
"id",
"cluster",
"topic",
"message",
"status",
"node_id",
"task_id"
)
model = Notification
validator = NotificationValidator
@classmethod
def render(cls, instance, fields=None):
json_data = JSONHandler.render(instance, fields=cls.fields)
json_data["time"] = ":".join([
instance.datetime.strftime("%H"),
instance.datetime.strftime("%M"),
instance.datetime.strftime("%S")
])
json_data["date"] = "-".join([
instance.datetime.strftime("%d"),
instance.datetime.strftime("%m"),
instance.datetime.strftime("%Y")
])
return json_data
@content_json
def GET(self, notification_id):
""":returns: JSONized Notification object.
:http: * 200 (OK)
* 404 (notification not found in db)
"""
notification = self.get_object_or_404(Notification, notification_id)
return self.render(notification)
@content_json
def PUT(self, notification_id):
""":returns: JSONized Notification object.
:http: * 200 (OK)
* 400 (invalid notification data specified)
* 404 (notification not found in db)
"""
notification = self.get_object_or_404(Notification, notification_id)
data = self.validator.validate_update(web.data())
for key, value in data.iteritems():
setattr(notification, key, value)
db().add(notification)
db().commit()
return self.render(notification)
class NotificationCollectionHandler(JSONHandler):
validator = NotificationValidator
@content_json
def GET(self):
""":returns: Collection of JSONized Notification objects.
:http: * 200 (OK)
"""
user_data = web.input(limit=settings.MAX_ITEMS_PER_PAGE)
limit = user_data.limit
query = db().query(Notification).limit(limit)
notifications = query.all()
return map(
NotificationHandler.render,
notifications
)
@content_json
def PUT(self):
""":returns: Collection of JSONized Notification objects.
:http: * 200 (OK)
* 400 (invalid data specified for collection update)
"""
data = self.validator.validate_collection_update(web.data())
q = db().query(Notification)
notifications_updated = []
for nd in data:
notification = q.get(nd["id"])
for key, value in nd.iteritems():
setattr(notification, key, value)
notifications_updated.append(notification)
db().add(notification)
db().commit()
return map(
NotificationHandler.render,
notifications_updated
)
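NotificationHandler.render() above splits the stored datetime into separate "time" (HH:MM:SS) and "date" (DD-MM-YYYY) strings. A minimal sketch of that formatting with a sample timestamp:
from datetime import datetime

dt = datetime(2013, 8, 29, 21, 26, 17)       # sample value
time_str = ":".join([dt.strftime("%H"), dt.strftime("%M"), dt.strftime("%S")])
date_str = "-".join([dt.strftime("%d"), dt.strftime("%m"), dt.strftime("%Y")])
print(time_str + " " + date_str)             # -> 21:26:17 29-08-2013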


@ -1,126 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import web
from nailgun.api.handlers.base import content_json
from nailgun.api.handlers.base import JSONHandler
from nailgun.api.models import Cluster
from nailgun.db import db
from nailgun.logger import logger
from nailgun.orchestrator import deployment_serializers
from nailgun.orchestrator import provisioning_serializers
class DefaultOrchestratorInfo(JSONHandler):
"""Base class for default orchestrator data.
Need to redefine serializer variable
"""
# Override this attribute
_serializer = None
@content_json
def GET(self, cluster_id):
""":returns: JSONized default data which will be passed to orchestrator
:http: * 200 (OK)
* 404 (cluster not found in db)
"""
cluster = self.get_object_or_404(Cluster, cluster_id)
return self._serializer.serialize(cluster)
class OrchestratorInfo(JSONHandler):
"""Base class for replaced data."""
def get_orchestrator_info(self, cluster):
"""Method should return data
which will be passed to orchestrator
"""
raise NotImplementedError('Please implement this method')
def update_orchestrator_info(self, cluster, data):
"""Method should override data which
will be passed to orchestrator
"""
raise NotImplementedError('Please implement this method')
@content_json
def GET(self, cluster_id):
""":returns: JSONized data which will be passed to orchestrator
:http: * 200 (OK)
* 404 (cluster not found in db)
"""
cluster = self.get_object_or_404(Cluster, cluster_id)
return self.get_orchestrator_info(cluster)
@content_json
def PUT(self, cluster_id):
""":returns: JSONized data which will be passed to orchestrator
:http: * 200 (OK)
* 400 (wrong data specified)
* 404 (cluster not found in db)
"""
cluster = self.get_object_or_404(Cluster, cluster_id)
data = self.checked_data()
self.update_orchestrator_info(cluster, data)
logger.debug('OrchestratorInfo:'
' facts for cluster_id {0} were uploaded'
.format(cluster_id))
return data
@content_json
def DELETE(self, cluster_id):
""":returns: {}
:http: * 202 (orchestrator data deletion process launched)
* 400 (failed to execute orchestrator data deletion process)
* 404 (cluster not found in db)
"""
cluster = self.get_object_or_404(Cluster, cluster_id)
self.update_orchestrator_info(cluster, {})
raise web.accepted(data="{}")
class DefaultProvisioningInfo(DefaultOrchestratorInfo):
_serializer = provisioning_serializers
class DefaultDeploymentInfo(DefaultOrchestratorInfo):
_serializer = deployment_serializers
class ProvisioningInfo(OrchestratorInfo):
def get_orchestrator_info(self, cluster):
return cluster.replaced_provisioning_info
def update_orchestrator_info(self, cluster, data):
cluster.replaced_provisioning_info = data
db().commit()
return cluster.replaced_provisioning_info
class DeploymentInfo(OrchestratorInfo):
def get_orchestrator_info(self, cluster):
return cluster.replaced_deployment_info
def update_orchestrator_info(self, cluster, data):
cluster.replaced_deployment_info = data
db().commit()
return cluster.replaced_deployment_info
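OrchestratorInfo above acts as an abstract base: GET/PUT/DELETE are shared, and subclasses only override get_orchestrator_info()/update_orchestrator_info(). A toy sketch of that contract backed by a plain dict instead of cluster columns (the class and names below are illustrative only):
class InMemoryOrchestratorInfo(object):
    def __init__(self):
        self._data = {}

    def get_orchestrator_info(self, cluster_name):
        return self._data.get(cluster_name, {})

    def update_orchestrator_info(self, cluster_name, data):
        self._data[cluster_name] = data
        return self._data[cluster_name]

info = InMemoryOrchestratorInfo()
info.update_orchestrator_info("demo", {"role": "controller"})
print(info.get_orchestrator_info("demo"))    # -> {'role': 'controller'}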


@ -1,58 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handlers dealing with plugins
"""
import json
import web
from nailgun.api.handlers.base import content_json
from nailgun.api.handlers.base import JSONHandler
from nailgun.api.handlers.tasks import TaskHandler
from nailgun.plugin.manager import PluginManager
class PluginCollectionHandler(JSONHandler):
@content_json
def GET(self):
pass
@content_json
def POST(self):
plugin_manager = PluginManager()
task = plugin_manager.add_install_plugin_task(
json.loads(web.data()))
return TaskHandler.render(task)
class PluginHandler(JSONHandler):
@content_json
def GET(self, plugin_id):
pass
@content_json
def DELETE(self, plugin_id):
pass
@content_json
def PUT(self, plugin_id):
pass


@ -1,139 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handlers dealing with exclusive Red Hat tasks
"""
import traceback
import web
from nailgun.api.handlers.base import build_json_response
from nailgun.api.handlers.base import content_json
from nailgun.api.handlers.base import JSONHandler
from nailgun.api.handlers.tasks import TaskHandler
from nailgun.api.models import RedHatAccount
from nailgun.api.models import Release
from nailgun.api.validators.redhat import RedHatAccountValidator
from nailgun.db import db
from nailgun.logger import logger
from nailgun.task.manager import RedHatSetupTaskManager
class RedHatAccountHandler(JSONHandler):
"""Red Hat account handler
"""
fields = (
'username',
'password',
'license_type',
'satellite',
'activation_key'
)
model = RedHatAccount
@content_json
def GET(self):
""":returns: JSONized RedHatAccount object.
:http: * 200 (OK)
* 404 (account not found in db)
"""
account = db().query(RedHatAccount).first()
if not account:
raise web.notfound()
return self.render(account)
@content_json
def POST(self):
""":returns: JSONized RedHatAccount object.
:http: * 200 (OK)
* 400 (invalid account data specified)
* 404 (account not found in db)
"""
data = self.checked_data()
license_type = data.get("license_type")
if license_type == 'rhsm':
data["satellite"] = ""
data["activation_key"] = ""
release_id = data.pop('release_id')
release_db = db().query(Release).get(release_id)
if not release_db:
raise web.notfound(
"No release with ID={0} found".format(release_id)
)
account = db().query(RedHatAccount).first()
if account:
db().query(RedHatAccount).update(data)
else:
account = RedHatAccount(**data)
db().add(account)
db().commit()
return self.render(account)
class RedHatSetupHandler(JSONHandler):
"""Red Hat setup handler
"""
validator = RedHatAccountValidator
@content_json
def POST(self):
"""Starts Red Hat setup and download process
:returns: JSONized Task object.
:http: * 202 (setup task created and started)
* 400 (invalid account data specified)
* 404 (release not found in db)
"""
data = self.checked_data()
license_type = data.get("license_type")
if license_type == 'rhsm':
data["satellite"] = ""
data["activation_key"] = ""
release_data = {'release_id': data['release_id']}
release_id = data.pop('release_id')
release_db = db().query(Release).get(release_id)
if not release_db:
raise web.notfound(
"No release with ID={0} found".format(release_id)
)
release_data['redhat'] = data
release_data['release_name'] = release_db.name
account = db().query(RedHatAccount).first()
if account:
db().query(RedHatAccount).update(data)
else:
account = RedHatAccount(**data)
db().add(account)
db().commit()
task_manager = RedHatSetupTaskManager(release_data)
try:
task = task_manager.execute()
except Exception as exc:
logger.error(u'RedHatAccountHandler: error while execution'
' Red Hat validation task: {0}'.format(str(exc)))
logger.error(traceback.format_exc())
raise web.badrequest(str(exc))
data = build_json_response(TaskHandler.render(task))
raise web.accepted(data=data)


@ -1,46 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Product registration handlers
"""
import base64
import json
from nailgun.api.handlers.base import content_json
from nailgun.api.handlers.base import JSONHandler
from nailgun.settings import settings
class FuelKeyHandler(JSONHandler):
""" Fuel key handler"""
@content_json
def GET(self):
"""Returns Fuel Key data
:returns: base64 of FUEL commit SHA, release version and Fuel UUID.
:http: * 200 (OK)
"""
key_data = {
"sha": settings.COMMIT_SHA,
"release": settings.PRODUCT_VERSION,
"uuid": settings.FUEL_KEY
}
signature = base64.b64encode(json.dumps(key_data))
key_data["signature"] = signature
return {"key": base64.b64encode(json.dumps(key_data))}


@ -1,122 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handlers dealing with releases
"""
import json
import web
from nailgun.api.handlers.base import content_json
from nailgun.api.handlers.base import JSONHandler
from nailgun.api.models import Release
from nailgun.api.validators.release import ReleaseValidator
from nailgun.db import db
class ReleaseHandler(JSONHandler):
"""Release single handler
"""
fields = (
"id",
"name",
"version",
"description",
"operating_system",
"roles",
"roles_metadata",
"state"
)
model = Release
validator = ReleaseValidator
@content_json
def GET(self, release_id):
""":returns: JSONized Release object.
:http: * 200 (OK)
* 404 (release not found in db)
"""
release = self.get_object_or_404(Release, release_id)
return self.render(release)
@content_json
def PUT(self, release_id):
""":returns: JSONized Release object.
:http: * 200 (OK)
* 400 (invalid release data specified)
* 404 (release not found in db)
* 409 (release with such parameters already exists)
"""
release = self.get_object_or_404(Release, release_id)
data = self.checked_data()
for key, value in data.iteritems():
setattr(release, key, value)
db().commit()
return self.render(release)
def DELETE(self, release_id):
""":returns: JSONized Release object.
:http: * 204 (release successfully deleted)
* 404 (release not found in db)
"""
release = self.get_object_or_404(Release, release_id)
db().delete(release)
db().commit()
raise web.webapi.HTTPError(
status="204 No Content",
data=""
)
class ReleaseCollectionHandler(JSONHandler):
"""Release collection handler
"""
validator = ReleaseValidator
@content_json
def GET(self):
""":returns: Collection of JSONized Release objects.
:http: * 200 (OK)
"""
return map(
ReleaseHandler.render,
db().query(Release).all()
)
@content_json
def POST(self):
""":returns: JSONized Release object.
:http: * 201 (release successfully created)
* 400 (invalid release data specified)
* 409 (release with such parameters already exists)
"""
data = self.checked_data()
release = Release()
for key, value in data.iteritems():
setattr(release, key, value)
db().add(release)
db().commit()
raise web.webapi.created(json.dumps(
ReleaseHandler.render(release),
indent=4
))


@ -1,98 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import web
from nailgun.api.handlers.base import content_json
from nailgun.api.handlers.base import JSONHandler
from nailgun.api.models import Task
from nailgun.db import db
"""
Handlers dealing with tasks
"""
class TaskHandler(JSONHandler):
"""Task single handler
"""
fields = (
"id",
"cluster",
"uuid",
"name",
"result",
"message",
"status",
"progress"
)
model = Task
@content_json
def GET(self, task_id):
""":returns: JSONized Task object.
:http: * 200 (OK)
* 404 (task not found in db)
"""
task = self.get_object_or_404(Task, task_id)
return self.render(task)
def DELETE(self, task_id):
""":returns: JSONized Cluster object.
:http: * 204 (task successfully deleted)
* 400 (can't delete running task manually)
* 404 (task not found in db)
"""
task = self.get_object_or_404(Task, task_id)
if task.status not in ("ready", "error"):
raise web.badrequest("You cannot delete running task manually")
for subtask in task.subtasks:
db().delete(subtask)
db().delete(task)
db().commit()
raise web.webapi.HTTPError(
status="204 No Content",
data=""
)
class TaskCollectionHandler(JSONHandler):
"""Task collection handler
"""
@content_json
def GET(self):
"""May receive cluster_id parameter to filter list
of tasks
:returns: Collection of JSONized Task objects.
:http: * 200 (OK)
* 404 (task not found in db)
"""
user_data = web.input(cluster_id=None)
if user_data.cluster_id == '':
tasks = db().query(Task).filter_by(
cluster_id=None).all()
elif user_data.cluster_id:
tasks = db().query(Task).filter_by(
cluster_id=user_data.cluster_id).all()
else:
tasks = db().query(Task).all()
return map(
TaskHandler.render,
tasks
)


@ -1,39 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Product info handlers
"""
from nailgun.api.handlers.base import content_json
from nailgun.api.handlers.base import JSONHandler
from nailgun.settings import settings
class VersionHandler(JSONHandler):
"""Version info handler
"""
@content_json
def GET(self):
""":returns: FUEL/FUELWeb commit SHA, release version.
:http: * 200 (OK)
"""
return {
"sha": str(settings.COMMIT_SHA),
"release": str(settings.PRODUCT_VERSION),
"fuel_sha": str(settings.FUEL_COMMIT_SHA)
}


@ -1,850 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import deepcopy
from random import choice
import string
import uuid
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import Float
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Text
from sqlalchemy import Unicode
from sqlalchemy import UniqueConstraint
from sqlalchemy import ForeignKey, Enum, DateTime
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declarative_base
import web
from nailgun.api.fields import JSON
from nailgun.db import db
from nailgun.logger import logger
from nailgun.settings import settings
from nailgun.volumes.manager import VolumeManager
Base = declarative_base()
class NodeRoles(Base):
__tablename__ = 'node_roles'
id = Column(Integer, primary_key=True)
role = Column(Integer, ForeignKey('roles.id', ondelete="CASCADE"))
node = Column(Integer, ForeignKey('nodes.id'))
class PendingNodeRoles(Base):
__tablename__ = 'pending_node_roles'
id = Column(Integer, primary_key=True)
role = Column(Integer, ForeignKey('roles.id', ondelete="CASCADE"))
node = Column(Integer, ForeignKey('nodes.id'))
class Role(Base):
__tablename__ = 'roles'
id = Column(Integer, primary_key=True)
release_id = Column(Integer, ForeignKey('releases.id', ondelete='CASCADE'))
name = Column(String(50), nullable=False)
class Release(Base):
__tablename__ = 'releases'
__table_args__ = (
UniqueConstraint('name', 'version'),
)
STATES = (
'not_available',
'downloading',
'error',
'available'
)
id = Column(Integer, primary_key=True)
name = Column(Unicode(100), nullable=False)
version = Column(String(30), nullable=False)
description = Column(Unicode)
operating_system = Column(String(50), nullable=False)
state = Column(Enum(*STATES, name='release_state'),
nullable=False,
default='not_available')
networks_metadata = Column(JSON, default=[])
attributes_metadata = Column(JSON, default={})
volumes_metadata = Column(JSON, default={})
roles_metadata = Column(JSON, default={})
role_list = relationship("Role", backref="release")
clusters = relationship("Cluster", backref="release")
@property
def roles(self):
return [role.name for role in self.role_list]
@roles.setter
def roles(self, roles):
for role in roles:
if role not in self.roles:
self.role_list.append(Role(name=role, release=self))
db().commit()
class ClusterChanges(Base):
__tablename__ = 'cluster_changes'
POSSIBLE_CHANGES = (
'networks',
'attributes',
'disks'
)
id = Column(Integer, primary_key=True)
cluster_id = Column(Integer, ForeignKey('clusters.id'))
node_id = Column(Integer, ForeignKey('nodes.id', ondelete='CASCADE'))
name = Column(
Enum(*POSSIBLE_CHANGES, name='possible_changes'),
nullable=False
)
class Cluster(Base):
__tablename__ = 'clusters'
MODES = ('multinode', 'ha_full', 'ha_compact')
STATUSES = ('new', 'deployment', 'operational', 'error', 'remove')
NET_MANAGERS = ('FlatDHCPManager', 'VlanManager')
GROUPING = ('roles', 'hardware', 'both')
id = Column(Integer, primary_key=True)
mode = Column(
Enum(*MODES, name='cluster_mode'),
nullable=False,
default='multinode'
)
status = Column(
Enum(*STATUSES, name='cluster_status'),
nullable=False,
default='new'
)
net_manager = Column(
Enum(*NET_MANAGERS, name='cluster_net_manager'),
nullable=False,
default='FlatDHCPManager'
)
grouping = Column(
Enum(*GROUPING, name='cluster_grouping'),
nullable=False,
default='roles'
)
name = Column(Unicode(50), unique=True, nullable=False)
release_id = Column(Integer, ForeignKey('releases.id'), nullable=False)
nodes = relationship("Node", backref="cluster", cascade="delete")
tasks = relationship("Task", backref="cluster", cascade="delete")
attributes = relationship("Attributes", uselist=False,
backref="cluster", cascade="delete")
changes = relationship("ClusterChanges", backref="cluster",
cascade="delete")
# We must keep all notifications even if the cluster is removed,
# because the user should still be able to see the notification history;
# that is why we don't use cascade="delete" in this relationship.
# During cluster deletion the SQLAlchemy engine will set the cluster
# foreign key column of the notification entity to NULL.
notifications = relationship("Notification", backref="cluster")
network_groups = relationship("NetworkGroup", backref="cluster",
cascade="delete")
replaced_deployment_info = Column(JSON, default={})
replaced_provisioning_info = Column(JSON, default={})
@property
def is_ha_mode(self):
return self.mode in ('ha_full', 'ha_compact')
@property
def full_name(self):
return '%s (id=%s, mode=%s)' % (self.name, self.id, self.mode)
@classmethod
def validate(cls, data):
d = cls.validate_json(data)
if d.get("name"):
if db().query(Cluster).filter_by(
name=d["name"]
).first():
c = web.webapi.conflict
c.message = "Environment with this name already exists"
raise c()
if d.get("release"):
release = db().query(Release).get(d.get("release"))
if not release:
raise web.webapi.badrequest(message="Invalid release id")
return d
def add_pending_changes(self, changes_type, node_id=None):
ex_chs = db().query(ClusterChanges).filter_by(
cluster=self,
name=changes_type
)
if not node_id:
ex_chs = ex_chs.first()
else:
ex_chs = ex_chs.filter_by(node_id=node_id).first()
# do nothing if changes with the same name already pending
if ex_chs:
return
ch = ClusterChanges(
cluster_id=self.id,
name=changes_type
)
if node_id:
ch.node_id = node_id
db().add(ch)
db().commit()
def clear_pending_changes(self, node_id=None):
chs = db().query(ClusterChanges).filter_by(
cluster_id=self.id
)
if node_id:
chs = chs.filter_by(node_id=node_id)
map(db().delete, chs.all())
db().commit()
def prepare_for_deployment(self):
from nailgun.network.manager import NetworkManager
from nailgun.task.helpers import TaskHelper
nodes = set(TaskHelper.nodes_to_deploy(self) +
TaskHelper.nodes_in_provisioning(self))
TaskHelper.update_slave_nodes_fqdn(nodes)
nodes_ids = sorted([n.id for n in nodes])
netmanager = NetworkManager()
if nodes_ids:
netmanager.assign_ips(nodes_ids, 'management')
netmanager.assign_ips(nodes_ids, 'public')
netmanager.assign_ips(nodes_ids, 'storage')
def prepare_for_provisioning(self):
from nailgun.network.manager import NetworkManager
from nailgun.task.helpers import TaskHelper
netmanager = NetworkManager()
nodes = TaskHelper.nodes_to_provision(self)
TaskHelper.update_slave_nodes_fqdn(nodes)
for node in nodes:
netmanager.assign_admin_ips(
node.id, len(node.meta.get('interfaces', [])))
class Node(Base):
__tablename__ = 'nodes'
NODE_STATUSES = (
'ready',
'discover',
'provisioning',
'provisioned',
'deploying',
'error'
)
NODE_ERRORS = (
'deploy',
'provision',
'deletion'
)
id = Column(Integer, primary_key=True)
cluster_id = Column(Integer, ForeignKey('clusters.id'))
name = Column(Unicode(100))
status = Column(
Enum(*NODE_STATUSES, name='node_status'),
nullable=False,
default='discover'
)
meta = Column(JSON, default={})
mac = Column(String(17), nullable=False, unique=True)
ip = Column(String(15))
fqdn = Column(String(255))
manufacturer = Column(Unicode(50))
platform_name = Column(String(150))
progress = Column(Integer, default=0)
os_platform = Column(String(150))
pending_addition = Column(Boolean, default=False)
pending_deletion = Column(Boolean, default=False)
changes = relationship("ClusterChanges", backref="node")
error_type = Column(Enum(*NODE_ERRORS, name='node_error_type'))
error_msg = Column(String(255))
timestamp = Column(DateTime, nullable=False)
online = Column(Boolean, default=True)
role_list = relationship("Role", secondary=NodeRoles.__table__)
pending_role_list = relationship("Role",
secondary=PendingNodeRoles.__table__)
attributes = relationship("NodeAttributes",
backref=backref("node"),
uselist=False)
interfaces = relationship("NodeNICInterface", backref="node",
cascade="delete")
@property
def offline(self):
return not self.online
@property
def network_data(self):
# Required by integration tests to get info about the networks
# which must be created on the target node.
from nailgun.network.manager import NetworkManager
netmanager = NetworkManager()
return netmanager.get_node_networks(self.id)
@property
def volume_manager(self):
return VolumeManager(self)
@property
def needs_reprovision(self):
return self.status == 'error' and self.error_type == 'provision' and \
not self.pending_deletion
@property
def needs_redeploy(self):
return (self.status == 'error' or len(self.pending_roles)) and \
not self.pending_deletion
@property
def needs_redeletion(self):
return self.status == 'error' and self.error_type == 'deletion'
@property
def human_readable_name(self):
return self.name or self.mac
@property
def full_name(self):
return u'%s (id=%s, mac=%s)' % (self.name, self.id, self.mac)
@property
def roles(self):
return [role.name for role in self.role_list]
@roles.setter
def roles(self, new_roles):
self.role_list = map(lambda role: Role(name=role), new_roles)
db().commit()
@property
def pending_roles(self):
return [role.name for role in self.pending_role_list]
@pending_roles.setter
def pending_roles(self, new_roles):
self.pending_role_list = map(
lambda role: Role(name=role), new_roles)
db().commit()
def _check_interface_has_required_params(self, iface):
return bool(iface.get('name') and iface.get('mac'))
def _clean_iface(self, iface):
# clean up optional speed fields: reset a value to None unless it is a non-negative integer
for param in ["max_speed", "current_speed"]:
val = iface.get(param)
if not (isinstance(val, int) and val >= 0):
val = None
iface[param] = val
return iface
def update_meta(self, data):
# helper for basic validation of meta before update
result = []
for iface in data["interfaces"]:
if not self._check_interface_has_required_params(iface):
logger.warning(
"Invalid interface data: {0}. "
"Interfaces are not updated.".format(iface)
)
data["interfaces"] = self.meta.get("interfaces")
self.meta = data
return
result.append(self._clean_iface(iface))
data["interfaces"] = result
self.meta = data
def create_meta(self, data):
# helper for basic validation of meta before creation
result = []
for iface in data["interfaces"]:
if not self._check_interface_has_required_params(iface):
logger.warning(
"Invalid interface data: {0}. "
"Skipping interface.".format(iface)
)
continue
result.append(self._clean_iface(iface))
data["interfaces"] = result
self.meta = data
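A minimal sketch (not part of the original module) of what _clean_iface does to an agent-reported interface dict; the MAC and speed values below are made up:

# Illustrative only: speed values that are not non-negative integers
# are reset to None, everything else is kept as reported.
node = Node()
iface = {"name": "eth0", "mac": "52:54:00:00:00:01", "max_speed": "1000"}
node._clean_iface(iface)
# -> {"name": "eth0", "mac": "52:54:00:00:00:01",
#     "max_speed": None, "current_speed": None}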
class NodeAttributes(Base):
__tablename__ = 'node_attributes'
id = Column(Integer, primary_key=True)
node_id = Column(Integer, ForeignKey('nodes.id'))
volumes = Column(JSON, default=[])
interfaces = Column(JSON, default={})
class IPAddr(Base):
__tablename__ = 'ip_addrs'
id = Column(Integer, primary_key=True)
network = Column(Integer, ForeignKey('networks.id', ondelete="CASCADE"))
node = Column(Integer, ForeignKey('nodes.id', ondelete="CASCADE"))
ip_addr = Column(String(25), nullable=False)
class IPAddrRange(Base):
__tablename__ = 'ip_addr_ranges'
id = Column(Integer, primary_key=True)
network_group_id = Column(Integer, ForeignKey('network_groups.id'))
first = Column(String(25), nullable=False)
last = Column(String(25), nullable=False)
class Vlan(Base):
__tablename__ = 'vlan'
id = Column(Integer, primary_key=True)
network = relationship("Network",
backref=backref("vlan"))
class Network(Base):
__tablename__ = 'networks'
id = Column(Integer, primary_key=True)
# can be nullable only for fuelweb admin net
release = Column(Integer, ForeignKey('releases.id'))
name = Column(Unicode(100), nullable=False)
access = Column(String(20), nullable=False)
vlan_id = Column(Integer, ForeignKey('vlan.id'))
network_group_id = Column(Integer, ForeignKey('network_groups.id'))
cidr = Column(String(25), nullable=False)
gateway = Column(String(25))
nodes = relationship(
"Node",
secondary=IPAddr.__table__,
backref="networks")
class NetworkGroup(Base):
__tablename__ = 'network_groups'
NAMES = (
# Node networks
'fuelweb_admin',
'storage',
# internal in terms of fuel
'management',
'public',
# VM networks
'floating',
# private in terms of fuel
'fixed'
)
id = Column(Integer, primary_key=True)
name = Column(Enum(*NAMES, name='network_group_name'), nullable=False)
access = Column(String(20), nullable=False)
# can be nullable only for fuelweb admin net
release = Column(Integer, ForeignKey('releases.id'))
# can be nullable only for fuelweb admin net
cluster_id = Column(Integer, ForeignKey('clusters.id'))
network_size = Column(Integer, default=256)
amount = Column(Integer, default=1)
vlan_start = Column(Integer, default=1)
networks = relationship("Network", cascade="delete",
backref="network_group")
cidr = Column(String(25))
gateway = Column(String(25))
netmask = Column(String(25), nullable=False)
ip_ranges = relationship(
"IPAddrRange",
backref="network_group"
)
@classmethod
def generate_vlan_ids_list(cls, ng):
if ng["vlan_start"] is None:
return []
vlans = [
i for i in xrange(
int(ng["vlan_start"]),
int(ng["vlan_start"]) + int(ng["amount"])
)
]
return vlans
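A small worked example (not in the original source) of the classmethod above:

# vlan_start=100 and amount=3 expand to three consecutive VLAN IDs;
# a vlan_start of None yields an empty list.
assert NetworkGroup.generate_vlan_ids_list(
    {"vlan_start": 100, "amount": 3}) == [100, 101, 102]
assert NetworkGroup.generate_vlan_ids_list(
    {"vlan_start": None, "amount": 3}) == []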
class NetworkConfiguration(object):
@classmethod
def update(cls, cluster, network_configuration):
from nailgun.network.manager import NetworkManager
network_manager = NetworkManager()
if 'net_manager' in network_configuration:
setattr(
cluster,
'net_manager',
network_configuration['net_manager'])
if 'networks' in network_configuration:
for ng in network_configuration['networks']:
ng_db = db().query(NetworkGroup).get(ng['id'])
for key, value in ng.iteritems():
if key == "ip_ranges":
cls.__set_ip_ranges(ng['id'], value)
else:
if key == 'cidr' and \
not ng['name'] in ('public', 'floating'):
network_manager.update_ranges_from_cidr(
ng_db, value)
setattr(ng_db, key, value)
network_manager.create_networks(ng_db)
ng_db.cluster.add_pending_changes('networks')
@classmethod
def __set_ip_ranges(cls, network_group_id, ip_ranges):
# deleting old ip ranges
db().query(IPAddrRange).filter_by(
network_group_id=network_group_id).delete()
for r in ip_ranges:
new_ip_range = IPAddrRange(
first=r[0],
last=r[1],
network_group_id=network_group_id)
db().add(new_ip_range)
db().commit()
class AttributesGenerators(object):
@classmethod
def password(cls, arg=None):
try:
length = int(arg)
except Exception:
length = 8
chars = string.letters + string.digits
return u''.join([choice(chars) for _ in xrange(length)])
@classmethod
def ip(cls, arg=None):
if str(arg) in ("admin", "master"):
return settings.MASTER_IP
return "127.0.0.1"
@classmethod
def identical(cls, arg=None):
return str(arg)
class Attributes(Base):
__tablename__ = 'attributes'
id = Column(Integer, primary_key=True)
cluster_id = Column(Integer, ForeignKey('clusters.id'))
editable = Column(JSON)
generated = Column(JSON)
def generate_fields(self):
self.generated = self.traverse(self.generated)
db().add(self)
db().commit()
@classmethod
def traverse(cls, cdict):
new_dict = {}
if cdict:
for i, val in cdict.iteritems():
if isinstance(val, (str, unicode, int, float)):
new_dict[i] = val
elif isinstance(val, dict) and "generator" in val:
try:
generator = getattr(
AttributesGenerators,
val["generator"]
)
except AttributeError:
logger.error("Attribute error: %s" % val["generator"])
raise
else:
new_dict[i] = generator(val.get("generator_arg"))
else:
new_dict[i] = cls.traverse(val)
return new_dict
def merged_attrs(self):
return self._dict_merge(self.generated, self.editable)
def merged_attrs_values(self):
attrs = self.merged_attrs()
for group_attrs in attrs.itervalues():
for attr, value in group_attrs.iteritems():
if isinstance(value, dict) and 'value' in value:
group_attrs[attr] = value['value']
if 'common' in attrs:
attrs.update(attrs.pop('common'))
return attrs
def _dict_merge(self, a, b):
'''recursively merges dicts. Not just a simple a['key'] = b['key']:
if both a and b have a key whose value is a dict, then _dict_merge is
called on both values and the result is stored in the returned dictionary.
'''
if not isinstance(b, dict):
return b
result = deepcopy(a)
for k, v in b.iteritems():
if k in result and isinstance(result[k], dict):
result[k] = self._dict_merge(result[k], v)
else:
result[k] = deepcopy(v)
return result
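A sketch, not part of the original module, of how generated attributes are expanded and then merged with editable ones; the attribute names are illustrative only:

# traverse() replaces {"generator": ...} leaves with generated values.
generated = Attributes.traverse({
    "mysql": {
        "root_password": {"generator": "password", "generator_arg": 12},
        "bind_host": {"generator": "ip"},          # -> "127.0.0.1"
        "db_name": {"generator": "identical", "generator_arg": "nova"},
    }
})
# _dict_merge() lets editable values override generated ones per key.
merged = Attributes()._dict_merge(generated, {"mysql": {"db_name": "glance"}})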
class Task(Base):
__tablename__ = 'tasks'
TASK_STATUSES = (
'ready',
'running',
'error'
)
TASK_NAMES = (
'super',
# cluster
'deploy',
'deployment',
'provision',
'node_deletion',
'cluster_deletion',
'check_before_deployment',
# network
'check_networks',
'verify_networks',
'check_dhcp',
'verify_network_connectivity',
# plugin
'install_plugin',
'update_plugin',
'delete_plugin',
# red hat
'redhat_setup',
'redhat_check_credentials',
'redhat_check_licenses',
'redhat_download_release',
'redhat_update_cobbler_profile',
# dump
'dump',
)
id = Column(Integer, primary_key=True)
cluster_id = Column(Integer, ForeignKey('clusters.id'))
uuid = Column(String(36), nullable=False,
default=lambda: str(uuid.uuid4()))
name = Column(
Enum(*TASK_NAMES, name='task_name'),
nullable=False,
default='super'
)
message = Column(Text)
status = Column(
Enum(*TASK_STATUSES, name='task_status'),
nullable=False,
default='running'
)
progress = Column(Integer, default=0)
cache = Column(JSON, default={})
result = Column(JSON, default={})
parent_id = Column(Integer, ForeignKey('tasks.id'))
subtasks = relationship(
"Task",
backref=backref('parent', remote_side=[id])
)
notifications = relationship(
"Notification",
backref=backref('task', remote_side=[id])
)
# Task weight is used to calculate supertask progress:
# sum([t.progress * t.weight for t in supertask.subtasks]) /
# sum([t.weight for t in supertask.subtasks])
# (a worked sketch of this arithmetic follows the Task class below)
weight = Column(Float, default=1.0)
def __repr__(self):
return "<Task '{0}' {1} ({2}) {3}>".format(
self.name,
self.uuid,
self.cluster_id,
self.status
)
def create_subtask(self, name):
if not name:
raise ValueError("Subtask name not specified")
task = Task(name=name, cluster=self.cluster)
self.subtasks.append(task)
db().commit()
return task
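The weight comment above describes how subtask progress rolls up into a supertask; a small sketch of that arithmetic (hypothetical task names and numbers, not from the original code):

def supertask_progress(subtasks):
    # weighted mean of subtask progress, exactly as in the comment above
    total_weight = sum(t.weight for t in subtasks)
    return sum(t.progress * t.weight for t in subtasks) / total_weight

subtasks = [Task(name='provision', progress=100, weight=0.4),
            Task(name='deployment', progress=50, weight=0.6)]
supertask_progress(subtasks)   # -> 70.0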
class Notification(Base):
__tablename__ = 'notifications'
NOTIFICATION_STATUSES = (
'read',
'unread',
)
NOTIFICATION_TOPICS = (
'discover',
'done',
'error',
'warning',
)
id = Column(Integer, primary_key=True)
cluster_id = Column(
Integer,
ForeignKey('clusters.id', ondelete='SET NULL')
)
node_id = Column(Integer, ForeignKey('nodes.id', ondelete='SET NULL'))
task_id = Column(Integer, ForeignKey('tasks.id', ondelete='SET NULL'))
topic = Column(
Enum(*NOTIFICATION_TOPICS, name='notif_topic'),
nullable=False
)
message = Column(Text)
status = Column(
Enum(*NOTIFICATION_STATUSES, name='notif_status'),
nullable=False,
default='unread'
)
datetime = Column(DateTime, nullable=False)
class L2Topology(Base):
__tablename__ = 'l2_topologies'
id = Column(Integer, primary_key=True)
network_id = Column(
Integer,
ForeignKey('network_groups.id', ondelete="CASCADE"),
nullable=False
)
class L2Connection(Base):
__tablename__ = 'l2_connections'
id = Column(Integer, primary_key=True)
topology_id = Column(
Integer,
ForeignKey('l2_topologies.id', ondelete="CASCADE"),
nullable=False
)
interface_id = Column(
Integer,
# If interface is removed we should somehow remove
# all L2Topologes which include this interface.
ForeignKey('node_nic_interfaces.id', ondelete="CASCADE"),
nullable=False
)
class AllowedNetworks(Base):
__tablename__ = 'allowed_networks'
id = Column(Integer, primary_key=True)
network_id = Column(
Integer,
ForeignKey('network_groups.id', ondelete="CASCADE"),
nullable=False
)
interface_id = Column(
Integer,
ForeignKey('node_nic_interfaces.id', ondelete="CASCADE"),
nullable=False
)
class NetworkAssignment(Base):
__tablename__ = 'net_assignments'
id = Column(Integer, primary_key=True)
network_id = Column(
Integer,
ForeignKey('network_groups.id', ondelete="CASCADE"),
nullable=False
)
interface_id = Column(
Integer,
ForeignKey('node_nic_interfaces.id', ondelete="CASCADE"),
nullable=False
)
class NodeNICInterface(Base):
__tablename__ = 'node_nic_interfaces'
id = Column(Integer, primary_key=True)
node_id = Column(
Integer,
ForeignKey('nodes.id', ondelete="CASCADE"),
nullable=False
)
name = Column(String(128), nullable=False)
mac = Column(String(32), nullable=False)
max_speed = Column(Integer)
current_speed = Column(Integer)
allowed_networks = relationship(
"NetworkGroup",
secondary=AllowedNetworks.__table__,
)
assigned_networks = relationship(
"NetworkGroup",
secondary=NetworkAssignment.__table__,
)
class Plugin(Base):
__tablename__ = 'plugins'
TYPES = ('nailgun', 'fuel')
id = Column(Integer, primary_key=True)
type = Column(Enum(*TYPES, name='plugin_type'), nullable=False)
name = Column(String(128), nullable=False, unique=True)
state = Column(String(128), nullable=False, default='registered')
version = Column(String(128), nullable=False)
class RedHatAccount(Base):
__tablename__ = 'red_hat_accounts'
LICENSE_TYPES = ('rhsm', 'rhn')
id = Column(Integer, primary_key=True)
username = Column(String(100), nullable=False)
password = Column(String(100), nullable=False)
license_type = Column(Enum(*LICENSE_TYPES, name='license_type'),
nullable=False)
satellite = Column(String(250))
activation_key = Column(String(300))

View File

@ -1,13 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,75 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class BasicSerializer(object):
handlers = {}
@classmethod
def load_handlers(cls, handlers):
if not cls.handlers:
cls.handlers = handlers
return cls.handlers
@classmethod
def serialize(cls, instance, fields=None):
data_dict = {}
use_fields = fields if fields else cls.fields
if not use_fields:
raise ValueError("No fields for serialize")
for field in use_fields:
if isinstance(field, (tuple,)):
if field[1] == '*':
subfields = None
else:
subfields = field[1:]
value = getattr(instance, field[0])
rel = getattr(
instance.__class__, field[0]).impl.__class__.__name__
if value is None:
pass
elif rel == 'ScalarObjectAttributeImpl':
handler = cls.handlers[value.__class__.__name__]
data_dict[field[0]] = handler.render(
value, fields=subfields
)
elif rel == 'CollectionAttributeImpl':
if not value:
data_dict[field[0]] = []
else:
handler = cls.handlers[value[0].__class__.__name__]
data_dict[field[0]] = [
handler.render(v, fields=subfields) for v in value
]
else:
value = getattr(instance, field)
if value is None:
data_dict[field] = value
else:
f = getattr(instance.__class__, field)
if hasattr(f, "impl"):
rel = f.impl.__class__.__name__
if rel == 'ScalarObjectAttributeImpl':
data_dict[field] = value.id
elif rel == 'CollectionAttributeImpl':
data_dict[field] = [v.id for v in value]
else:
data_dict[field] = value
else:
data_dict[field] = value
return data_dict

View File

@ -1,51 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.api.serializers.base import BasicSerializer
from nailgun.network.manager import NetworkManager
class NetworkConfigurationSerializer(BasicSerializer):
fields = ('id', 'cluster_id', 'name', 'cidr', 'netmask',
'gateway', 'vlan_start', 'network_size', 'amount')
@classmethod
def serialize_network_group(cls, instance, fields=None):
data_dict = BasicSerializer.serialize(instance, fields=cls.fields)
data_dict["ip_ranges"] = [
[ir.first, ir.last] for ir in instance.ip_ranges
]
data_dict.setdefault("netmask", "")
data_dict.setdefault("gateway", "")
return data_dict
@classmethod
def serialize_for_cluster(cls, cluster):
result = {}
result['net_manager'] = cluster.net_manager
result['networks'] = map(
cls.serialize_network_group,
cluster.network_groups
)
if cluster.is_ha_mode:
net_manager = NetworkManager()
result['management_vip'] = net_manager.assign_vip(
cluster.id, 'management')
result['public_vip'] = net_manager.assign_vip(
cluster.id, 'public')
return result

View File

@ -1,162 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import web
from nailgun.api.handlers.cluster import ClusterAttributesDefaultsHandler
from nailgun.api.handlers.cluster import ClusterAttributesHandler
from nailgun.api.handlers.cluster import ClusterChangesHandler
from nailgun.api.handlers.cluster import ClusterCollectionHandler
from nailgun.api.handlers.cluster import ClusterGeneratedData
from nailgun.api.handlers.cluster import ClusterHandler
from nailgun.api.handlers.disks import NodeDefaultsDisksHandler
from nailgun.api.handlers.disks import NodeDisksHandler
from nailgun.api.handlers.disks import NodeVolumesInformationHandler
from nailgun.api.handlers.logs import LogEntryCollectionHandler
from nailgun.api.handlers.logs import LogPackageHandler
from nailgun.api.handlers.logs import LogSourceByNodeCollectionHandler
from nailgun.api.handlers.logs import LogSourceCollectionHandler
from nailgun.api.handlers.network_configuration \
import NetworkConfigurationHandler
from nailgun.api.handlers.network_configuration \
import NetworkConfigurationVerifyHandler
from nailgun.api.handlers.node import NodeCollectionHandler
from nailgun.api.handlers.node import NodeHandler
from nailgun.api.handlers.node import NodesAllocationStatsHandler
from nailgun.api.handlers.node import NodeCollectionNICsDefaultHandler
from nailgun.api.handlers.node import NodeCollectionNICsHandler
from nailgun.api.handlers.node import NodeNICsDefaultHandler
from nailgun.api.handlers.node import NodeNICsHandler
from nailgun.api.handlers.node import NodeNICsVerifyHandler
from nailgun.api.handlers.notifications import NotificationCollectionHandler
from nailgun.api.handlers.notifications import NotificationHandler
from nailgun.api.handlers.orchestrator import DefaultDeploymentInfo
from nailgun.api.handlers.orchestrator import DefaultProvisioningInfo
from nailgun.api.handlers.orchestrator import DeploymentInfo
from nailgun.api.handlers.orchestrator import ProvisioningInfo
from nailgun.api.handlers.plugin import PluginCollectionHandler
from nailgun.api.handlers.plugin import PluginHandler
from nailgun.api.handlers.redhat import RedHatAccountHandler
from nailgun.api.handlers.redhat import RedHatSetupHandler
from nailgun.api.handlers.registration import FuelKeyHandler
from nailgun.api.handlers.release import ReleaseCollectionHandler
from nailgun.api.handlers.release import ReleaseHandler
from nailgun.api.handlers.tasks import TaskCollectionHandler
from nailgun.api.handlers.tasks import TaskHandler
from nailgun.api.handlers.version import VersionHandler
urls = (
r'/releases/?$',
ReleaseCollectionHandler,
r'/releases/(?P<release_id>\d+)/?$',
ReleaseHandler,
r'/clusters/?$',
ClusterCollectionHandler,
r'/clusters/(?P<cluster_id>\d+)/?$',
ClusterHandler,
r'/clusters/(?P<cluster_id>\d+)/changes/?$',
ClusterChangesHandler,
r'/clusters/(?P<cluster_id>\d+)/attributes/?$',
ClusterAttributesHandler,
r'/clusters/(?P<cluster_id>\d+)/attributes/defaults/?$',
ClusterAttributesDefaultsHandler,
r'/clusters/(?P<cluster_id>\d+)/network_configuration/?$',
NetworkConfigurationHandler,
r'/clusters/(?P<cluster_id>\d+)/network_configuration/verify/?$',
NetworkConfigurationVerifyHandler,
r'/clusters/(?P<cluster_id>\d+)/orchestrator/deployment/?$',
DeploymentInfo,
r'/clusters/(?P<cluster_id>\d+)/orchestrator/deployment/defaults/?$',
DefaultDeploymentInfo,
r'/clusters/(?P<cluster_id>\d+)/orchestrator/provisioning/?$',
ProvisioningInfo,
r'/clusters/(?P<cluster_id>\d+)/orchestrator/provisioning/defaults/?$',
DefaultProvisioningInfo,
r'/clusters/(?P<cluster_id>\d+)/generated/?$',
ClusterGeneratedData,
r'/nodes/?$',
NodeCollectionHandler,
r'/nodes/(?P<node_id>\d+)/?$',
NodeHandler,
r'/nodes/(?P<node_id>\d+)/disks/?$',
NodeDisksHandler,
r'/nodes/(?P<node_id>\d+)/disks/defaults/?$',
NodeDefaultsDisksHandler,
r'/nodes/(?P<node_id>\d+)/volumes/?$',
NodeVolumesInformationHandler,
r'/nodes/interfaces/?$',
NodeCollectionNICsHandler,
r'/nodes/interfaces/default_assignment?$',
NodeCollectionNICsDefaultHandler,
r'/nodes/(?P<node_id>\d+)/interfaces/?$',
NodeNICsHandler,
r'/nodes/(?P<node_id>\d+)/interfaces/default_assignment?$',
NodeNICsDefaultHandler,
r'/nodes/interfaces_verify/?$',
NodeNICsVerifyHandler,
r'/nodes/allocation/stats/?$',
NodesAllocationStatsHandler,
r'/tasks/?$',
TaskCollectionHandler,
r'/tasks/(?P<task_id>\d+)/?$',
TaskHandler,
r'/notifications/?$',
NotificationCollectionHandler,
r'/notifications/(?P<notification_id>\d+)/?$',
NotificationHandler,
r'/logs/?$',
LogEntryCollectionHandler,
r'/logs/package/?$',
LogPackageHandler,
r'/logs/sources/?$',
LogSourceCollectionHandler,
r'/logs/sources/nodes/(?P<node_id>\d+)/?$',
LogSourceByNodeCollectionHandler,
r'/registration/key/?$',
FuelKeyHandler,
r'/version/?$',
VersionHandler,
r'/plugins/?$',
PluginCollectionHandler,
r'/plugins/(?P<plugin_id>\d+)/?$',
PluginHandler,
r'/redhat/account/?$',
RedHatAccountHandler,
r'/redhat/setup/?$',
RedHatSetupHandler,
)
urls = [i if isinstance(i, str) else i.__name__ for i in urls]
app = web.application(urls, locals())

View File

@ -1,13 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,50 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from jsonschema import validate
from nailgun.errors import errors
class BasicValidator(object):
@classmethod
def validate_json(cls, data):
if data:
try:
res = json.loads(data)
except Exception:
raise errors.InvalidData(
"Invalid json received",
log_message=True
)
else:
raise errors.InvalidData(
"Empty request received",
log_message=True
)
return res
@classmethod
def validate(cls, data):
return cls.validate_json(data)
@classmethod
def validate_schema(cls, data, schema):
try:
validate(data, schema)
except Exception as exc:
raise errors.InvalidData(exc.message)
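A hedged usage sketch (not from the original handlers) of how validate_json and validate_schema are meant to be combined; the schema here is invented for illustration:

example_schema = {
    'type': 'object',
    'required': ['name'],
    'properties': {'name': {'type': 'string'}},
}
data = BasicValidator.validate_json('{"name": "node-1"}')   # -> parsed dict {'name': 'node-1'}
BasicValidator.validate_schema(data, example_schema)        # passes silently
# A malformed body or a schema violation raises errors.InvalidData instead.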

View File

@ -1,70 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.api.models import Cluster
from nailgun.api.models import Release
from nailgun.api.validators.base import BasicValidator
from nailgun.db import db
from nailgun.errors import errors
class ClusterValidator(BasicValidator):
@classmethod
def validate(cls, data):
d = cls.validate_json(data)
if d.get("name"):
if db().query(Cluster).filter_by(
name=d["name"]
).first():
raise errors.AlreadyExists(
"Environment with this name already exists",
log_message=True
)
if d.get("release"):
release = db().query(Release).get(d.get("release"))
if not release:
raise errors.InvalidData(
"Invalid release id",
log_message=True
)
return d
class AttributesValidator(BasicValidator):
@classmethod
def validate(cls, data):
d = cls.validate_json(data)
if "generated" in d:
raise errors.InvalidData(
"It is not allowed to update generated attributes",
log_message=True
)
if "editable" in d and not isinstance(d["editable"], dict):
raise errors.InvalidData(
"Editable attributes should be a dictionary",
log_message=True
)
return d
@classmethod
def validate_fixture(cls, data):
"""Here we just want to be sure that data is logically valid.
We try to generate "generated" parameters. If there will not
be any error during generating then we assume data is
logically valid.
"""
d = cls.validate_json(data)
if "generated" in d:
cls.traverse(d["generated"])

View File

@ -1,13 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,55 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
disks_simple_format_schema = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'title': 'Disks',
'description': 'Array of disks in simple format',
'type': 'array',
'items': {
'type': 'object',
'required': ['id', 'size', 'volumes'],
'properties': {
'id': {
'description': 'The unique identifier for a disk',
'type': 'string'
},
'size': {
'description': 'Disk size in megabytes',
'type': 'integer'
},
'volumes': {
'description': 'Volumes for disk',
'type': 'array',
'items': {
'type': 'object',
'required': ['size', 'name'],
'properties': {
'name': {
'description': 'Volume name',
'type': 'string'
},
'size': {
'description': 'Volume size',
'type': 'integer'
}
}
}
}
}
}
}
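An illustrative document that satisfies the schema above (the disk id and sizes are made up):

example_disks = [
    {
        "id": "disk/by-path/pci-0000:00:06.0-scsi-0:0:0:0",
        "size": 102400,
        "volumes": [
            {"name": "os", "size": 20480},
            {"name": "vm", "size": 81920},
        ],
    },
]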

View File

@ -1,204 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from netaddr import AddrFormatError
from netaddr import IPNetwork
from nailgun.api.models import NetworkGroup
from nailgun.api.models import Node
from nailgun.api.validators.base import BasicValidator
from nailgun.db import db
from nailgun.errors import errors
class NetworkConfigurationValidator(BasicValidator):
@classmethod
def validate_networks_update(cls, data):
d = cls.validate_json(data)
if not d:
raise errors.InvalidData(
"No valid data received",
log_message=True
)
networks = d.get('networks')
if not isinstance(networks, list):
raise errors.InvalidData(
"It's expected to receive array, not a single object",
log_message=True
)
for i in networks:
if 'id' not in i:
raise errors.InvalidData(
"No 'id' param for '{0}'".format(i),
log_message=True
)
if i.get('name') == 'public':
try:
IPNetwork('0.0.0.0/' + i['netmask'])
except (AddrFormatError, KeyError):
raise errors.InvalidData(
"Invalid netmask for public network",
log_message=True
)
return d
class NetAssignmentValidator(BasicValidator):
@classmethod
def validate(cls, node):
if not isinstance(node, dict):
raise errors.InvalidData(
"Each node should be dict",
log_message=True
)
if 'id' not in node:
raise errors.InvalidData(
"Each node should have ID",
log_message=True
)
if 'interfaces' not in node or \
not isinstance(node['interfaces'], list):
raise errors.InvalidData(
"There is no 'interfaces' list in node '%d'" % node['id'],
log_message=True
)
net_ids = set()
for iface in node['interfaces']:
if not isinstance(iface, dict):
raise errors.InvalidData(
"Node '%d': each interface should be dict (got '%s')" % (
node['id'],
str(iface)
),
log_message=True
)
if 'id' not in iface:
raise errors.InvalidData(
"Node '%d': each interface should have ID" % node['id'],
log_message=True
)
if 'assigned_networks' not in iface or \
not isinstance(iface['assigned_networks'], list):
raise errors.InvalidData(
"There is no 'assigned_networks' list"
" in interface '%d' in node '%d'" %
(iface['id'], node['id']),
log_message=True
)
for net in iface['assigned_networks']:
if not isinstance(net, dict):
raise errors.InvalidData(
"Node '%d', interface '%d':"
" each assigned network should be dict" %
(node['id'], iface['id']),
log_message=True
)
if 'id' not in net:
raise errors.InvalidData(
"Node '%d', interface '%d':"
" each assigned network should have ID" %
(node['id'], iface['id']),
log_message=True
)
if net['id'] in net_ids:
raise errors.InvalidData(
"Assigned networks for node '%d' have"
" a duplicate network '%d' (second"
" occurrence in interface '%d')" %
(node['id'], net['id'], iface['id']),
log_message=True
)
net_ids.add(net['id'])
return node
@classmethod
def validate_structure(cls, webdata):
node_data = cls.validate_json(webdata)
return cls.validate(node_data)
@classmethod
def validate_collection_structure(cls, webdata):
data = cls.validate_json(webdata)
if not isinstance(data, list):
raise errors.InvalidData(
"Data should be list of nodes",
log_message=True
)
for node_data in data:
cls.validate(node_data)
return data
@classmethod
def verify_data_correctness(cls, node):
db_node = db().query(Node).filter_by(id=node['id']).first()
if not db_node:
raise errors.InvalidData(
"There is no node with ID '%d' in DB" % node['id'],
log_message=True
)
interfaces = node['interfaces']
db_interfaces = db_node.interfaces
if len(interfaces) != len(db_interfaces):
raise errors.InvalidData(
"Node '%d' has different amount of interfaces" % node['id'],
log_message=True
)
# FIXME: we should not use all networks, only those appropriate
# for this node.
db_network_groups = db().query(NetworkGroup).filter_by(
cluster_id=db_node.cluster_id
).all()
if not db_network_groups:
raise errors.InvalidData(
"There are no networks related to"
" node '%d' in DB" % node['id'],
log_message=True
)
network_group_ids = set([ng.id for ng in db_network_groups])
for iface in interfaces:
db_iface = filter(
lambda i: i.id == iface['id'],
db_interfaces
)
if not db_iface:
raise errors.InvalidData(
"There is no interface with ID '%d'"
" for node '%d' in DB" %
(iface['id'], node['id']),
log_message=True
)
db_iface = db_iface[0]
for net in iface['assigned_networks']:
if net['id'] not in network_group_ids:
raise errors.InvalidData(
"Node '%d' shouldn't be connected to"
" network with ID '%d'" %
(node['id'], net['id']),
log_message=True
)
network_group_ids.remove(net['id'])
# Check if there are unassigned networks for this node.
if network_group_ids:
raise errors.InvalidData(
"Too few networks to assign to node '%d'" % node['id'],
log_message=True
)
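The shape of a node payload that passes the checks above (all IDs are placeholders):

node_payload = {
    "id": 1,
    "interfaces": [
        {"id": 10, "assigned_networks": [{"id": 2}, {"id": 3}]},
        {"id": 11, "assigned_networks": [{"id": 4}]},
    ],
}
NetAssignmentValidator.validate(node_payload)   # returns the payload unchanged
# verify_data_correctness() additionally checks these IDs against the DB.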

View File

@ -1,227 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.api.models import Node
from nailgun.api.validators.base import BasicValidator
from nailgun.api.validators.json_schema.disks \
import disks_simple_format_schema
from nailgun.db import db
from nailgun.errors import errors
class MetaInterfacesValidator(BasicValidator):
@classmethod
def _validate_data(cls, interfaces):
if not isinstance(interfaces, list):
raise errors.InvalidInterfacesInfo(
"Meta.interfaces should be list",
log_message=True
)
return interfaces
@classmethod
def validate_create(cls, interfaces):
interfaces = cls._validate_data(interfaces)
def filter_valid_nic(nic):
for key in ('mac', 'name'):
if not key in nic or not isinstance(nic[key], basestring)\
or not nic[key]:
return False
return True
return filter(filter_valid_nic, interfaces)
@classmethod
def validate_update(cls, interfaces):
interfaces = cls._validate_data(interfaces)
for nic in interfaces:
if not isinstance(nic, dict):
raise errors.InvalidInterfacesInfo(
"Interface in meta.interfaces must be dict",
log_message=True
)
return interfaces
class MetaValidator(BasicValidator):
@classmethod
def _validate_data(cls, meta):
if not isinstance(meta, dict):
raise errors.InvalidMetadata(
"Invalid data: 'meta' should be dict",
log_message=True
)
@classmethod
def validate_create(cls, meta):
cls._validate_data(meta)
if 'interfaces' in meta:
meta['interfaces'] = MetaInterfacesValidator.validate_create(
meta['interfaces']
)
else:
raise errors.InvalidInterfacesInfo(
"Failed to discover node: "
"invalid interfaces info",
log_message=True
)
return meta
@classmethod
def validate_update(cls, meta):
cls._validate_data(meta)
if 'interfaces' in meta:
meta['interfaces'] = MetaInterfacesValidator.validate_update(
meta['interfaces']
)
return meta
class NodeValidator(BasicValidator):
@classmethod
def validate(cls, data):
d = cls.validate_json(data)
if not isinstance(d, dict):
raise errors.InvalidData(
"Node data must be dict",
log_message=True
)
if "mac" not in d:
raise errors.InvalidData(
"No mac address specified",
log_message=True
)
else:
q = db().query(Node)
if q.filter(Node.mac == d["mac"]).first():
raise errors.AlreadyExists(
"Node with mac {0} already "
"exists - doing nothing".format(d["mac"]),
log_level="info"
)
if cls.validate_existent_node_mac_create(d):
raise errors.AlreadyExists(
"Node with mac {0} already "
"exists - doing nothing".format(d["mac"]),
log_level="info"
)
if 'meta' in d:
MetaValidator.validate_create(d['meta'])
return d
# TODO(NAME): fix this using DRY
@classmethod
def validate_existent_node_mac_create(cls, data):
if 'meta' in data:
data['meta'] = MetaValidator.validate_create(data['meta'])
if 'interfaces' in data['meta']:
existent_node = db().query(Node).filter(Node.mac.in_(
[n['mac'] for n in data['meta']['interfaces']])).first()
return existent_node
@classmethod
def validate_existent_node_mac_update(cls, data):
if 'meta' in data:
data['meta'] = MetaValidator.validate_update(data['meta'])
if 'interfaces' in data['meta']:
existent_node = db().query(Node).filter(Node.mac.in_(
[n['mac'] for n in data['meta']['interfaces']])).first()
return existent_node
@classmethod
def validate_roles(cls, data, node):
if 'roles' in data:
if not isinstance(data['roles'], list) or \
any(not isinstance(role, (
str, unicode)) for role in data['roles']):
raise errors.InvalidData(
"Role list must be list of strings",
log_message=True
)
@classmethod
def validate_update(cls, data):
d = cls.validate_json(data)
if "status" in d and d["status"] not in Node.NODE_STATUSES:
raise errors.InvalidData(
"Invalid status for node",
log_message=True
)
if 'roles' in d and 'id' in d:
node = db().query(Node).get(d['id'])
cls.validate_roles(d, node)
if 'meta' in d:
d['meta'] = MetaValidator.validate_update(d['meta'])
return d
@classmethod
def validate_collection_update(cls, data):
d = cls.validate_json(data)
if not isinstance(d, list):
raise errors.InvalidData(
"Invalid json list",
log_message=True
)
q = db().query(Node)
for nd in d:
if not "mac" in nd and not "id" in nd:
raise errors.InvalidData(
"MAC or ID is not specified",
log_message=True
)
else:
if "mac" in nd:
existent_node = q.filter_by(mac=nd["mac"]).first() \
or cls.validate_existent_node_mac_update(nd)
if not existent_node:
raise errors.InvalidData(
"Invalid MAC specified",
log_message=True
)
if "id" in nd:
existent_node = q.get(nd["id"])
if not existent_node:
raise errors.InvalidData(
"Invalid ID specified",
log_message=True
)
if 'roles' in nd:
cls.validate_roles(nd, existent_node)
if 'meta' in nd:
nd['meta'] = MetaValidator.validate_update(nd['meta'])
return d
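A minimal node-registration body that NodeValidator.validate will parse (the MAC addresses are placeholders; the uniqueness checks still run against a real DB session):

import json

registration_body = json.dumps({
    "mac": "52:54:00:9d:11:f1",
    "meta": {
        "interfaces": [
            {"name": "eth0", "mac": "52:54:00:9d:11:f1"},
        ],
    },
})
# NodeValidator.validate(registration_body) returns the parsed dict
# if no node with that MAC exists yet.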
class NodeDisksValidator(BasicValidator):
@classmethod
def validate(cls, data):
dict_data = cls.validate_json(data)
cls.validate_schema(dict_data, disks_simple_format_schema)
cls.sum_of_volumes_not_greater_than_disk_size(dict_data)
return dict_data
@classmethod
def sum_of_volumes_not_greater_than_disk_size(cls, data):
for disk in data:
volumes_size = sum([volume['size'] for volume in disk['volumes']])
if volumes_size > disk['size']:
raise errors.InvalidData(
u'Not enough free space on disk: %s' % disk)

View File

@ -1,74 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.api.models import Notification
from nailgun.api.validators.base import BasicValidator
from nailgun.db import db
from nailgun.errors import errors
class NotificationValidator(BasicValidator):
@classmethod
def validate_update(cls, data):
valid = {}
d = cls.validate_json(data)
status = d.get("status", None)
if status in Notification.NOTIFICATION_STATUSES:
valid["status"] = status
else:
raise errors.InvalidData(
"Bad status",
log_message=True
)
return valid
@classmethod
def validate_collection_update(cls, data):
d = cls.validate_json(data)
if not isinstance(d, list):
raise errors.InvalidData(
"Invalid json list",
log_message=True
)
q = db().query(Notification)
valid_d = []
for nd in d:
valid_nd = {}
if "id" not in nd:
raise errors.InvalidData(
"ID is not set correctly",
log_message=True
)
if "status" not in nd:
raise errors.InvalidData(
"ID is not set correctly",
log_message=True
)
if not q.get(nd["id"]):
raise errors.InvalidData(
"Invalid ID specified",
log_message=True
)
valid_nd["id"] = nd["id"]
valid_nd["status"] = nd["status"]
valid_d.append(valid_nd)
return valid_d

View File

@ -1,46 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.api.validators.base import BasicValidator
from nailgun.errors import errors
class RedHatAccountValidator(BasicValidator):
@classmethod
def validate(cls, data):
d = cls.validate_json(data)
if "release_id" not in d:
raise errors.InvalidData(
"No Release ID specified",
)
if "license_type" not in d:
raise errors.InvalidData(
"No License Type specified"
)
if d["license_type"] not in ["rhsm", "rhn"]:
raise errors.InvalidData(
"Invalid License Type"
)
if "username" not in d or "password" not in d:
raise errors.InvalidData(
"Username or password not specified"
)
if d["license_type"] == "rhn":
if "satellite" not in d or "activation_key" not in d:
raise errors.InvalidData(
"Satellite hostname or activation key not specified",
)
return d

View File

@ -1,72 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.api.models import Attributes
from nailgun.api.models import Release
from nailgun.api.validators.base import BasicValidator
from nailgun.db import db
from nailgun.errors import errors
from nailgun.settings import settings
class ReleaseValidator(BasicValidator):
@classmethod
def validate(cls, data):
d = cls.validate_json(data)
if "name" not in d:
raise errors.InvalidData(
"No release name specified",
log_message=True
)
if "version" not in d:
raise errors.InvalidData(
"No release version specified",
log_message=True
)
if db().query(Release).filter_by(
name=d["name"],
version=d["version"]
).first():
raise errors.AlreadyExists(
"Release with the same name and version "
"already exists",
log_message=True
)
if "networks_metadata" in d:
for network in d["networks_metadata"]:
if not "name" in network or not "access" in network:
raise errors.InvalidData(
"Invalid network data: %s" % str(network),
log_message=True
)
if network["access"] not in settings.NETWORK_POOLS:
raise errors.InvalidData(
"Invalid access mode for network",
log_message=True
)
else:
d["networks_metadata"] = []
if "attributes_metadata" not in d:
d["attributes_metadata"] = {}
else:
try:
Attributes.validate_fixture(d["attributes_metadata"])
except Exception:
raise errors.InvalidData(
"Invalid logical structure of attributes metadata",
log_message=True
)
return d

View File

@ -1,75 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import json
from nailgun.api.handlers.base import JSONHandler
from nailgun.test.base import reverse
class SampleGenerator(object):
@classmethod
def gen_sample_data(cls):
def process(app, what_, name, obj, options, lines):
if cls._ishandler(obj):
lines.insert(0, cls.generate_handler_url_doc(obj))
lines.insert(1, "")
if lines and lines[-1]:
lines.append("")
return process
@classmethod
def _ishandler(cls, obj):
return inspect.isclass(obj) and issubclass(obj, JSONHandler)
@classmethod
def _ishandlermethod(cls, obj):
return inspect.ismethod(obj) and issubclass(obj.im_class, JSONHandler)
@classmethod
def generate_handler_url_doc(cls, handler):
http_methods = ["GET", "POST", "PUT", "DELETE"]
sample_method = None
for field in http_methods:
if hasattr(handler, field):
sample_method = getattr(handler, field)
break
args = inspect.getargspec(sample_method).args[1:]
test_url_data = dict([
(arg, "%{0}%".format(arg)) for arg in args
])
return "URL: **{0}**".format(
reverse(handler.__name__, test_url_data)
)
@classmethod
def gen_json_block(cls, data):
return "\n.. code-block:: javascript\n\n{0}\n\n".format(
"\n".join([
" " + s
for s in json.dumps(data, indent=4).split("\n")
])
)
def setup(app):
app.connect(
'autodoc-process-docstring',
SampleGenerator.gen_sample_data()
)

View File

@ -1,106 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import web
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy import create_engine
from sqlalchemy.orm.query import Query
from nailgun.settings import settings
db_str = "{engine}://{user}:{passwd}@{host}:{port}/{name}".format(
**settings.DATABASE)
engine = create_engine(db_str, client_encoding='utf8')
class NoCacheQuery(Query):
"""Override for common Query class.
Needed to automatically refresh objects from the database
on every query, to avoid problems with multiple sessions.
"""
def __init__(self, *args, **kwargs):
self._populate_existing = True
super(NoCacheQuery, self).__init__(*args, **kwargs)
db = scoped_session(
sessionmaker(
autoflush=True,
autocommit=False,
bind=engine,
query_cls=NoCacheQuery
)
)
def load_db_driver(handler):
try:
return handler()
except web.HTTPError:
db().commit()
raise
except Exception:
db().rollback()
raise
finally:
db().commit()
db().expire_all()
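A sketch of how load_db_driver is presumably wired in (the WSGI setup itself is not part of this file): web.py lets it be registered as a processor so every request gets the commit-or-rollback boundary above. The Ping handler and URL are invented for illustration.

import web
from nailgun.db import load_db_driver

urls = ('/ping/?$', 'Ping')

class Ping(object):
    def GET(self):
        return 'pong'

app = web.application(urls, locals())
app.add_processor(load_db_driver)   # runs around every handler call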
def syncdb():
from nailgun.api.models import Base
Base.metadata.create_all(engine)
def dropdb():
tables = [name for (name,) in db().execute(
"SELECT tablename FROM pg_tables WHERE schemaname = 'public'")]
for table in tables:
db().execute("DROP TABLE IF EXISTS %s CASCADE" % table)
# sql query to list all types, equivalent to psql's \dT+
types = [name for (name,) in db().execute("""
SELECT t.typname as type FROM pg_type t
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
WHERE (t.typrelid = 0 OR (
SELECT c.relkind = 'c' FROM pg_catalog.pg_class c
WHERE c.oid = t.typrelid
))
AND NOT EXISTS(
SELECT 1 FROM pg_catalog.pg_type el
WHERE el.oid = t.typelem AND el.typarray = t.oid
)
AND n.nspname = 'public'
""")]
for type_ in types:
db().execute("DROP TYPE IF EXISTS %s CASCADE" % type_)
db().commit()
def flush():
"""Delete all data from all tables within nailgun metadata
"""
from nailgun.api.models import Base
with contextlib.closing(engine.connect()) as con:
trans = con.begin()
for table in reversed(Base.metadata.sorted_tables):
con.execute(table.delete())
trans.commit()

View File

@ -1,84 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.errors.base import NailgunException
default_messages = {
# common errors
"InvalidData": "Invalid data received",
"AlreadyExists": "Object already exists",
"DumpRunning": "Dump already running",
# node discovering errors
"InvalidInterfacesInfo": "Invalid interfaces info",
"InvalidMetadata": "Invalid metadata specified for node",
"CannotFindNodeIDForDiscovering": "Cannot find node for discovering",
# deployment errors
"CheckBeforeDeploymentError": "Pre-Deployment check wasn't successful",
"DeploymentAlreadyStarted": "Deployment already started",
"DeletionAlreadyStarted": "Environment removal already started",
"FailedProvisioning": "Failed to start provisioning",
"WrongNodeStatus": "Wrong node status",
"NodeOffline": "Node is offline",
"NotEnoughControllers": "Not enough controllers",
"RedHatSetupError": "Red Hat setup error",
# disk errors
"NotEnoughFreeSpace": "Not enough free space",
"CannotFindVolumesInfoForRole": "Cannot find volumes info for role",
# network errors
"AdminNetworkNotFound": "Admin network info not found",
"InvalidNetworkAccess": "Invalid network access",
"AssignIPError": "Failed to assign IP to node",
"NetworkCheckError": "Network checking failed",
"OutOfVLANs": "Not enough available VLAN IDs",
"OutOfIPs": "Not enough free IP addresses in pool",
"NoSuitableCIDR": "Cannot find suitable CIDR",
"CanNotFindInterface": "Cannot find interface",
"CanNotDetermineEndPointIP": "Cannot determine end point IP",
"CanNotFindNetworkForNode": "Cannot find network for node",
# plugin errors
"PluginDownloading": "Cannot install plugin",
"PluginInitialization": "Cannot initialize plugin",
# unknown
"UnknownError": "Unknown error"
}
class ErrorFactory(object):
def __init__(self):
for name, msg in default_messages.iteritems():
setattr(self, name, self._build_exc(name, msg))
def _build_exc(self, name, msg):
return type(
name,
(NailgunException,),
{
"message": msg
}
)
def __getattr__(self, name):
return self._build_exc(name, default_messages["UnknownError"])
errors = ErrorFactory()
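A small sketch of the factory's behaviour outside a request context (no web.ctx.env, so no client prefix is added to the message):

assert errors.InvalidData().message == "Invalid data received"
# Unknown names fall back to the generic message via __getattr__:
assert errors.SomethingUnexpected().message == "Unknown error"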

View File

@ -1,66 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import web
from nailgun.logger import logger
class NailgunException(Exception):
def __init__(self,
message="",
log_traceback=False,
log_message=False,
log_level='warning',
add_client=True,
notify_user=False):
self.log_traceback = log_traceback
self.log_message = log_message
self.notify_user = notify_user
if message:
self.message = message
if add_client:
client = self._get_client()
if client:
self.message = "[{0}] ".format(
client
) + self.message
if self.log_message:
getattr(logger, log_level)(self.message)
super(NailgunException, self).__init__()
def _get_client(self):
"""web.ctx.env is a thread-local object,
this hack is for getting client IP to add it
inside error message
"""
if not hasattr(web.ctx, "env"):
return None
if 'HTTP_X_REAL_IP' in web.ctx.env:
return web.ctx.env['HTTP_X_REAL_IP']
elif 'REMOTE_ADDR' in web.ctx.env:
return web.ctx.env['REMOTE_ADDR']
else:
return None
def __str__(self):
return self.message
def __unicode__(self):
return self.message
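
To make the logging and client-tagging behaviour above concrete, here is a hedged sketch; the DiskCheckError subclass is hypothetical, and outside a web.py request no client IP prefix is available, so the message is printed as-is.

from nailgun.errors.base import NailgunException

class DiskCheckError(NailgunException):
    # hypothetical subclass, defined only for this example
    message = "Disk check failed"

try:
    # log_message=True writes the message to the nailgun logger
    # at the requested level before the exception propagates.
    raise DiskCheckError(log_message=True, log_level='error')
except DiskCheckError as exc:
    print exc   # -> Disk check failed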

View File

@ -1,13 +0,0 @@
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

View File

@ -1,40 +0,0 @@
[
    {
        "pk": 1,
        "model": "nailgun.network_group",
        "fields": {
            "name": "fuelweb_admin",
            "access": "admin",
            "cidr": "{{settings.ADMIN_NETWORK['cidr']}}",
            "netmask": "{{settings.ADMIN_NETWORK['netmask']}}",
            "network_size": "{{settings.ADMIN_NETWORK['size']}}",
            "vlan_start": "1"
        }
    },
    {
        "pk": 1,
        "model": "nailgun.vlan",
        "fields": {
        }
    },
    {
        "pk": 1,
        "model": "nailgun.network",
        "fields": {
            "name": "fuelweb_admin",
            "access": "admin",
            "network_group": 1,
            "cidr": "{{settings.ADMIN_NETWORK['cidr']}}",
            "vlan": 1
        }
    },
    {
        "pk": 1,
        "model": "nailgun.i_p_addr_range",
        "fields": {
            "network_group": 1,
            "first": "{{settings.ADMIN_NETWORK['first']}}",
            "last": "{{settings.ADMIN_NETWORK['last']}}"
        }
    }
]
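
The {{settings...}} placeholders in this fixture are Jinja2 expressions rendered against the nailgun settings object before the JSON is parsed (see the fixture loader further below). A self-contained sketch of that rendering step, with purely illustrative values:

import json
import jinja2

class FakeSettings(object):
    # Stand-in for nailgun.settings.settings; values are illustrative only.
    ADMIN_NETWORK = {'cidr': '10.20.0.0/24', 'netmask': '255.255.255.0',
                     'size': '256', 'first': '10.20.0.3', 'last': '10.20.0.254'}

template = """[{"fields": {"cidr": "{{ settings.ADMIN_NETWORK['cidr'] }}"}}]"""
rendered = jinja2.Template(template).render(settings=FakeSettings())
print json.loads(rendered)[0]['fields']['cidr']   # -> 10.20.0.0/24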

View File

@ -1,203 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from datetime import datetime
import itertools
import jinja2
import json
import os.path
import Queue
import StringIO
import sys

from sqlalchemy import orm
import sqlalchemy.types

from nailgun.api import models
from nailgun.db import db as ormgen
from nailgun.logger import logger
from nailgun.network.manager import NetworkManager
from nailgun.settings import settings

db = ormgen()


def capitalize_model_name(model_name):
    return ''.join(map(lambda s: s.capitalize(), model_name.split('_')))


def template_fixture(fileobj, config=None):
    if not config:
        config = settings
    t = jinja2.Template(fileobj.read())
    return StringIO.StringIO(t.render(settings=config))


def upload_fixture(fileobj):
    db.expunge_all()
    fixture = json.load(template_fixture(fileobj))

    queue = Queue.Queue()
    keys = {}

    for obj in fixture:
        pk = obj["pk"]
        model_name = obj["model"].split(".")[1]
        try:
            itertools.dropwhile(
                lambda m: not hasattr(models, m),
                [model_name.capitalize(),
                 "".join(map(lambda n: n.capitalize(), model_name.split("_")))]
            ).next()
        except StopIteration:
            raise Exception("Couldn't find model {0}".format(model_name))

        obj['model'] = getattr(models, capitalize_model_name(model_name))
        keys[obj['model'].__tablename__] = {}

        # Check if it's already uploaded
        obj_from_db = db.query(obj['model']).get(pk)
        if obj_from_db:
            logger.info("Fixture model '%s' with pk='%s' already"
                        " uploaded. Skipping", model_name, pk)
            continue
        queue.put(obj)

    pending_objects = []

    while True:
        try:
            obj = queue.get_nowait()
        except Exception:
            break

        new_obj = obj['model']()

        fk_fields = {}
        for field, value in obj["fields"].iteritems():
            f = getattr(obj['model'], field)
            impl = getattr(f, 'impl', None)
            fk_model = None
            try:
                if hasattr(f.comparator.prop, "argument"):
                    if hasattr(f.comparator.prop.argument, "__call__"):
                        fk_model = f.comparator.prop.argument()
                    else:
                        fk_model = f.comparator.prop.argument.class_
            except AttributeError:
                pass

            if fk_model:
                if value not in keys[fk_model.__tablename__]:
                    if obj not in pending_objects:
                        queue.put(obj)
                        pending_objects.append(obj)
                        continue
                    else:
                        logger.error(
                            u"Can't resolve foreign key "
                            "'{0}' for object '{1}'".format(
                                field,
                                obj["model"]
                            )
                        )
                        break
                else:
                    value = keys[fk_model.__tablename__][value].id

            if isinstance(impl, orm.attributes.ScalarObjectAttributeImpl):
                if value:
                    fk_fields[field] = (value, fk_model)
            elif isinstance(impl, orm.attributes.CollectionAttributeImpl):
                if value:
                    fk_fields[field] = (value, fk_model)
            elif hasattr(f, 'property') and isinstance(
                f.property.columns[0].type, sqlalchemy.types.DateTime
            ):
                if value:
                    setattr(
                        new_obj,
                        field,
                        datetime.strptime(value, "%d-%m-%Y %H:%M:%S")
                    )
                else:
                    setattr(
                        new_obj,
                        field,
                        datetime.now()
                    )
            else:
                setattr(new_obj, field, value)

        for field, data in fk_fields.iteritems():
            if isinstance(data[0], int):
                setattr(new_obj, field, db.query(data[1]).get(data[0]))
            elif isinstance(data[0], list):
                for v in data[0]:
                    getattr(new_obj, field).append(
                        db.query(data[1]).get(v)
                    )

        db.add(new_obj)
        db.commit()
        keys[obj['model'].__tablename__][obj["pk"]] = new_obj

        # UGLY HACK for testing
        if new_obj.__class__.__name__ == 'Node':
            new_obj.attributes = models.NodeAttributes()
            db.commit()
            new_obj.attributes.volumes = \
                new_obj.volume_manager.gen_volumes_info()
            network_manager = NetworkManager()
            network_manager.update_interfaces_info(new_obj.id)
            db.commit()


def upload_fixtures():
    fns = []
    for path in settings.FIXTURES_TO_UPLOAD:
        if not os.path.isabs(path):
            path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                path))
        fns.append(path)

    for fn in fns:
        with open(fn, "r") as fileobj:
            upload_fixture(fileobj)
        logger.info("Fixture has been uploaded from file: %s" % fn)


def dump_fixture(model_name):
    dump = []
    app_name = 'nailgun'
    model = getattr(models, capitalize_model_name(model_name))
    for obj in db.query(model).all():
        obj_dump = {}
        obj_dump['pk'] = getattr(obj, model.__mapper__.primary_key[0].name)
        obj_dump['model'] = "%s.%s" % (app_name, model_name)
        obj_dump['fields'] = {}
        dump.append(obj_dump)

        for prop in model.__mapper__.iterate_properties:
            if isinstance(prop, sqlalchemy.orm.ColumnProperty):
                field = str(prop.key)
                value = getattr(obj, field)
                if value is None:
                    continue
                if not isinstance(value, (
                        list, dict, str, unicode, int, float, bool)):
                    value = ""
                obj_dump['fields'][field] = value
    sys.stdout.write(json.dumps(dump, indent=4))
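
A short usage sketch for the three entry points above. The module path, the fixture file name and the model name are assumptions, and a configured nailgun database must be reachable for any of this to run.

from nailgun.fixtures import fixman   # module path is an assumption

# Render the settings template and load a single fixture file.
with open('admin_network.json') as fileobj:   # hypothetical file name
    fixman.upload_fixture(fileobj)

# Load every file listed in settings.FIXTURES_TO_UPLOAD.
fixman.upload_fixtures()

# Dump the current rows of one model to stdout as a JSON fixture.
fixman.dump_fixture('notification')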

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,11 +0,0 @@
[
    {
        "pk": 1,
        "model": "nailgun.notification",
        "fields": {
            "topic": "done",
            "datetime": "",
            "message": "Master node installation has been completed successfully. Now you can boot new nodes over PXE, they will be discovered and become available for installing OpenStack on them"
        }
    }
]

Some files were not shown because too many files have changed in this diff