[attribute_generation] All in one commit
This commit is contained in:
parent 0461386615
commit 21e27f3ba8
@@ -1,39 +1,143 @@
#!/usr/bin/env ruby
#!/usr/bin/env python

# Copyright 2012, Mirantis
import json
import os
import os.path
import logging
import pprint
import sys

require 'rubygems'
require 'json'
require 'httpclient'
import httplib
import argparse

if ARGV[0].nil?
  puts "Provide path to release file for uploading"
  puts "You can also provide Nailgun app endpoint as a second argument, if it's not localhost (which is the default)"
  exit 1
end

data = JSON.parse(File.read(File.expand_path(ARGV[0])))
console = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.addHandler(console)

url = "http://127.0.0.1:8000/api/releases"
if not ARGV[1].nil?
  url = "#{ARGV[1]}/api/releases"
end
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--dry-run", dest="dry_run",
                    action='store_true', default=False,
                    help="Do not really create release")
parser.add_argument("-H", "--host", dest="host",
                    action='store', type=str, default="127.0.0.1",
                    help="API host")
parser.add_argument("-P", "--port", dest="port",
                    action='store', type=int, default=8000,
                    help="API port")
parser.add_argument("-f", "--file", dest="release_file",
                    action='store', type=str, required=True,
                    help="Release file")
params = parser.parse_args()

puts "Using #{url} url for release uploading..."
if not params.release_file:
    parser.error("Release file is not set. Use -f option.")

headers = {"Content-Type" => "application/json"}

cli = HTTPClient.new
begin
  res = cli.post(url, data.to_json, headers)
rescue Exception => e
  puts "Unknown error: #{e.message}"
end
if res.status == 200
  puts "Release '#{data['name']}' has been successfully created."
elsif res.status == 409
  puts "Release '#{data['name']}' already exists."
elsif res.status < 200 or res.status >= 300
  puts "Error in creating release: #{res.inspect}"
  exit 1
end
def main():

    logger.info("=== Creating release ===")
    with open(params.release_file, "r") as f:
        logger.debug("Trying to parse release file")
        data = json.load(f)

    httpconn = httplib.HTTPConnection(host=params.host, port=params.port)
    httpheaders = {"Content-Type": "application/json"}

    httpdata = {}
    for field in ('name', 'version', 'description', 'networks_metadata'):
        httpdata[field] = data[field]

    logger.info("Request url: %s, data: %s" % \
        ('/api/releases', json.dumps(httpdata)))

    if params.dry_run:
        release_dict = {'id': '1'}
    else:
        logger.debug("Sending request")
        try:
            httpconn.request(method="POST",
                             url="/api/releases",
                             body=json.dumps(httpdata),
                             headers=httpheaders)
        except Exception as e:
            logger.error("Error: %s" % str(e))
            sys.exit(1)
        else:
            response = httpconn.getresponse()
            response_body = response.read()
            logger.debug("Response status: %s" % response.status)
            logger.debug("Response body: %s" % response_body)

            if response.status == 200:
                logger.info("Release '%s' has been successfully added" % \
                    httpdata['name'])
                release_dict = json.loads(response_body)
                logger.info("Release id: %s" % release_dict['id'])
            elif response.status == 409:
                logger.error("Release '%s' already exists" % httpdata['name'])
                sys.exit(0)
            elif response.status < 200 or response.status >= 300:
                logger.error("Error in creating release: %s" % response.read())
                sys.exit(1)

    def add_item(httpconn, handler_url, release,
                 item_collection, item_fields):

        for item in item_collection:
            httpdata = {}
            for field in item_fields:
                httpdata[field] = item[field]

            if params.dry_run:
                httpdata['release'] = '1'
            else:
                httpdata['release'] = release

            logger.debug("Request: url: %s, data: %s" % \
                (handler_url, json.dumps(httpdata)))

            if not params.dry_run:
                logger.debug("Sending request")
                try:
                    httpconn.request(method="POST",
                                     url=handler_url,
                                     body=json.dumps(httpdata),
                                     headers=httpheaders)

                except Exception as e:
                    logger.error("Error: %s" % str(e))
                    raise e
                else:
                    response = httpconn.getresponse()
                    response_body = response.read()
                    logger.debug("Response status: %s" % response.status)
                    logger.debug("Response body: %s" % response_body)

    add_item(
        httpconn, '/api/points', release_dict['id'],
        data['points'],
        ('name', 'scheme')
    )

    add_item(
        httpconn, '/api/coms', release_dict['id'],
        data['components'],
        ('name', 'deploy', 'provides', 'requires')
    )

    add_item(
        httpconn, '/api/roles', release_dict['id'],
        data['roles'],
        ('name', 'components')
    )


if __name__ == "__main__":
    main()
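For reference, a minimal sketch (not part of the commit) of the request the rewritten script ends up sending, assuming a Nailgun instance on the script's defaults of 127.0.0.1:8000; the payload values here are illustrative, mirroring the release JSON fixtures further down:

    # Hypothetical usage sketch; host, port and payload values are assumptions.
    import httplib
    import json

    httpdata = {"name": "Essex", "version": "1.2.3",
                "description": "Essex release description",
                "networks_metadata": [{"name": "admin", "access": "private"}]}
    conn = httplib.HTTPConnection(host="127.0.0.1", port=8000)
    conn.request(method="POST", url="/api/releases",
                 body=json.dumps(httpdata),
                 headers={"Content-Type": "application/json"})
    response = conn.getresponse()
    print response.status, response.read()  # 200 on success, 409 if it exists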
bin/deploy
@@ -8,10 +8,9 @@ require 'rubygems'
require 'ohai'
require 'json'

APIURL = "http://127.0.0.1:8000"
URL = "http://127.0.0.1"
COOKS_DIR = "/tmp/cookbooks"
DATABAG_DIR = "/tmp/databags"
DATABAG_NAME = "nodes.tar.gz"

ohai = Ohai::System.new
ohai.require_plugin("network")
@@ -19,34 +18,52 @@ ohai.require_plugin("linux/network")

ohai_data = ohai.data

if ARGV[0]
  component = ARGV[0]
else
  puts "Component name is not set."
  exit 1
end

host_id = ohai.data["macaddress"].gsub(':', '')

# Downloading host-specific json
solo_json_file = "#{host_id}.json"
system("wget -N -T 7 --tries=7 -a wget.log #{URL}/#{solo_json_file}")
system("wget -N -T 7 --tries=7 -a wget.log -q -O #{solo_json_file} #{APIURL}/api/endpoints/#{host_id}/#{component}")

solo_json = JSON.parse(File.read(solo_json_file))

cooks = solo_json["run_list"].map do |recipe|
  cook_ver = recipe.match(/recipe\[(\S+)::\S+@(\S+)\]/)
  cook, version = cook_ver[1], cook_ver[2]

  {cook => version}
end.uniq

Dir.mkdir(COOKS_DIR) unless File.exists?(COOKS_DIR)
Dir.mkdir(DATABAG_DIR) unless File.exists?(DATABAG_DIR)
cluster_id = solo_json["cluster_id"]
system("wget -N -T 7 --tries=7 -a wget.log #{URL}/cluster_#{cluster_id}/#{DATABAG_NAME}")
system("tar xzf \"#{DATABAG_NAME}\" -C #{DATABAG_DIR}")

if not solo_json["cooks"]

  cooks = solo_json["run_list"].map do |recipe|
    cook_ver = recipe.match(/recipe\[(\S+)::\S+@(\S+)\]/)
    cook, version = cook_ver[1], cook_ver[2]
    {cook => version}
  end.uniq

  cooks.each do |cb|
    filename = "#{cb.keys[0]}_#{cb.values[0]}.tar.gz"
    system("wget -N -T 7 --tries=7 -a wget.log #{URL}/#{filename}")
    system("tar xzf \"#{filename}\" -C #{COOKS_DIR}")
  end

else

  solo_json["cooks"].each do |cook|
    cook_name = cook["name"]
    cook_version = cook["version"]
    filename = "#{cook_name}_#{cook_version}.tar.gz"
    system("wget -N -T 7 --tries=7 -a wget.log #{URL}/#{filename}")
    system("tar xzf \"#{filename}\" -C #{COOKS_DIR}")
  end

cooks.each do |cb|
  filename = "#{cb.keys[0]}_#{cb.values[0]}.tar.gz"
  system("wget -N -T 7 --tries=7 -a wget.log #{URL}/#{filename}")
  system("tar xzf \"#{filename}\" -C #{COOKS_DIR}")
end

Dir.mkdir("/var/log/chef") unless File.exists?("/var/log/chef")
Dir.mkdir("/tmp/chef") unless File.exists?("/tmp/chef")

solo_rb = %{
  log_location "/var/log/chef/solo.log"
  file_cache_path "/tmp/chef"
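As a hedged sketch (not in the commit), the endpoint URL the reworked deploy script now fetches is derived from the node's MAC address and the component name passed in ARGV[0]:

    # Assumed example values, for illustration only.
    mac = "08:00:00:00:00:01"
    host_id = mac.replace(":", "")  # "080000000001", as in the Ruby gsub above
    component = "component0"
    url = "http://127.0.0.1:8000/api/endpoints/%s/%s" % (host_id, component)
    # The script saves the response to <host_id>.json and hands it to chef-solo.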
@@ -17,63 +17,8 @@ def create_cookbook(metafile)
  md = Chef::Cookbook::Metadata.new
  md.from_file(metafile)

  attr_id = create_attributes(metafile, md)
  create_recipes(metafile, md, attr_id)
end

def create_attributes(metafile, md)
  cook_name = md.name.empty? ? File.basename(File.dirname(metafile)) : md.name

  attrs_dir = File.join(File.dirname(metafile), "attributes")
  attr_file = File.join(attrs_dir, "exposed.rb")
  unless File.exists?(attr_file) and File.readable?(attr_file)
    puts "File '#{attr_file}' not found."
    return nil
  end
  node = Chef::Node.new

  attrs_url = "#{ADMIN_URL}/attributes"
  puts "Processing '#{attr_file}' attributes file..."
  node.from_file(attr_file)
  attr_hash = {'attribute' => node.attribute.to_hash,
               'cookbook' => cook_name,
               'version' => md.version,
              }
  attr_id = http_put(attrs_url, attr_hash.to_json)
  return attr_id
end

def create_recipes(metafile, md, attr_id)
  cook_name = md.name.empty? ? File.basename(File.dirname(metafile)) : md.name

  recipes_dir = File.join(File.dirname(metafile), "recipes")
  unless File.exists?(recipes_dir) and File.readable?(recipes_dir)
    puts "Recipes folder not found."
    return true
  end
  Dir.chdir(recipes_dir)
  recipes = Dir.glob("*.rb")

  recipes_tree_url = "#{ADMIN_URL}/recipes"
  dep_reg = /^# depends: "(\S+)"$/

  recipe_tree = []
  recipes.each do |recipe|
    depends = []
    File.readlines(recipe).each { |line| depends << $1 if dep_reg =~ line }
    recipe_name = File.basename(recipe, '.rb')
    recipe_full = "#{cook_name}::#{recipe_name}@#{md.version}"
    recipe_tree << {"recipe" => recipe_full, "depends" => depends, "attribute" => attr_id}
  end

  res = http_put(recipes_tree_url, recipe_tree.to_json)
  if res
    puts "Recipes for cookbook '#{cook_name}' created successfully"
  else
    puts "Skipping cookbook '#{cook_name}' from tar creation because of errors."
    return false
  end

  begin
    Dir.mktmpdir do |dir|
      cook_dir = File.join(dir, cook_name)
@@ -88,23 +33,6 @@ def create_recipes(metafile, md, attr_id)
      puts e.backtrace
      return false
  end
  return true
end

def http_put(url, data_json)
  headers = {"Content-Type" => "application/json"}
  cli = HTTPClient.new
  begin
    res = cli.put(url, data_json, headers)
  rescue Exception => e
    puts "Unknown error: #{e.message}"
    return nil
  end
  if res.status < 200 or res.status >= 300
    puts "Error received from server: #{res.inspect}"
    return nil
  end
  return res.content
end

### MAIN ###
@@ -57,7 +57,8 @@ template "#{node.nailgun.root}/nailgun/extrasettings.py" do
    :cobbler_address => "localhost",
    :cobbler_user => node.cobbler.user,
    :cobbler_password => node.cobbler.password,
    :cobbler_profile => "centos-6.2-x86_64"
    :cobbler_profile => "centos-6.2-x86_64",
    :repo_address => node.cobbler.repoaddr
  )
end
@@ -11,7 +11,8 @@ releases = Dir.glob("#{node[:nailgun][:root]}/os-cookbooks/releases/*.json")
releases.each do |rls|
  bash "Bash script for release creation #{rls}" do
    code <<-EOH
      #{node[:nailgun][:root]}/bin/create_release "#{rls}"
      #{node[:nailgun][:root]}/bin/create_release -f "#{rls}"
    EOH
  end
end
@@ -8,10 +8,9 @@ require 'rubygems'
require 'ohai'
require 'json'

APIURL = "http://<%= node.cobbler.repoaddr %>:8000"
URL = "http://<%= node.cobbler.repoaddr %>"
COOKS_DIR = "/tmp/cookbooks"
DATABAG_DIR = "/tmp/databags"
DATABAG_NAME = "nodes.tar.gz"

ohai = Ohai::System.new
ohai.require_plugin("network")
@@ -19,36 +18,60 @@ ohai.require_plugin("linux/network")

ohai_data = ohai.data

if ARGV[0]
  component = ARGV[0]
else
  puts "Component name is not set."
  exit 1
end

host_id = ohai.data["macaddress"].gsub(':', '')

# Downloading host-specific json
solo_json_file = "#{host_id}.json"
system("wget -N -T 7 --tries=7 -a wget.log #{URL}/#{solo_json_file}")
system("wget -N -T 7 --tries=7 -a wget.log -q -O #{solo_json_file} #{APIURL}/api/endpoints/#{host_id}/#{component}")

solo_json = JSON.parse(File.read(solo_json_file))

cooks = solo_json["run_list"].map do |recipe|
  cook_ver = recipe.match(/recipe\[(\S+)::\S+@(\S+)\]/)
  cook, version = cook_ver[1], cook_ver[2]

  {cook => version}
end.uniq

Dir.mkdir(COOKS_DIR) unless File.exists?(COOKS_DIR)
Dir.mkdir(DATABAG_DIR) unless File.exists?(DATABAG_DIR)
cluster_id = solo_json["cluster_id"]
system("wget -N -T 7 --tries=7 -a wget.log #{URL}/cluster_#{cluster_id}/#{DATABAG_NAME}")
system("tar xzf \"#{DATABAG_NAME}\" -C #{DATABAG_DIR}")

if not solo_json["cooks"]

  cooks = solo_json["run_list"].map do |recipe|
    cook_ver = recipe.match(/recipe\[(\S+)::\S+@(\S+)\]/)
    cook, version = cook_ver[1], cook_ver[2]
    {cook => version}
  end.uniq

  cooks.each do |cb|
    filename = "#{cb.keys[0]}_#{cb.values[0]}.tar.gz"
    system("wget -N -T 7 --tries=7 -a wget.log #{URL}/#{filename}")
    system("tar xzf \"#{filename}\" -C #{COOKS_DIR}")
  end

else

  solo_json["cooks"].each do |cook|
    cook_name = cook["name"]
    cook_version = cook["version"]
    filename = "#{cook_name}_#{cook_version}.tar.gz"
    system("wget -N -T 7 --tries=7 -a wget.log #{URL}/#{filename}")
    system("tar xzf \"#{filename}\" -C #{COOKS_DIR}")
  end

cooks.each do |cb|
  filename = "#{cb.keys[0]}_#{cb.values[0]}.tar.gz"
  system("wget -N -T 7 --tries=7 -a wget.log #{URL}/#{filename}")
  system("tar xzf \"#{filename}\" -C #{COOKS_DIR}")
end

Dir.mkdir("/var/log/chef") unless File.exists?("/var/log/chef")
Dir.mkdir("/tmp/chef") unless File.exists?("/tmp/chef")

solo_rb = %{
  log_location "/var/log/solo.log"
  log_location "/var/log/chef/solo.log"
  file_cache_path "/tmp/chef"
  cookbook_path "#{COOKS_DIR}"
  log_level :debug
@@ -15,3 +15,5 @@ COBBLER_URL = "http://<%= @cobbler_address %>/cobbler_api"
COBBLER_USER = "<%= @cobbler_user %>"
COBBLER_PASSWORD = "<%= @cobbler_password %>"
COBBLER_PROFILE = "<%= @cobbler_profile %>"

REPO_ADDRESS = "<%= @repo_address %>"
cooks
@@ -1 +1 @@
Subproject commit 2b629048af3b7c2911cb4b9183eeb4aa492e718f
Subproject commit 533def6771db5957e76c9298213b4148c2e7fdb2
@@ -1,20 +1,3 @@
import re
from django.db import models
from django import forms


class RecipeField(models.CharField):

    def __init__(self, *args, **kwargs):
        super(RecipeField, self).__init__(*args, **kwargs)


class RecipeListFormField(forms.CharField):

    def __init__(self, *args, **kwargs):
        super(RecipeListFormField, self).__init__(*args, **kwargs)

    def to_python(self, value):
        if not value:
            return ()
        return tuple(re.split(r'@|::', value))
@@ -7,49 +7,87 @@ from django.forms.fields import Field, IntegerField, CharField, ChoiceField, \
    BooleanField
from django.core.validators import RegexValidator

from nailgun.models import Cluster, Node, Recipe, Role, Release, Network, \
    Attribute
from nailgun.models import Cluster
from nailgun.models import Node
from nailgun.models import Role
from nailgun.models import Release
from nailgun.models import Network
from nailgun.models import Point
from nailgun.models import Com

import nailgun.api.validators as vld


class RecipeForm(forms.ModelForm):
    depends = Field(required=False)

    class Meta:
        model = Recipe

    def clean(self):
        return self.cleaned_data

    def clean_depends(self):
        for depend in self.cleaned_data['depends']:
            vld.validate_recipe(depend)
        return self.cleaned_data['depends']

    def clean_attribute(self):
        return self.cleaned_data['attribute']

    def clean_recipe(self):
        vld.validate_recipe(self.cleaned_data['recipe'])
        return self.cleaned_data['recipe']
import logging


class RoleForm(forms.ModelForm):
    recipes = Field(validators=[vld.validate_role_recipes])
logger = logging.getLogger('forms')


class RoleFilterForm(forms.Form):
    node_id = Field(required=False, validators=[vld.validate_node_id])
    release_id = Field(required=False, validators=[])


class RoleCreateForm(forms.ModelForm):
    components = Field(validators=[], required=False)

    def clean_components(self):

        return [c.name for c in Com.objects.filter(
            name__in=self.data['components'],
            release=Release.objects.get(id=self.data['release'])
        )]

    class Meta:
        model = Role


class AttributeForm(forms.ModelForm):
    attribute = Field(validators=[vld.validate_attribute])
class PointFilterForm(forms.Form):
    release = IntegerField(required=False)


class PointUpdateForm(forms.ModelForm):
    scheme = Field(validators=[])

    class Meta:
        model = Attribute
        model = Point
        exclude = ('name', 'release', 'provided_by', 'required_by')


class RoleFilterForm(forms.Form):
    node_id = Field(required=False, validators=[vld.validate_node_id])
class PointCreateForm(forms.ModelForm):
    scheme = Field(required=False, validators=[])

    class Meta:
        model = Point
        exclude = ('provided_by', 'required_by')


class ComFilterForm(forms.Form):
    release = IntegerField(required=False)


class ComCreateForm(forms.ModelForm):
    deploy = Field(validators=[])
    requires = Field(validators=[], required=False)
    provides = Field(validators=[], required=False)

    def clean_requires(self):

        return [p.name for p in Point.objects.filter(
            name__in=self.data['requires'],
            release=Release.objects.get(id=self.data['release'])
        )]

    def clean_provides(self):

        return [p.name for p in Point.objects.filter(
            name__in=self.data['provides'],
            release=Release.objects.get(id=self.data['release'])
        )]

    class Meta:
        model = Com
        exclude = ('roles',)


class ClusterForm(forms.Form):
@@ -90,7 +128,6 @@ class NodeFilterForm(forms.Form):

class ReleaseCreationForm(forms.ModelForm):
    roles = Field(validators=[vld.validate_release_node_roles])
    networks_metadata = Field(validators=[vld.validate_networks_metadata])

    class Meta:
@@ -1,23 +1,54 @@
import os

import copy
import re
import celery
import ipaddr
import json

from piston.handler import BaseHandler, HandlerMetaClass
from piston.utils import rc, validate
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from django.db import models

from nailgun.models import Cluster, Node, Recipe, Role, Release, Network, \
    Attribute, Task
from nailgun.models import Cluster
from nailgun.models import Release
from nailgun.models import Role
from nailgun.models import Com
from nailgun.models import Point
from nailgun.models import EndPoint
from nailgun.models import Network
from nailgun.models import Node
from nailgun.models import Task

from nailgun.deployment_types import deployment_types
from nailgun.api.validators import validate_json, validate_json_list
from nailgun.api.forms import ClusterForm, ClusterCreationForm, RecipeForm, \
    RoleForm, RoleFilterForm, NodeCreationForm, NodeFilterForm, NodeForm, \
    ReleaseCreationForm, NetworkCreationForm, AttributeForm
from nailgun.api.forms import ClusterForm
from nailgun.api.forms import ClusterCreationForm
from nailgun.api.forms import RoleFilterForm
from nailgun.api.forms import RoleCreateForm
from nailgun.api.forms import PointFilterForm
from nailgun.api.forms import PointUpdateForm
from nailgun.api.forms import PointCreateForm
from nailgun.api.forms import ComFilterForm
from nailgun.api.forms import ComCreateForm
from nailgun.api.forms import NodeCreationForm
from nailgun.api.forms import NodeFilterForm
from nailgun.api.forms import NodeForm
from nailgun.api.forms import ReleaseCreationForm
from nailgun.api.forms import NetworkCreationForm

from nailgun import tasks
import nailgun.api.validators as vld

from nailgun.helpers import DeployManager
from nailgun.helpers import DeployDriver

import logging


logger = logging.getLogger(__name__)


handlers = {}
@@ -44,25 +75,53 @@ class JSONHandler(BaseHandler):
    def render(cls, item, fields=None):
        json_data = {}
        use_fields = fields if fields else cls.fields

        if not use_fields:
            raise ValueError("No fields for serialize")
        for field in use_fields:
            value = getattr(item, field)
            if value is None:
                pass
            elif value.__class__.__name__ in ('ManyRelatedManager',
                                              'RelatedManager'):
                try:
                    handler = handlers[value.model.__name__]
                    json_data[field] = map(handler.render, value.all())
                except KeyError:
                    raise Exception("No handler for %s" % \
                        value.model.__name__)
            elif value.__class__.__name__ in handlers:
                handler = handlers[value.__class__.__name__]
                json_data[field] = handler.render(value)
            if isinstance(field, (tuple,)):

                logger.debug("rendering: field is a tuple: %s" % str(field))
                if field[1] == '*':
                    subfields = None
                else:
                    subfields = field[1:]

                value = getattr(item, field[0])
                if value is None:
                    pass
                elif value.__class__.__name__ in ('ManyRelatedManager',
                                                  'RelatedManager'):
                    try:
                        handler = handlers[value.model.__name__]
                        json_data[field[0]] = [
                            handler.render(o, fields=subfields) \
                            for o in value.all()]
                    except KeyError:
                        raise Exception("No handler for %s" % \
                            value.model.__name__)

                elif value.__class__.__name__ in handlers:
                    handler = handlers[value.__class__.__name__]
                    json_data[field[0]] = handler.render(value,
                                                         fields=subfields)
                else:
                    json_data[field[0]] = value.id

            else:
                json_data[field] = value
                value = getattr(item, field)

                if value is None:
                    pass
                elif value.__class__.__name__ in ('ManyRelatedManager',
                                                  'RelatedManager',):
                    json_data[field] = [getattr(o, 'id') \
                                        for o in value.all()]
                elif value.__class__.__name__ in handlers:
                    json_data[field] = value.id
                else:
                    json_data[field] = value

        return json_data
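A hedged reading of the extended field spec (the example values are not from the commit): a plain string still serializes the attribute as-is, while a tuple names a related object plus the subfields to render for it, with '*' meaning all of the related handler's fields:

    fields = ('id', 'name',        # plain attributes, copied verbatim
              ('nodes', '*'),      # related manager: render full sub-objects
              ('release', 'name')) # related object: render only its 'name'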
@@ -72,7 +131,7 @@ class TaskHandler(JSONHandler):
    model = Task

    @classmethod
    def render(cls, task):
    def render(cls, task, fields=None):
        result = {
            'task_id': task.pk,
            'name': task.name,
@@ -103,6 +162,7 @@ class ClusterChangesHandler(BaseHandler):
        except ObjectDoesNotExist:
            return rc.NOT_FOUND

        logger.debug("Cluster changes: Checking if another task is running")
        if cluster.task:
            if cluster.task.ready:
                cluster.task.delete()
@@ -111,16 +171,25 @@ class ClusterChangesHandler(BaseHandler):
                response.content = "Another task is running"
                return response

        logger.debug("Cluster changes: Updating node roles")
        for node in cluster.nodes.filter(redeployment_needed=True):
            node.roles = node.new_roles.all()
            node.new_roles.clear()
            node.redeployment_needed = False
            node.save()

        logger.debug("Cluster changes: Updating node networks")
        for nw in cluster.release.networks.all():
            for node in cluster.nodes.all():
                nw.update_node_network_info(node)

        logger.debug("Cluster changes: Trying to instantiate cluster")

        dm = DeployManager(cluster_id)
        dm.clean_cluster()
        dm.instantiate_cluster()

        logger.debug("Cluster changes: Trying to deploy cluster")
        task = Task(task_name='deploy_cluster', cluster=cluster)
        task.run(cluster_id)
@@ -172,10 +241,37 @@ class DeploymentTypeHandler(JSONHandler):
        return {}


class EndPointCollectionHandler(BaseHandler):
    allowed_methods = ('GET',)

    def read(self, request, node_id=None, component_name=None):
        if not node_id or not component_name:
            return map(EndPointHandler.render,
                       EndPoint.objects.all())

        try:
            node = Node.objects.get(id=node_id)
            component = Com.objects.get(
                name=component_name,
                release=node.cluster.release
            )
            dd = DeployDriver(node, component)
            return dd.deploy_data()
        except:
            return rc.NOT_FOUND


class EndPointHandler(JSONHandler):
    model = EndPoint

    @classmethod
    def render(cls, endpoint):
        return endpoint.data


class ClusterCollectionHandler(BaseHandler):

    allowed_methods = ('GET', 'POST')
    model = Cluster

    def read(self, request):
        json_data = map(
@@ -186,6 +282,16 @@ class ClusterCollectionHandler(BaseHandler):

    @validate_json(ClusterCreationForm)
    def create(self, request):
        data = request.form.cleaned_data

        try:
            cluster = Cluster.objects.get(
                name=data['name']
            )
            return rc.DUPLICATE_ENTRY
        except Cluster.DoesNotExist:
            pass

        cluster = Cluster()
        for key, value in request.form.cleaned_data.items():
            if key in request.form.data:
@@ -198,9 +304,9 @@ class ClusterCollectionHandler(BaseHandler):
        vlan_ids = {
            'storage': 200,
            'public': 300,
            'floating': 300,
            'floating': 400,
            'fixed': 500,
            'management': 100
            'admin': 100
        }

        for network in cluster.release.networks_metadata:
@@ -227,7 +333,7 @@ class ClusterCollectionHandler(BaseHandler):
                access=access,
                network=str(new_network),
                gateway=str(new_network[1]),
                range_l=str(new_network[2]),
                range_l=str(new_network[3]),
                range_h=str(new_network[-1]),
                vlan_id=vlan_ids[network['name']]
            )
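A minimal sketch of what the range_l change does, assuming the ipaddr module imported at the top of this file and a hypothetical /24 network:

    import ipaddr

    new_network = ipaddr.IPv4Network("10.0.1.0/24")
    gateway = str(new_network[1])   # 10.0.1.1
    range_l = str(new_network[3])   # 10.0.1.3 (was new_network[2], 10.0.1.2)
    range_h = str(new_network[-1])  # 10.0.1.255, the broadcast address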
@@ -246,9 +352,12 @@ class ClusterHandler(JSONHandler):

    allowed_methods = ('GET', 'PUT', 'DELETE')
    model = Cluster
    fields = ('id', 'name', 'nodes', 'release', 'task')
    fields = ('id', 'name',
              ('nodes', '*'),
              ('release', '*'), 'task')

    def read(self, request, cluster_id):
        logger.debug("Cluster reading: id: %s" % cluster_id)
        try:
            cluster = Cluster.objects.get(id=cluster_id)
            return ClusterHandler.render(cluster)
@@ -286,7 +395,6 @@ class ClusterHandler(JSONHandler):
class NodeCollectionHandler(BaseHandler):

    allowed_methods = ('GET', 'POST')
    model = Node

    @validate(NodeFilterForm, 'GET')
    def read(self, request):
@@ -314,7 +422,7 @@ class NodeHandler(JSONHandler):
    model = Node
    fields = ('id', 'name', 'info', 'status', 'mac', 'fqdn', 'ip',
              'manufacturer', 'platform_name', 'redeployment_needed',
              'roles', 'new_roles', 'os_platform')
              ('roles', '*'), ('new_roles', '*'), 'os_platform')

    def read(self, request, node_id):
        try:
@@ -346,113 +454,158 @@ class NodeHandler(JSONHandler):
            return rc.NOT_FOUND


class AttributeCollectionHandler(BaseHandler):
class PointCollectionHandler(BaseHandler):

    allowed_methods = ('GET', 'POST')

    @validate(PointFilterForm, 'GET')
    def read(self, request):
        logger.debug("Getting points from data: %s" % \
            str(request.form.data))
        if 'release' in request.form.data:
            points = Point.objects.filter(
                release__id=request.form.cleaned_data['release']
            )
        else:
            points = Point.objects.all()
        return map(PointHandler.render, points)

    @validate_json(PointCreateForm)
    def create(self, request):
        data = request.form.cleaned_data
        logger.debug("Creating Point from data: %s" % str(data))

        try:
            point = Point.objects.get(
                name=data['name'],
                release=data['release']
            )
            return rc.DUPLICATE_ENTRY
        except Point.DoesNotExist:
            pass

        point = Point(
            name=data['name'],
            release=data['release']
        )

        if 'scheme' in data:
            point.scheme = data['scheme']
        else:
            point.scheme = {}
        point.save()

        return PointHandler.render(point)


class PointHandler(JSONHandler):

    allowed_methods = ('GET', 'PUT')
    model = Point

    def read(self, request):
        return map(AttributeHandler.render, Attribute.objects.all())
    fields = ('id', 'name', 'scheme', ('release', 'name'),
              ('required_by', 'name'),
              ('provided_by', 'name'))

    @validate_json(AttributeForm)
    def update(self, request):
        data = request.form.cleaned_data
        attr, is_created = Attribute.objects.get_or_create(
            cookbook=data['cookbook'],
            version=data['version']
        )
        attr.attribute = data['attribute']
        attr.save()

        # FIXME: it is not RESTful, handler should return full representation
        # of an attribute
        return attr.id


class AttributeHandler(JSONHandler):

    allowed_methods = ('GET',)
    model = Attribute

    fields = ('id', 'cookbook', 'version', 'attribute')

    def read(self, request, attribute_id):
    def read(self, request, point_id):
        try:
            return Attribute.objects.get(id=attribute_id)
            return PointHandler.render(Point.objects.get(id=point_id))
        except ObjectDoesNotExist:
            return rc.NOT_FOUND

    @validate_json(PointUpdateForm)
    def update(self, request, point_id):
        data = request.form.cleaned_data
        logger.debug("Updating Point from data: %s" % str(data))

class RecipeCollectionHandler(BaseHandler):
        try:
            point = Point.objects.get(id=point_id)
        except ObjectDoesNotExist:
            return rc.NOT_FOUND

    allowed_methods = ('GET', 'POST', 'PUT')
        if data.get('scheme', None):
            point.scheme = data['scheme']

        point.save()
        return PointHandler.render(point)


class ComCollectionHandler(BaseHandler):
    allowed_methods = ('GET', 'POST')

    @validate(ComFilterForm, 'GET')
    def read(self, request):
        return map(RecipeHandler.render, Recipe.objects.all())
        logger.debug("Getting components from data: %s" % \
            str(request.form.data))
        if 'release' in request.form.data:
            components = Com.objects.filter(
                release__id=request.form.cleaned_data['release']
            )
        else:
            components = Com.objects.all()
        return map(ComHandler.render, components)

    @validate_json(RecipeForm)
    @validate_json(ComCreateForm)
    def create(self, request):
        data = request.form.cleaned_data
        logger.debug("Creating Com from data: %s" % str(data))

        try:
            release = Recipe.objects.get(
                recipe=data['recipe']
            component = Com.objects.get(
                name=data['name'],
                release=data['release']
            )
            return rc.DUPLICATE_ENTRY
        except Recipe.DoesNotExist:
        except Com.DoesNotExist:
            pass

        recipe = Recipe(recipe=data['recipe'])
        recipe.save()
        for key, value in data.items():
            if key == 'depends':
                for dep in value:
                    try:
                        d = Recipe.objects.get(recipe=dep)
                    except Recipe.DoesNotExist:
                        d = Recipe(recipe=dep)
                        d.save()
            else:
                setattr(recipe, key, value)
        recipe.save()
        component = Com(
            name=data['name'],
            release=data['release']
        )

        return RecipeHandler.render(recipe)
        component.deploy = data['deploy']
        component.save()

    @validate_json_list(RecipeForm)
    def update(self, request):
        for form in request.forms:
            attr = form.cleaned_data["attribute"]
            create_depends = []
            for depend in form.cleaned_data["depends"]:
        if data.get('requires', None):
            for point_name in data['requires']:
                try:
                    d = Recipe.objects.get(recipe=depend)
                except Recipe.DoesNotExist:
                    d = Recipe(recipe=depend)
                    d.save()
                create_depends.append(d)
            try:
                r = Recipe.objects.get(recipe=form.cleaned_data["recipe"])
            except Recipe.DoesNotExist:
                r = Recipe(recipe=form.cleaned_data["recipe"])
                r.save()
            r.depends = create_depends
            if attr:
                r.attribute = attr
            r.save()
        return rc.CREATED
                    point = Point.objects.get(
                        name=point_name,
                        release=data['release']
                    )
                except ObjectDoesNotExist:
                    return rc.NOT_FOUND
                else:
                    component.requires.add(point)

        if data.get('provides', None):
            for point_name in data['provides']:
                try:
                    point = Point.objects.get(
                        name=point_name,
                        release=data['release']
                    )
                except ObjectDoesNotExist:
                    return rc.NOT_FOUND
                else:
                    component.provides.add(point)

        component.save()
        return ComHandler.render(component)


class RecipeHandler(JSONHandler):

class ComHandler(JSONHandler):
    allowed_methods = ('GET',)
    model = Recipe
    model = Com

    @classmethod
    def render(cls, recipe, fields=None):
        return recipe.recipe
    fields = ('id', 'name', 'deploy', ('release', 'name'),
              ('requires', 'name'), ('provides', 'name'),
              ('roles', 'name'))

    def read(self, request, recipe_id):
    def read(self, request, component_id):
        try:
            recipe = Recipe.objects.get(id=recipe_id)
            return RecipeHandler.render(recipe)
            return ComHandler.render(Com.objects.get(id=component_id))
        except ObjectDoesNotExist:
            return rc.NOT_FOUND
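Hedged examples (not in the commit) of JSON bodies these handlers accept, modeled on the fixtures further down; per ComCreateForm, requires and provides are lists of point names:

    point = {"name": "point0", "release": 1,
             "scheme": {"attr0": {"generator": "generator_ip",
                                  "generator_args": "floating",
                                  "attribute": "attr.path0"}}}
    com = {"name": "component0", "release": 1,
           "deploy": {"driver": "chef-solo",
                      "driver_args": {"run_list":
                                      ["recipe[cookbook0::recipe0@0.1.0]"]}},
           "requires": [], "provides": ["point0"]}
    # POSTed to /api/points and /api/coms respectively (see urls.py below).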
@@ -463,6 +616,14 @@ class RoleCollectionHandler(BaseHandler):

    @validate(RoleFilterForm, 'GET')
    def read(self, request):
        if 'release_id' in request.form.data:
            return map(
                RoleHandler.render,
                Role.objects.filter(
                    release__id=request.form.data['release_id']
                )
            )

        roles = Role.objects.all()
        if 'node_id' in request.form.data:
            result = []
@@ -481,15 +642,40 @@ class RoleCollectionHandler(BaseHandler):
        else:
            return map(RoleHandler.render, roles)

    @validate_json(RoleForm)
    @validate_json(RoleCreateForm)
    def create(self, request):
        data = request.form.cleaned_data
        recipes = Recipe.objects.filter(recipe__in=data['recipes'])
        role = Role(name=data["name"])
        role.save()
        map(role.recipes.add, recipes)
        logger.debug("Creating Role from data: %s" % str(data))

        try:
            role = Role.objects.get(
                name=data['name'],
                release=data['release']
            )
            return rc.DUPLICATE_ENTRY
        except Role.DoesNotExist:
            pass

        role = Role(
            name=data['name'],
            release=data['release']
        )

        role.save()

        if data.get('components', None):
            for component_name in data['components']:
                try:
                    component = Com.objects.get(
                        name=component_name,
                        release=data['release']
                    )
                except ObjectDoesNotExist:
                    return rc.NOT_FOUND
                else:
                    role.components.add(component)

        role.save()
        return RoleHandler.render(role)
@@ -497,19 +683,22 @@ class RoleHandler(JSONHandler):

    allowed_methods = ('GET',)
    model = Role
    fields = ('id', 'name', 'recipes')
    fields = ('id', 'name', ('release', 'id', 'name'),
              ('components', 'name'))

    def read(self, request, role_id):
        try:
            role = Role.objects.get(id=role_id)
            RoleHandler.render(role)
            return RoleHandler.render(Role.objects.get(id=role_id))
        except ObjectDoesNotExist:
            return rc.NOT_FOUND


class ReleaseCollectionHandler(BaseHandler):

    logger.warning("Trying to add release")

    allowed_methods = ('GET', 'POST')
    model = Release

    def read(self, request):
        return map(ReleaseHandler.render, Release.objects.all())
@@ -517,7 +706,7 @@ class ReleaseCollectionHandler(BaseHandler):
    @validate_json(ReleaseCreationForm)
    def create(self, request):
        data = request.form.cleaned_data

        logger.debug("Creating release from data: %s" % str(data))
        try:
            release = Release.objects.get(
                name=data['name'],
@@ -535,16 +724,6 @@ class ReleaseCollectionHandler(BaseHandler):
        )
        release.save()

        for role in data["roles"]:
            rl = Role(name=role["name"])
            rl.save()
            recipes = Recipe.objects.filter(recipe__in=role["recipes"])
            map(rl.recipes.add, recipes)
            rl.save()
            release.roles.add(rl)

        release.save()

        return ReleaseHandler.render(release)
@@ -553,7 +732,8 @@ class ReleaseHandler(JSONHandler):
    allowed_methods = ('GET', 'DELETE')
    model = Release
    fields = ('id', 'name', 'version', 'description', 'networks_metadata',
              'roles')
              ('roles', 'name'), ('components', 'name'),
              ('points', 'name'))

    def read(self, request, release_id):
        try:
@@ -4,14 +4,17 @@ from piston.resource import Resource
from nailgun.api.handlers import ClusterCollectionHandler, ClusterHandler, \
    NodeCollectionHandler, NodeHandler, \
    NetworkHandler, NetworkCollectionHandler, \
    RecipeCollectionHandler, RecipeHandler, \
    RoleCollectionHandler, RoleHandler, \
    ReleaseCollectionHandler, ReleaseHandler, \
    ClusterChangesHandler, \
    DeploymentTypeCollectionHandler, \
    DeploymentTypeHandler, \
    TaskHandler, \
    AttributeCollectionHandler, AttributeHandler
    TaskHandler
from nailgun.api.handlers import ComCollectionHandler
from nailgun.api.handlers import ComHandler
from nailgun.api.handlers import PointCollectionHandler
from nailgun.api.handlers import PointHandler
from nailgun.api.handlers import EndPointCollectionHandler


class JsonResource(Resource):
@@ -44,24 +47,30 @@ urlpatterns = patterns('',
    url(r'^tasks/(?P<task_id>[\da-f\-]{36})/?$',
        JsonResource(TaskHandler),
        name='task_handler'),
    url(r'^attributes/?$',
        JsonResource(AttributeCollectionHandler),
        name='attribute_collection_handler'),
    url(r'^attribute/(?P<attribute_id>\d+)$',
        JsonResource(AttributeHandler),
        name='attribute_handler'),
    url(r'^recipes/?$',
        JsonResource(RecipeCollectionHandler),
        name='recipe_collection_handler'),
    url(r'^recipe/(?P<recipe_id>\d+)$',
        JsonResource(RecipeHandler),
        name='recipe_handler'),
    url(r'^roles/?$',
        JsonResource(RoleCollectionHandler),
        name='role_collection_handler'),
    url(r'^roles/(?P<role_id>\d+)/?$',
        JsonResource(RoleHandler),
        name='role_handler'),
    url(r'^coms/?$',
        JsonResource(ComCollectionHandler),
        name='com_collection_handler'),
    url(r'^coms/(?P<component_id>\d+)/?$',
        JsonResource(ComHandler),
        name='com_handler'),
    url(r'^points/?$',
        JsonResource(PointCollectionHandler),
        name='point_collection_handler'),
    url(r'^points/(?P<point_id>\d+)/?$',
        JsonResource(PointHandler),
        name='point_handler'),
    url(r'^endpoints/(?P<node_id>[\dA-F]{12})/(?P<component_name>\w+)/?$',
        JsonResource(EndPointCollectionHandler),
        name='endpoint_handler'),
    url(r'^endpoints/?$',
        JsonResource(EndPointCollectionHandler),
        name='endpoint_collection_handler'),
    url(r'^releases/?$',
        JsonResource(ReleaseCollectionHandler),
        name='release_collection_handler'),
@@ -7,14 +7,25 @@ from piston.decorator import decorator

from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from nailgun.models import Cluster, Node, Recipe, Role, Release, Network
from nailgun.models import Cluster
from nailgun.models import Node
from nailgun.models import Role
from nailgun.models import Release
from nailgun.models import Network

import logging


logger = logging.getLogger("validators")


# Handler decorator for JSON validation using forms
def validate_json(v_form):
    @decorator
    def wrap(f, self, request, *a, **kwa):
        logger.debug("Validation json: trying to find out content_type")
        content_type = request.content_type.split(';')[0]
        logger.debug("Validation json: content_type: %s" % content_type)
        if content_type != "application/json":
            response = rc.BAD_REQUEST
            response.content = "Invalid content type, must be application/json"
@@ -22,18 +33,24 @@ def validate_json(v_form):

        try:
            parsed_body = json.loads(request.body)
            logger.debug("Validation json: body: %s" % str(parsed_body))
        except:
            response = rc.BAD_REQUEST
            response.content = "Invalid JSON object"
            raise HttpStatusCode(response)

        if not isinstance(parsed_body, dict):
            logger.debug("Validation json: parsed_body is not dict")
            response = rc.BAD_REQUEST
            response.content = "Dictionary expected"
            raise HttpStatusCode(response)

        form = v_form(parsed_body, request.FILES)

        logger.debug("Validation json: trying to construct form from v_form")
        try:
            form = v_form(parsed_body, request.FILES)
        except Exception as e:
            logger.debug("Validation json: error: %s" % str(e.message))
        logger.debug("Validation json: form: %s" % str(form))
        if form.is_valid():
            setattr(request, 'form', form)
            return f(self, request, *a, **kwa)
@@ -84,30 +101,6 @@ def validate_json_list(v_form):
FORM DATA VALIDATORS
"""


def validate_recipe(value):
    if not re.match(r'^[^\]]+::([^\]]+)@[0-9]+(\.[0-9]+){1,2}$', value):
        raise ValidationError('Recipe should be in a \
"cookbook::recipe@version" format')


def validate_attribute(value):
    if not isinstance(value, dict):
        raise ValidationError('Attributes must be in a dictionary')


def validate_role_recipes(value):
    if value and isinstance(value, list):
        map(validate_recipe, value)
        for i in value:
            try:
                rec_exist = Recipe.objects.get(recipe=i)
            except Recipe.DoesNotExist:
                raise ValidationError('Recipe %s does not exist' % i)
    else:
        raise ValidationError('Invalid recipe list')


validate_node_id = RegexValidator(regex=re.compile('^[\dA-F]{12}$'))
@@ -144,15 +137,23 @@ def validate_release_node_roles(data):
    if not all(map(lambda i: 'name' in i, data)):
        raise ValidationError('Role name is empty')
    for role in data:
        if 'recipes' not in role or not role['recipes']:
            raise ValidationError('Recipes list for role "%s" \
        if 'components' not in role or not role['components']:
            raise ValidationError('Components list for role "%s" \
should not be empty' % role['name'])
        for recipe in role['recipes']:
            validate_recipe(recipe)
            try:
                rec_exists = Recipe.objects.get(recipe=recipe)
            except Recipe.DoesNotExist:
                raise ValidationError('Recipe %s doesn\'t exist' % recipe)


def validate_release_points(data):
    if not data or not isinstance(data, list):
        raise ValidationError('Invalid points list')
    if not all(map(lambda i: 'name' in i, data)):
        raise ValidationError('Point name is empty')


def validate_release_components(data):
    if not data or not isinstance(data, list):
        raise ValidationError('Invalid components list')
    if not all(map(lambda i: 'name' in i, data)):
        raise ValidationError('Component name is empty')


def forbid_modifying_roles(value):
@@ -17,3 +17,5 @@ COBBLER_URL = "http://localhost/cobbler_api"
COBBLER_USER = "cobbler"
COBBLER_PASSWORD = "cobbler"
COBBLER_PROFILE = "centos-6.2-x86_64"

REPO_ADDRESS = "127.0.0.1"
@@ -4,10 +4,11 @@
"pk": 1,
|
||||
"fields": {
|
||||
"name": "Default Release",
|
||||
"version": "0.1.0",
|
||||
"networks_metadata": [
|
||||
{"name": "floating", "access": "public"},
|
||||
{"name": "fixed", "access": "private"},
|
||||
{"name": "management", "access": "private"}
|
||||
{"name": "admin", "access": "private"}
|
||||
]
|
||||
}
|
||||
},
|
||||
|
@@ -18,5 +19,147 @@
"name": "Default Cluster",
|
||||
"release": 1
|
||||
}
|
||||
},
|
||||
{
|
||||
"model": "nailgun.point",
|
||||
"pk": 1,
|
||||
"fields": {
|
||||
"name": "point0",
|
||||
"release": 1,
|
||||
"scheme": {
|
||||
"attr0": {
|
||||
"generator": "generator_ip",
|
||||
"generator_args": "floating",
|
||||
"attribute": "attr.path0"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"model": "nailgun.point",
|
||||
"pk": 2,
|
||||
"fields": {
|
||||
"name": "point1",
|
||||
"release": 1,
|
||||
"scheme": {
|
||||
"attr1": {
|
||||
"generator": "generator_ip",
|
||||
"generator_args": "floating",
|
||||
"attribute": "attr.path1"
|
||||
},
|
||||
"attr2": {
|
||||
"generator": "generator_ip",
|
||||
"generator_args": "floating",
|
||||
"attribute": "attr.path2"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"model": "nailgun.com",
|
||||
"pk": 1,
|
||||
"fields": {
|
||||
"name": "component0",
|
||||
"release": 1,
|
||||
"deploy": {
|
||||
"driver": "chef-solo",
|
||||
"driver_args": {
|
||||
"run_list": [
|
||||
"recipe[cookbook0::recipe0@0.1.0]"
|
||||
]
|
||||
}
|
||||
},
|
||||
"provides": [1],
|
||||
"requires": []
|
||||
}
|
||||
},
|
||||
{
|
||||
"model": "nailgun.com",
|
||||
"pk": 2,
|
||||
"fields": {
|
||||
"name": "component1",
|
||||
"release": 1,
|
||||
"deploy": {
|
||||
"driver": "chef-solo",
|
||||
"driver_args": {
|
||||
"run_list": [
|
||||
"recipe[cookbook0::recipe1@0.1.0]",
|
||||
"recipe[cookbook0::recipe2@0.1.0]"
|
||||
]
|
||||
}
|
||||
},
|
||||
"provides": [2],
|
||||
"requires": [1]
|
||||
}
|
||||
},
|
||||
{
|
||||
"model": "nailgun.com",
|
||||
"pk": 3,
|
||||
"fields": {
|
||||
"name": "component2",
|
||||
"release": 1,
|
||||
"deploy": {
|
||||
"driver": "chef-solo",
|
||||
"driver_args": {
|
||||
"run_list": [
|
||||
"recipe[cookbook1::recipe0@0.1.0]",
|
||||
"recipe[cookbook2::recipe0@0.1.0]"
|
||||
]
|
||||
}
|
||||
},
|
||||
"provides": [2],
|
||||
"provides": [],
|
||||
"requires": [1, 2]
|
||||
}
|
||||
},
|
||||
{
|
||||
"model": "nailgun.role",
|
||||
"pk": 1,
|
||||
"fields": {
|
||||
"name": "role1",
|
||||
"release": 1,
|
||||
"components": [1,2]
|
||||
}
|
||||
},
|
||||
{
|
||||
"model": "nailgun.role",
|
||||
"pk": 2,
|
||||
"fields": {
|
||||
"name": "role2",
|
||||
"release": 1,
|
||||
"components": [3]
|
||||
}
|
||||
},
|
||||
{
|
||||
"model": "nailgun.node",
|
||||
"pk": "080000000001",
|
||||
"fields": {
|
||||
"name": "test.example.com",
|
||||
"ip": "127.0.0.1",
|
||||
"metadata": {
|
||||
"block_device": {},
|
||||
"interfaces": {},
|
||||
"cpu": {},
|
||||
"memory": {}
|
||||
},
|
||||
"cluster": 1,
|
||||
"roles": [1]
|
||||
}
|
||||
},
|
||||
{
|
||||
"model": "nailgun.node",
|
||||
"pk": "080000000002",
|
||||
"fields": {
|
||||
"name": "test2.example.com",
|
||||
"ip": "127.0.0.2",
|
||||
"metadata": {
|
||||
"block_device": {},
|
||||
"interfaces": {},
|
||||
"cpu": {},
|
||||
"memory": {}
|
||||
},
|
||||
"cluster": 1,
|
||||
"roles": [2]
|
||||
}
|
||||
}
|
||||
]
|
||||
|
|
|
@@ -6,9 +6,10 @@
"name": "Essex",
|
||||
"version": "1.2.3",
|
||||
"description": "Essex release description",
|
||||
"roles": [
|
||||
1,
|
||||
2
|
||||
"networks_metadata": [
|
||||
{"name": "floating", "access": "public"},
|
||||
{"name": "fixed", "access": "private"},
|
||||
{"name": "admin", "access": "private"}
|
||||
]
|
||||
}
|
||||
},
|
||||
|
@@ -29,20 +30,114 @@
    }
},
{
    "pk": 1,
    "model": "nailgun.role",
    "fields": {
        "recipes": [],
        "name": "Sample role"
    "model": "nailgun.point",
    "pk": 1,
    "fields": {
        "name": "point0",
        "release": 1,
        "scheme": {
            "attr0": {
                "generator": "generator_ip",
                "generator_args": "floating",
                "attribute": "attr.path0"
            }
        }
    }
},
{
    "pk": 2,
    "model": "nailgun.role",
    "fields": {
        "recipes": [],
        "name": "Another role"
    "model": "nailgun.point",
    "pk": 2,
    "fields": {
        "name": "point1",
        "release": 1,
        "scheme": {
            "attr1": {
                "generator": "generator_ip",
                "generator_args": "floating",
                "attribute": "attr.path1"
            },
            "attr2": {
                "generator": "generator_ip",
                "generator_args": "floating",
                "attribute": "attr.path2"
            }
        }
    }
},
{
    "model": "nailgun.com",
    "pk": 1,
    "fields": {
        "name": "component0",
        "release": 1,
        "deploy": {
            "driver": "chef-solo",
            "driver_args": {
                "run_list": [
                    "recipe[cookbook0::recipe0@0.1.0]"
                ]
            }
        },
        "provides": [1],
        "requires": []
    }
},
{
    "model": "nailgun.com",
    "pk": 2,
    "fields": {
        "name": "component1",
        "release": 1,
        "deploy": {
            "driver": "chef-solo",
            "driver_args": {
                "run_list": [
                    "recipe[cookbook0::recipe1@0.1.0]",
                    "recipe[cookbook0::recipe2@0.1.0]"
                ]
            }
        },
        "provides": [2],
        "requires": [1]
    }
},
{
    "model": "nailgun.com",
    "pk": 3,
    "fields": {
        "name": "component2",
        "release": 1,
        "deploy": {
            "driver": "chef-solo",
            "driver_args": {
                "run_list": [
                    "recipe[cookbook1::recipe0@0.1.0]",
                    "recipe[cookbook2::recipe0@0.1.0]"
                ]
            }
        },
        "provides": [2],
        "provides": [],
        "requires": [1, 2]
    }
},
{
    "model": "nailgun.role",
    "pk": 1,
    "fields": {
        "name": "Controller",
        "release": 1,
        "components": [1,2]
    }
},
{
    "model": "nailgun.role",
    "pk": 2,
    "fields": {
        "name": "Compute",
        "release": 1,
        "components": [3]
    }
},
{
    "pk": "111111111111",
@@ -8,12 +8,15 @@ from random import choice
import re
import time
import socket
import pprint

from nailgun import models
from nailgun import settings

from nailgun.models import Node, Recipe, IPAddr, Network
from nailgun.exceptions import EmptyListError, NotFound


logger = logging.getLogger(__name__)
logger = logging.getLogger("helpers")


class SshConnect(object):
@@ -47,38 +50,106 @@ class SshConnect(object):
pass
|
||||
|
||||
|
||||
def merge_dictionary(dst, src):
|
||||
"""
|
||||
'True' way of merging two dictionaries
|
||||
(python dict.update() updates just top-level keys and items)
|
||||
"""
|
||||
stack = [(dst, src)]
|
||||
while stack:
|
||||
current_dst, current_src = stack.pop()
|
||||
for key in current_src:
|
||||
if key not in current_dst:
|
||||
current_dst[key] = current_src[key]
|
||||
else:
|
||||
if isinstance(current_src[key], dict) \
|
||||
and isinstance(current_dst[key], dict):
|
||||
stack.append((current_dst[key], current_src[key]))
|
||||
else:
|
||||
current_dst[key] = current_src[key]
|
||||
return dst


class EndPointDataDriver:
    def __init__(self, node):
        self.node = node

    def node_ip(self, network_name):
        for ip_addr in models.IPAddr.objects.filter(node__id=self.node.id):
            network = models.Network.objects.get(id=ip_addr.network.id)
            if network.name == network_name:
                return ip_addr.ip_addr

    def node_netmask(self, network_name):
        release = self.node.cluster.release
        network = models.Network.objects.get(name=network_name,
                                             release=release)
        return network.netmask

    def node_vlan(self, network_name):
        release = self.node.cluster.release
        network = models.Network.objects.get(name=network_name,
                                             release=release)
        return network.vlan_id


def generate_passwords(d):
    stack = []
    new_dict = d.copy()
class EndPointManager:
    def __init__(self, data_driver, name, scheme):

    def create_pass():
        return ''.join(
        self.data_driver = data_driver
        self.name = name
        self.scheme = scheme
        self.data = {}

    def generator_ip_repo(self, args):
        return settings.REPO_ADDRESS

    def generator_ip(self, network_name):
        network_name = str(network_name)
        ip = self.data_driver.node_ip(network_name)
        logger.debug("EndPointManager: generator_ip: %s" % ip)
        return ip

    def generator_netmask(self, network_name):
        network_name = str(network_name)
        netmask = self.data_driver.node_netmask(network_name)
        logger.debug("EndPointManager: generator_netmask: %s" % netmask)
        return netmask

    def generator_vlan(self, network_name):
        network_name = str(network_name)
        vlan_id = self.data_driver.node_vlan(network_name)
        logger.debug("EndPointManager: generator_vlan: %s" % vlan_id)
        return vlan_id

    def generator_url(self, url_args):
        url_args = dict(url_args)
        ip = self.data_driver.node_ip(url_args['network'])
        url = "%s://%s:%s%s" % (url_args['protocol'],
                                ip,
                                url_args['port'],
                                url_args.get('url', ''))
        logger.debug("EndPointManager: generator_url: %s" % url)
        return url

    def generator_transparent(self, args):
        logger.debug("EndPointManager: generator_transparent: %s" % \
            args)
        return args

    def generator_password(self, length=8):
        length = int(length)
        password = ''.join(
            choice(
                ''.join((string.ascii_letters, string.digits))
            ) for _ in xrange(10)
        )
            ) for _ in xrange(length)
        )
        logger.debug("EndPointManager: generator_password: %s" % \
            password)
        return password
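
    # Illustrative usage (hypothetical scheme entry, not from the fixtures):
    #   {"generator": "generator_password", "generator_args": 12}
    # instantiate() would then call generator_password(12) and store a random
    # 12-character [a-zA-Z0-9] token under the scheme's attribute path.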

def construct(d, k):
    @classmethod
    def merge_dictionary(cls, dst, src):
        """
        'True' way of merging two dictionaries
        (python dict.update() updates just top-level keys and items)
        """
        stack = [(dst, src)]
        while stack:
            current_dst, current_src = stack.pop()
            for key in current_src:
                if key not in current_dst:
                    current_dst[key] = current_src[key]
                else:
                    if isinstance(current_src[key], dict) \
                            and isinstance(current_dst[key], dict):
                        stack.append((current_dst[key], current_src[key]))
                    else:
                        current_dst[key] = current_src[key]
        return dst

    @classmethod
    def list2dict(cls, d, k):
        """
        Creating a nested dictionary:
        ['a', 'b', 'c', 'd'] => {'a': {'b': {'c': 'd'}}}
@ -87,142 +158,312 @@ def generate_passwords(d):
        _d = copy.deepcopy(d)
        if len(k) > 1:
            _k = k.pop(0)
            _d[_k] = construct(d, k)
            _d[_k] = cls.list2dict(d, k)
            return _d
        return k.pop(0)
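
    # Illustrative doctest-style sketch (sample values):
    # >>> EndPointManager.list2dict({}, ['service', 'mysql', 'user', 'admin'])
    # {'service': {'mysql': {'user': 'admin'}}}
    # Note: the key list is consumed via pop(0), so pass a copy to reuse it.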

    def search_pwd(node, cdict):
        """
        Recursively searching for 'password' fields
        """
        for a, val in node.items():
            stack.append(a)
            if isinstance(val, dict):
                search_pwd(val, cdict)
            elif "password" in a:
                k = stack[:]
                k.append(create_pass())
                c = construct({}, k)
                cdict = merge_dictionary(cdict, c)
            stack.pop()
        return cdict
    def instantiate(self):
        for k in self.scheme:
            logger.debug("EndPointManager: generating %s" % k)
            generator = getattr(self, self.scheme[k]["generator"])
            generator_args = self.scheme[k]["generator_args"]
            generated = generator(generator_args)

    search_pwd(d, new_dict)
    return new_dict
            attributes = self.scheme[k]["attribute"]
            """
            example of attribute:
            ["service.mysql.user", "service.postgresql.user"]
            """
            if not isinstance(attributes, (list, tuple)):
                attributes = [attributes]

            for attribute in attributes:
                attribute_keys = re.split(ur'\.', attribute)
                logger.debug("EndPointManager: attribute_keys: %s" % \
                    str(attribute_keys))

                attribute_keys.append(generated)
                logger.debug("EndPointManager: attribute_keys: %s" % \
                    str(attribute_keys))
                attribute_dict = self.list2dict({}, attribute_keys)
                logger.debug("EndPointManager: attribute_dict: %s" % \
                    str(attribute_dict))

                self.merge_dictionary(self.data, attribute_dict)

    def get_data(self):
        logger.debug("EndPointManager: data: %s" % \
            str(self.data))
        return self.data
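
    # End-to-end sketch for one scheme entry (names are hypothetical):
    #   scheme = {"db_pass": {"generator": "generator_password",
    #                         "generator_args": 8,
    #                         "attribute": "service.mysql.password"}}
    # instantiate() splits "service.mysql.password" on dots, appends the
    # generated value, and list2dict()/merge_dictionary() fold it into
    # self.data as {'service': {'mysql': {'password': '<generated>'}}}.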


class DeployGenerator:
    @classmethod
    def components4(cls, cluster_id, method="list"):

        if method == "list":
            attrs = []
        else:
            attrs = {}

        nodes = Node.objects.filter(cluster__id=cluster_id)
        for node in nodes:

            node_attrs = cls.node_attrs(node.id)

            if isinstance(attrs, (list,)):
                node_attrs["node_id"] = node.id
                attrs.append(node_attrs)
            else:
                attrs[node.id] = node_attrs

        return attrs

    @classmethod
    def node_attrs(self, node_id, force=False):
        node = Node.objects.get(id=node_id)
        if force or not node.node_attrs:

            node_ips = {}
            ip_addrs = IPAddr.objects.filter(node__id=node.id)
            for ip_addr in ip_addrs:
                network = Network.objects.get(id=ip_addr.network.id)
                node_ips[network.name] = ip_addr.ip_addr

            node_attrs = {
                "node_ips": node_ips
            }

            roles = node.roles.all()
            for role in roles:
                recipes = role.recipes.all()
                for recipe in recipes:
                    if recipe.attribute:
                        node_attrs = merge_dictionary(
                            node_attrs,
                            recipe.attribute.attribute
                        )
            node_attrs = generate_passwords(node_attrs)
            node.node_attrs = node_attrs
            node.save()
        return Node.objects.get(id=node_id).node_attrs

    @classmethod
    def recipes(cls, cluster_id):
        used_recipes = []
        nodes = Node.objects.filter(cluster__id=cluster_id)
        if not nodes:
            raise EmptyListError("Node list is empty")

        for node in nodes:
            roles = node.roles.all()
            for role in roles:
                recipes = role.recipes.all()
                for recipe in recipes:
                    used_recipes.append(recipe.recipe)

        return used_recipes


class DatabagGenerator:
class DeployManager:
    def __init__(self, cluster_id):
        self.cluster_id = cluster_id
        self.node_jsons = {}
        self.use_recipes = []
        self.cluster_component_ids = [
            c.id for n, r, c in self._cluster_iterator()
        ]
        self.release_id = models.Cluster.objects.get(id=cluster_id).release.id

    def generate(self):
        nodes = Node.objects.filter(cluster__id=self.cluster_id)
        if not nodes:
            raise EmptyListError("Node list is empty")
    def sorted_components(self):
        graph = {}
        for component in models.Com.objects.filter(
            id__in=self.cluster_component_ids
        ):
            self._resolve_cluster_deps(graph, component)

        for node in nodes:
            node_json = {}
            add_attrs = {}
        try:
            sorted_components = self._topol_sort(graph)
        except KeyError:
            raise Exception("Cluster dependencies cannot be resolved")

            roles_for_node = node.roles.all()
        logger.debug("sorted_components: %s" % \
            pprint.pformat(sorted_components))
        return sorted_components

            node_json['cluster_id'] = self.cluster_id
            for f in node._meta.fields:
                if f.name != 'cluster':
                    node_json[f.name] = getattr(node, f.name)
    def _cluster_iterator(self):
        for node in models.Node.objects.filter(cluster__id=self.cluster_id):
            for role in node.roles.all():
                for component in role.components.all():
                    yield [node, role, component]

            node_json['roles'] = []
            for role in roles_for_node:
                recipes = role.recipes.all()
                rc = []
                for r in recipes:
                    rc.append("recipe[%s]" % r.recipe)
                    self.use_recipes.append(r.recipe)
                    if r.attribute:
                        add_attrs = merge_dictionary(
                            add_attrs,
                            r.attribute.attribute
    def _resolve_cluster_deps(self, graph, component):
        if component.name not in graph:
            graph[component.name] = []
        requires = component.requires.all()
        logger.debug("Resolving cluster: component %s requires: %s" % \
            (component.name,
             str([p.name for p in requires])))

        for provided_by in models.Com.objects.filter(
            id__in=self.cluster_component_ids,
            provides__in=requires
        ):
            graph[component.name].append(provided_by.name)
            self._resolve_cluster_deps(graph, provided_by)
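
    # Sketch of the resulting structure (hypothetical component names):
    # if "compute" requires a point that "controller" provides, the graph
    # comes out as {'compute': ['controller'], 'controller': []}, which
    # _topol_sort() below orders as ['controller', 'compute'].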

    def _topol_sort(self, graph):
        """ Depth First Traversal algorithm for sorting DAG graph.

        Example graph: 1 depends on 4; 3 depends on 2 and 6; etc.
        Example code:

        .. code-block:: python

            >>> graph = {1: [4], 2: [], 3: [2,6], 4:[2,3], 5: [], 6: [2]}
            >>> topol_sort(graph)
            [2, 6, 3, 4, 1, 5]

        Exception is raised if there is a cycle:

        .. code-block:: python

            >>> graph = {1: [4], 2: [], 3: [2,6], 4:[2,3,1], 5: [], 6: [2]}
            >>> topol_sort(graph)
            ...
            Exception: Graph contains cycles, processed 4 depends on 1

        """

        def dfs(v):
            color[v] = "gray"
            for w in graph[v]:
                if color[w] == "black":
                    continue
                elif color[w] == "gray":
                    raise Exception(
                        "Graph contains cycles, processed %s depends on %s" % \
                        (v, w))
                dfs(w)
            color[v] = "black"
            _sorted.append(v)

        _sorted = []
        color = {}
        for j in graph:
            color[j] = "white"
        for i in graph:
            if color[i] == "white":
                dfs(i)

        return _sorted

    def clean_cluster(self):
        models.EndPoint.objects.filter(
            node__in=models.Node.objects.filter(cluster__id=self.cluster_id)
        ).delete()

    def instantiate_cluster(self):

        for node in models.Node.objects.filter(cluster__id=self.cluster_id):

"""
|
||||
it is needed to be checked if node have only one component
|
||||
assignment of given component and only one given point
|
||||
"""
            components_used = []
            points_used = []

            data_driver = EndPointDataDriver(node)

            roles = node.roles.all()
            for role in roles:
                components = role.components.all()
                for component in components:
                    if component.name in components_used:
                        raise Exception(
                            "Duplicated component: node: %s com: %s" % \
                            (node.id, component.name))
                    components_used.append(component.name)

                    provides = list(component.provides.all())

                    logger.debug("Com %s provides %s" % \
                        (component.name,
                         str([p.name for p in provides])))

                    for point in provides:
                        if point.name in points_used:
                            raise Exception(
                                "Duplicated point: node: %s point: %s" % \
                                (node.id, point.name))
                        points_used.append(point.name)

                        logger.debug("Instantiating point: %s" % point.name)
                        manager = EndPointManager(
                            data_driver,
                            point.name,
                            point.scheme
                        )
            add_attrs = generate_passwords(add_attrs)
                        manager.instantiate()

            node_json["roles"].append({
                "name": role.name,
                "recipes": rc
            })
                        end_point = models.EndPoint(
                            point=point,
                            node=node,
                            data=manager.get_data()
                        )

            node_json = merge_dictionary(node_json, add_attrs)
                        end_point.save()

            if 'network' in node.metadata:
                node_json['network'] = node.metadata['network']

            self.node_jsons[node.id] = node_json
class DeployDriver:
    def __init__(self, node, component):
        self.node = node
        self.component = component

    @classmethod
    def merge_dictionary(cls, dst, src):
        """
        'True' way of merging two dictionaries
        (python dict.update() updates just top-level keys and items)
        """
        stack = [(dst, src)]
        while stack:
            current_dst, current_src = stack.pop()
            for key in current_src:
                if key not in current_dst:
                    current_dst[key] = current_src[key]
                else:
                    if isinstance(current_src[key], dict) \
                            and isinstance(current_dst[key], dict):
                        stack.append((current_dst[key], current_src[key]))
                    else:
                        current_dst[key] = current_src[key]
        return dst

    def endpoint_iterator(self, node, component):
        logger.debug("endpoint_iterator: node: %s component: %s" % \
            (node.id, component.name))
        for point in component.provides.all():
            logger.debug("endpoint_iterator: component: %s provides: %s" % \
                (component.name, point.name))
            try:
                logger.debug("endpoint_iterator: looking for provided "\
                    "endpoint point: %s node: %s" % \
                    (point.name, node.id))
                ep = models.EndPoint.objects.get(point=point, node=node)
            except ObjectDoesNotExist as e:
                logger.debug("endpoint_iterator: provided endpoint "\
                    "is not found point: %s node: %s" % \
                    (point.name, node.id))
                raise Exception("Provided point %s instance is not found" % \
                    point.name)
            except Exception as e:
                logger.debug("Exception: %s" % str(e))
                raise e
            else:
                logger.debug("endpoint_iterator: provided endpoint found " \
                    "point: %s node: %s endpoint: %s" % \
                    (point.name, node.id, ep.id))
                yield ep

        for point in component.requires.all():
"""
|
||||
FOR THE START WE TRY TO FIND ENDPOINT INSTANCE
|
||||
BOUND TO THIS NODE. IT IT FAILS THEN WE LOOK FOR
|
||||
ENDPOINT INSTANCES BOUND TO OTHER NODES IN CLUSTER
|
||||
"""
            try:
                ep = models.EndPoint.objects.get(
                    point=point,
                    node=node
                )
            except:
                pass
            else:
                yield ep

            eps = models.EndPoint.objects.filter(point=point)

            if eps:
"""
|
||||
FIXME
|
||||
WE NEED MORE INTELLIGENT ALGORITHM TO CHOOSE
|
||||
WHICH ENDPOINT INSTANCE IS A MOST SUITABLE
|
||||
ONE FOR THIS COMPONENT. AT THE MOMENT WE
|
||||
SIMPLY RETURN FIRST FOUND INSTANCE
|
||||
"""
                ep = eps[0]
                logger.debug("endpoint_iterator: required endpoint found " \
                    "point: %s node: %s" % \
                    (point.name, ep.node.id))

                yield ep
            else:
                raise Exception("Required point %s instance is not found" % \
                    point.name)

    def deploy_data(self):
        self.data = {}
        try:
            for endpoint in self.endpoint_iterator(self.node, self.component):
                logger.error("Found endpoint id: %s for n=%s c=%s" % \
                    (endpoint.id, self.node.id,
                     self.component.name))
                self.merge_dictionary(self.data, endpoint.data)
        except:
            logger.error("Error while getting endpoints for n=%s c=%s" % \
                (self.node.id, self.component.name))
            raise Exception("Getting endpoints failed: node=%s com=%s" % \
                (self.node.id, self.component.name))

        logger.debug("Node: %s com: %s data: %s" % \
            (self.node.id, self.component.name, str(self.data)))
        return {
            "chef-solo": self.chef_solo_data,
            "puppet": self.puppet_data,
        }[self.component.deploy["driver"]]()

    def chef_solo_data(self):
        chef_data = {
            "run_list": self.component.deploy["driver_args"]["run_list"]
        }
        if self.component.deploy["driver_args"].get("cooks", None) is not None:
            chef_data["cooks"] = \
                self.component.deploy["driver_args"]["cooks"]
        logger.debug("Chef-data: %s" % str(chef_data))
        self.merge_dictionary(chef_data, self.data)
        return chef_data
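
        # Illustrative result (hypothetical deploy metadata): for
        #   component.deploy = {"driver": "chef-solo",
        #                       "driver_args": {"run_list": ["recipe[mysql]"]}}
        # and endpoint data {'service': {'mysql': {'password': 'x'}}}, this
        # returns {'run_list': ['recipe[mysql]'],
        #          'service': {'mysql': {'password': 'x'}}}.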

    def puppet_data(self):
        return self.data

@ -5,26 +5,44 @@ from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from jsonfield import JSONField
from api.fields import RecipeField


class Attribute(models.Model):
    cookbook = models.CharField(max_length=100)
    version = models.CharField(max_length=30)
    attribute = JSONField()
class EndPoint(models.Model):
    point = models.ForeignKey('Point', related_name='endpoints')
    node = models.ForeignKey('Node', related_name='endpoints')
    data = JSONField()

    class Meta:
        unique_together = ("point", "node")


class Recipe(models.Model):
    recipe = RecipeField(max_length=100)
    attribute = models.ForeignKey(Attribute, null=True, blank=True)
    depends = models.ManyToManyField("Recipe",
                                     related_name="recipe_deps",
                                     )
class Point(models.Model):
    name = models.CharField(max_length=100)
    release = models.ForeignKey('Release', related_name='points')
    scheme = JSONField()

    class Meta:
        unique_together = ("name", "release")


class Com(models.Model):
    name = models.CharField(max_length=100)
    release = models.ForeignKey('Release', related_name='components')
    requires = models.ManyToManyField(Point, related_name='required_by')
    provides = models.ManyToManyField(Point, related_name='provided_by')
    deploy = JSONField()

    class Meta:
        unique_together = ("name", "release")


class Role(models.Model):
    name = models.CharField(max_length=50)
    recipes = models.ManyToManyField(Recipe, related_name="roles")
    name = models.CharField(max_length=100)
    release = models.ForeignKey('Release', related_name='roles')
    components = models.ManyToManyField(Com, related_name="roles")

    class Meta:
        unique_together = ("name", "release")


class Release(models.Model):
@ -32,7 +50,6 @@ class Release(models.Model):
    version = models.CharField(max_length=30)
    description = models.TextField(null=True, blank=True)
    networks_metadata = JSONField()
    roles = models.ManyToManyField(Role, related_name='releases')

    class Meta:
        unique_together = ("name", "version")
@ -88,7 +105,7 @@ class Task(models.Model):


class Cluster(models.Model):
    name = models.CharField(max_length=100)
    name = models.CharField(max_length=100, unique=True)
    release = models.ForeignKey(Release, related_name='clusters')

    # working around Django issue #10227
@ -115,7 +132,6 @@ class Node(models.Model):
    status = models.CharField(max_length=30, choices=NODE_STATUSES,
                              default='ready')
    metadata = JSONField()
    node_attrs = JSONField()
    mac = models.CharField(max_length=17)
    ip = models.CharField(max_length=15)
    fqdn = models.CharField(max_length=255)

@ -64,7 +64,7 @@ class Validator:
        return (os, osversion, arch) in cls._supported_platform

    # FIXME
    # IT NEEDED TO BE CHECKED IF PROVISION ALREADY HAS THAT PROFILE
    # IT IS NEEDED TO BE CHECKED IF PROVISION ALREADY HAS THAT PROFILE
    # IF NOT THEN PROFILE IS OBVIOUSLY INVALID
    @classmethod
    def is_profile_valid(cls, profile):
@ -75,7 +75,7 @@ class Validator:
        return powertype in cls._supported_powertypes

    # FIXME
    # IT NEEDED TO BE CHECKED IF POWER IS VALID
    # IT IS NEEDED TO BE CHECKED IF POWER IS VALID
    @classmethod
    def is_power_valid(cls, power):
        return True

@ -170,5 +170,5 @@ PISTON_IGNORE_DUPE_MODELS = True

NETWORK_POOLS = {
    'public': ['172.18.0.0/16'],
    'private': ['10.0.0.0/8']
    'private': ['10.1.0.0/16']
}

@ -103,11 +103,23 @@ class TaskPool(object):
        self.pool.append(task)


@task_with_callbacks
def _chord_task(taskset, clbk):
def _chord_task(*args):

    if len(args) == 3:
        taskset, clbk = args[1], args[2]
    else:
        taskset, clbk = args[0], args[1]

    logger.error("TaskPool._chord_task: args: %s" % str(args))
    logger.error("TaskPool._chord_task: args length: %s" % len(args))
    logger.error("TaskPool._chord_task: taskset: %s" % str(taskset))
    logger.error("TaskPool._chord_task: clbk: %s" % str(clbk))

    # We have to create separate subtask that contains chord expression
    # because otherwise chord functions get applied synchronously
    return chord([tsk['func'].subtask(args=tsk['args'], \
        kwargs=tsk['kwargs']) for tsk in taskset])(clbk)
    return chord([
        tsk['func'].subtask(args=tsk['args'], kwargs=tsk['kwargs']) \
        for tsk in taskset])(clbk)
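
# Minimal standalone sketch of the same chord pattern (illustrative, not part
# of this module; assumes a configured Celery app, and the task decorator name
# varies by Celery version):
#
#     from celery import chord
#
#     @app.task
#     def add(x, y):
#         return x + y
#
#     @app.task
#     def tsum(numbers):
#         return sum(numbers)
#
#     # the header tasks run in parallel; tsum fires once all results are in
#     chord([add.subtask((i, i)) for i in range(10)])(tsum.subtask())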

    def _get_head_task(self):
        prev_task = None

@ -14,8 +14,8 @@ import tarfile
import shutil
from django.conf import settings

from nailgun.models import Cluster, Node, Role, Recipe
from nailgun.helpers import SshConnect, DeployGenerator
from nailgun.models import Cluster, Node, Role, Com
from nailgun.helpers import SshConnect, DeployManager
from nailgun.task_helpers import task_with_callbacks, TaskPool, topol_sort
from nailgun.exceptions import SSHError, EmptyListError, DeployError
from nailgun.provision import ProvisionConfig
@ -52,38 +52,35 @@ def node_set_error_status(node_id):
    node.save()


@task_with_callbacks(name='Deploy Cluster')
@task_with_callbacks
def deploy_cluster(cluster_id):

    graph = {}
    for recipe in Recipe.objects.filter(
        recipe__in=DeployGenerator.recipes(cluster_id)):
        graph[recipe.recipe] = [r.recipe for r in recipe.depends.all()]
    deploy_manager = DeployManager(cluster_id)
    release = Cluster.objects.get(id=cluster_id).release
    logger.debug("deploy_cluster: Cluster release: %s" % release.id)

    # NOTE(mihgen): Installation components dependency resolution
    # From nodes.roles.recipes we know recipes that need to be applied
    # We have to apply them in an order according to specified dependencies
    # To sort in an order, we use DFS (Depth First Traversal) over a DAG graph
    try:
        sorted_recipes = topol_sort(graph)
    except KeyError:
        raise DeployError("One or more recipes have unresolved dependencies")
    tree = TaskPool()
    # first element in sorted_recipes is the first recipe we have to apply
    for r in sorted_recipes:
        recipe = Recipe.objects.get(recipe=r)
        # We need to find nodes with these recipes
        roles = recipe.roles.all()
        nodes = Node.objects.filter(roles__in=roles, cluster__id=cluster_id)
    installed = []
    logger.debug("deploy_cluster: sorted_components: %s" % \
        deploy_manager.sorted_components())

    # TODO(mihgen): What if there are no nodes with the required role?
    # We need to raise an exception if there are any roles that depend on it
    for component_name in deploy_manager.sorted_components():
        logger.debug("deploy_cluster: Com: %s" % component_name)
        component = Com.objects.get(
            release=release,
            name=component_name)
        roles = component.roles.all()
        nodes = Node.objects.filter(roles__in=roles, cluster__id=cluster_id)

        taskset = []
        for node in nodes:
            taskset.append({'func': bootstrap_node, 'args': [node.id],
            logger.debug("deploy_cluster: task: node: %s com: %s" % \
                (node.id, component.name))
            bootstrap_args = [node.id, component.name]
            taskset.append({'func': bootstrap_node, 'args': bootstrap_args,
                            'kwargs': {}})
        tree.push_task(create_solo, (cluster_id, recipe.id))

        # FIXME(mihgen): if there are no taskset items,
        # we included recipes which are not applied on nodes.
        # We have to include only recipes which are assigned to nodes
@ -95,40 +92,6 @@ def deploy_cluster(cluster_id):
    return res


@task_with_callbacks
def create_solo(*args):
    logger = create_solo.get_logger()
    # FIXME(mihgen):
    # We have to do this ugly trick because chord precedes first argument
    if isinstance(args[0], list):
        args = args[1:]
    cluster_id, recipe = args[0], Recipe.objects.get(id=args[1])

    # We need to find nodes with these recipes
    roles = recipe.roles.all()
    nodes = Node.objects.filter(roles__in=roles)

    for node in nodes:
        node_solo = {
            "cluster_id": cluster_id,
            "run_list": ["recipe[%s]" % recipe.recipe],
            "components_list": DeployGenerator.components4(cluster_id),
            "components_dict": DeployGenerator.components4(cluster_id, "dict")
        }

        # writing to solo
        with open(
            os.path.join(
                settings.CHEF_CONF_FOLDER,
                "".join([node.id, ".json"])
            ),
            "w"
        ) as entity:
            entity.write(json.dumps(node_solo, sort_keys=True, indent=4))

    return True


def tcp_ping(host, port, timeout=5):
    try:
        s = socket.create_connection((str(host), int(port)), timeout)
@ -139,7 +102,7 @@ def tcp_ping(host, port, timeout=5):


@task_with_callbacks
def bootstrap_node(node_id):
def bootstrap_node(node_id, component_name):

    node = Node.objects.get(id=node_id)

@ -193,7 +156,7 @@ def bootstrap_node(node_id):
    else:
        logger.debug("Trying to launch deploy script on node %s" % node_id)
        # Returns True if succeeded
        exit_status = ssh.run("/opt/nailgun/bin/deploy")
        exit_status = ssh.run("/opt/nailgun/bin/deploy %s" % component_name)
        ssh.close()

        # ssh.run returns True, if command executed successfully
@ -213,18 +176,25 @@ def bootstrap_node(node_id):


def _is_node_bootstrap(node):
    ssh_user = 'root'
    ssh_key = settings.PATH_TO_BOOTSTRAP_SSH_KEY

    logger.debug(
        "Checking if node %s is booted with bootstrap image" \
        % node.id
    )
    try:
        logger.debug(
            "Trying to establish ssh connection using bootstrap key"
            "Trying to establish ssh connection using bootstrap key" \
            "ip: %s key: %s user: %s" % \
            (node.ip,
             ssh_key,
             ssh_user)
        )
        ssh = SshConnect(
            node.ip,
            'root',
            settings.PATH_TO_BOOTSTRAP_SSH_KEY
            ssh_user,
            ssh_key
        )
    except (paramiko.AuthenticationException,
            paramiko.PasswordRequiredException):
@ -234,6 +204,8 @@ def _is_node_bootstrap(node):
        logger.debug("Unknown error while ssh using bootstrap rsa key")
        return False
    else:
        logger.debug("Ssh connection succeeded: key: %s" % \
            ssh_key)
        ssh.close()
        return True

@ -272,13 +244,13 @@ def _provision_node(node_id):
    nd.power = ndp

    logger.debug(
        "Trying to save node %s into provision system" \
        % node_id
        "Trying to save node %s into provision system: profile: %s " % \
        (node_id, pf.name)
    )
    nd.save()

    logger.debug(
        "Trying to reboot node %s using %s" \
        % (node_id, ndp.power_type)
        "Trying to reboot node %s using %s in order to launch provisioning" % \
        (node_id, ndp.power_type)
    )
    nd.power_reboot()

@ -8,7 +8,14 @@ from django.core.urlresolvers import reverse, NoReverseMatch

from piston.emitters import Emitter

from nailgun.models import Cluster, Node, Recipe, Role, Release, Attribute
from nailgun import models
from nailgun.models import Cluster
from nailgun.models import Node
from nailgun.models import Role
from nailgun.models import Release
from nailgun.models import Com
from nailgun.models import Point
from nailgun.models import EndPoint
from nailgun.api import urls as api_urls
from nailgun import tasks

@ -30,54 +37,6 @@ class TestHandlers(TestCase):

    def setUp(self):
        self.request = http.HttpRequest()
        self.old_meta = {'block_device': 'value',
                         'interfaces': 'val2',
                         'cpu': 'asf',
                         'memory': 'sd'
                         }

        self.node_name = "test.server.com"

        self.node = Node(id="080000000001",
                         cluster_id=1,
                         name=self.node_name,
                         ip="127.0.0.1",
                         metadata=self.old_meta)
        self.node.save()

        self.another_node = Node(
            id="080000000000",
            name="test2.server.com",
            ip="127.0.0.2",
            metadata=self.old_meta)
        self.another_node.save()

        self.recipe = Recipe()
        self.recipe.recipe = 'cookbook::recipe@2.1'
        self.recipe.save()
        self.second_recipe = Recipe()
        self.second_recipe.recipe = 'nova::compute@0.1.0'
        self.second_recipe.save()
        self.third_recipe = Recipe()
        self.third_recipe.recipe = 'nova::monitor@0.1.0'
        self.third_recipe.save()

        self.role = Role()
        self.role.save()
        self.role.recipes.add(self.recipe)
        self.role.name = "My role"
        self.role.save()

        self.another_role = Role()
        self.another_role.save()
        self.another_role.recipes.add(self.recipe)
        self.another_role.name = "My role 2"
        self.another_role.save()

        self.node.roles = [self.role]
        self.node.save()
        self.node_url = reverse('node_handler',
                                kwargs={'node_id': self.node.id})

        self.new_meta = {'block_device': 'new-val',
                         'interfaces': 'd',
@ -85,21 +44,19 @@ class TestHandlers(TestCase):
                         'memory': 'a'
                         }

        self.another_cluster = Cluster(id=2,
                                       release_id=1,
                                       name='Another cluster')
        self.another_cluster.save()
        self.clusters = models.Cluster.objects.all()
        self.releases = models.Release.objects.all()
        self.roles = models.Role.objects.all()
        self.nodes = models.Node.objects.all()
        self.points = models.Point.objects.all()
        self.com = models.Com.objects.all()
        self.node_url = reverse('node_handler',
                                kwargs={'node_id': self.nodes[0].id})

        self.meta_json = json.dumps(self.new_meta)

    def tearDown(self):
        self.another_cluster.delete()
        self.node.delete()
        self.role.delete()
        self.another_role.delete()
        self.recipe.delete()
        self.second_recipe.delete()
        self.third_recipe.delete()
        pass

    def test_all_api_urls_500(self):
        test_urls = {}
@ -107,16 +64,23 @@ class TestHandlers(TestCase):
            test_urls[pattern.name] = pattern.callback.handler.allowed_methods

        url_ids = {
            'cluster_handler': {'cluster_id': 1},
            'cluster_handler': {'cluster_id': self.clusters[0].id},
            'node_handler': {'node_id': 'A' * 12},
            'task_handler': {'task_id': 'a' * 36},
            'network_handler': {'network_id': 1},
            'release_handler': {'release_id': 1},
            'role_handler': {'role_id': 1},
            'node_role_available': {'node_id': 'A' * 12, 'role_id': 1},
            'recipe_handler': {'recipe_id': 1},
            'attribute_handler': {'attribute_id': 1},
            'deployment_type_collection_handler': {'cluster_id': 1},
            'release_handler': {'release_id': self.releases[0].id},
            'role_handler': {'role_id': self.roles[0].id},
            'endpoint_handler': {'node_id': self.nodes[0].id,
                                 'component_name': 'abc'},
            'point_handler': {'point_id': self.points[0].id},
            'com_handler': {'component_id': self.com[0].id},
            'node_role_available': {
                'node_id': 'A' * 12,
                'role_id': self.roles[0].id
            },
            'deployment_type_collection_handler': {
                'cluster_id': self.clusters[0].id
            },
        }

        skip_urls = [
@ -142,7 +106,7 @@ class TestHandlers(TestCase):
            json.dumps({
                'name': yet_another_cluster_name,
                'release': 1,
                'nodes': [self.another_node.id],
                'nodes': [self.nodes[0].id],
            }),
            "application/json"
        )
@ -152,9 +116,8 @@ class TestHandlers(TestCase):
            name=yet_another_cluster_name
        )
        self.assertEquals(len(clusters_from_db), 1)
        self.assertEquals(clusters_from_db[0].nodes.all()[0].id,
                          self.another_node.id)
        cluster = clusters_from_db[0]
        self.assertEquals(cluster.nodes.all()[0].id, self.nodes[0].id)
        self.assertEquals(len(cluster.release.networks.all()), 3)
        # test delete
        resp = self.client.delete(
@ -170,7 +133,7 @@ class TestHandlers(TestCase):

        resp = self.client.put(
            reverse('cluster_handler',
                    kwargs={'cluster_id': self.another_cluster.id}),
                    kwargs={'cluster_id': self.clusters[0].id}),
            json.dumps({'name': updated_name}),
            "application/json"
        )
@ -186,23 +149,23 @@ class TestHandlers(TestCase):
    def test_cluster_node_list_update(self):
        resp = self.client.put(
            reverse('cluster_handler', kwargs={'cluster_id': 1}),
            json.dumps({'nodes': [self.node.id]}),
            json.dumps({'nodes': [self.nodes[0].id]}),
            "application/json"
        )
        self.assertEquals(resp.status_code, 200)
        nodes_from_db = Node.objects.filter(cluster_id=1)
        self.assertEquals(len(nodes_from_db), 1)
        self.assertEquals(nodes_from_db[0].id, self.node.id)
        self.assertEquals(nodes_from_db[0].id, self.nodes[0].id)

        resp = self.client.put(
            reverse('cluster_handler', kwargs={'cluster_id': 1}),
            json.dumps({'nodes': [self.another_node.id]}),
            json.dumps({'nodes': [self.nodes[1].id]}),
            "application/json"
        )
        self.assertEquals(resp.status_code, 200)
        nodes_from_db = Node.objects.filter(cluster_id=1)
        self.assertEquals(len(nodes_from_db), 1)
        self.assertEquals(nodes_from_db[0].id, self.another_node.id)
        self.assertEquals(nodes_from_db[0].id, self.nodes[1].id)

    def test_node_creation(self):
        node_id = '080000000003'
@ -242,7 +205,7 @@ class TestHandlers(TestCase):
                               "application/json")
        self.assertEquals(resp.status_code, 200)

        nodes_from_db = Node.objects.filter(id=self.node.id)
        nodes_from_db = Node.objects.filter(id=self.nodes[0].id)
        self.assertEquals(len(nodes_from_db), 1)
        self.assertEquals(nodes_from_db[0].metadata, self.new_meta)

@ -255,18 +218,18 @@ class TestHandlers(TestCase):
    def test_node_valid_list_of_new_roles_gets_updated(self):
        resp = self.client.put(self.node_url,
                               json.dumps({
                                   'new_roles': [self.another_role.id],
                                   'new_roles': [self.roles[1].id],
                                   'redeployment_needed': True
                               }), "application/json"
                               )
        self.assertEquals(resp.status_code, 200)

        node_from_db = Node.objects.get(id=self.node.id)
        node_from_db = Node.objects.get(id=self.nodes[0].id)
        self.assertEquals(node_from_db.redeployment_needed, True)
        self.assertEquals(len(node_from_db.roles.all()), 1)
        self.assertEquals(len(node_from_db.new_roles.all()), 1)
        self.assertEquals(node_from_db.new_roles.all()[0].id,
                          self.another_role.id)
                          self.roles[1].id)

    def test_put_returns_400_if_no_body(self):
        resp = self.client.put(self.node_url, None, "application/json")
@ -284,157 +247,113 @@ class TestHandlers(TestCase):
        self.assertEquals(resp.status_code, 400)

    def test_put_returns_400_if_no_block_device_attr(self):
        meta = self.new_meta.copy()
        del meta['block_device']
        old_meta = self.nodes[0].metadata
        new_meta = self.new_meta.copy()
        del new_meta['block_device']
        resp = self.client.put(self.node_url,
                               json.dumps({'metadata': meta}),
                               json.dumps({'metadata': new_meta}),
                               "application/json")
        self.assertEquals(resp.status_code, 400)

        nodes_from_db = Node.objects.filter(id=self.node.id)
        self.assertEquals(len(nodes_from_db), 1)
        self.assertEquals(nodes_from_db[0].metadata, self.old_meta)
        node_from_db = Node.objects.get(id=self.nodes[0].id)
        self.assertEquals(node_from_db.metadata, old_meta)

    def test_put_returns_400_if_no_interfaces_attr(self):
        meta = self.new_meta.copy()
        del meta['interfaces']
        old_meta = self.nodes[0].metadata
        new_meta = self.new_meta.copy()
        del new_meta['interfaces']
        resp = self.client.put(self.node_url,
                               json.dumps({'metadata': meta}),
                               json.dumps({'metadata': new_meta}),
                               "application/json")
        self.assertEquals(resp.status_code, 400)

        nodes_from_db = Node.objects.filter(id=self.node.id)
        self.assertEquals(len(nodes_from_db), 1)
        self.assertEquals(nodes_from_db[0].metadata, self.old_meta)
        node_from_db = Node.objects.get(id=self.nodes[0].id)
        self.assertEquals(node_from_db.metadata, old_meta)

    def test_put_returns_400_if_interfaces_empty(self):
        meta = self.new_meta.copy()
        meta['interfaces'] = ""
        old_meta = self.nodes[0].metadata
        new_meta = self.new_meta.copy()
        new_meta['interfaces'] = ""
        resp = self.client.put(self.node_url,
                               json.dumps({'metadata': meta}),
                               json.dumps({'metadata': new_meta}),
                               "application/json")
        self.assertEquals(resp.status_code, 400)

        nodes_from_db = Node.objects.filter(id=self.node.id)
        self.assertEquals(len(nodes_from_db), 1)
        self.assertEquals(nodes_from_db[0].metadata, self.old_meta)
        node_from_db = Node.objects.get(id=self.nodes[0].id)
        self.assertEquals(node_from_db.metadata, old_meta)

    def test_put_returns_400_if_no_cpu_attr(self):
        meta = self.new_meta.copy()
        del meta['cpu']
        old_meta = self.nodes[0].metadata
        new_meta = self.new_meta.copy()
        del new_meta['cpu']
        resp = self.client.put(self.node_url,
                               json.dumps({'metadata': meta}),
                               json.dumps({'metadata': new_meta}),
                               "application/json")
        self.assertEquals(resp.status_code, 400)

        nodes_from_db = Node.objects.filter(id=self.node.id)
        self.assertEquals(len(nodes_from_db), 1)
        self.assertEquals(nodes_from_db[0].metadata, self.old_meta)
        node_from_db = Node.objects.get(id=self.nodes[0].id)
        self.assertEquals(node_from_db.metadata, old_meta)

    def test_put_returns_400_if_no_memory_attr(self):
        meta = self.new_meta.copy()
        del meta['memory']
        old_meta = self.nodes[0].metadata
        new_meta = self.new_meta.copy()
        del new_meta['memory']
        resp = self.client.put(self.node_url,
                               json.dumps({'metadata': meta}),
                               json.dumps({'metadata': new_meta}),
                               "application/json")
        self.assertEquals(resp.status_code, 400)

        nodes_from_db = Node.objects.filter(id=self.node.id)
        self.assertEquals(len(nodes_from_db), 1)
        self.assertEquals(nodes_from_db[0].metadata, self.old_meta)
        node_from_db = Node.objects.get(id=self.nodes[0].id)
        self.assertEquals(node_from_db.metadata, old_meta)

    def test_attribute_create(self):
        resp = self.client.put(
            reverse('attribute_collection_handler'),
            json.dumps({
                'attribute': {'a': 'av'},
                'cookbook': 'cook_name',
                'version': '0.1',
            }), "application/json"
        )
        self.assertEquals(resp.status_code, 200)
        self.assertEquals(resp.content, '1')
    # (mihgen): Disabled - we don't have attributes anymore
    #def test_attribute_create(self):
        #resp = self.client.put(
            #reverse('attribute_collection_handler'),
            #json.dumps({
                #'attribute': {'a': 'av'},
                #'cookbook': 'cook_name',
                #'version': '0.1',
            #}), "application/json"
        #)
        #self.assertEquals(resp.status_code, 200)
        #self.assertEquals(resp.content, '1')

    def test_attribute_update(self):
        resp = self.client.put(
            reverse('attribute_collection_handler'),
            json.dumps({
                'attribute': {'a': 'b'},
                'cookbook': 'cook',
                'version': '0.1',
            }), "application/json"
        )
        self.assertEquals(resp.status_code, 200)
        self.assertEquals(resp.content, '1')
        resp = self.client.put(
            reverse('attribute_collection_handler'),
            json.dumps({
                'attribute': {'a': 'new'},
                'cookbook': 'cook',
                'version': '0.1',
            }), "application/json"
        )
        self.assertEquals(resp.status_code, 200)
        self.assertEquals(resp.content, '1')
        attrs = Attribute.objects.all()
        self.assertEquals(len(attrs), 1)
        self.assertEquals(attrs[0].attribute, {'a': 'new'})

    def test_recipe_create(self):
        recipe = 'cookbook::recipe@0.1.0'
        recipe_depends = [
            'cookbook2::depend@0.0.1',
            'cookbook3::other_depend@0.1.0'
        ]
        resp = self.client.post(
            reverse('recipe_collection_handler'),
            json.dumps({
                'recipe': recipe,
                'depends': recipe_depends,
                'attribute': None,
            }),
            "application/json"
        )
        self.assertEquals(resp.status_code, 200)
        test_deps = Recipe.objects.filter(recipe__in=recipe_depends)
        self.assertEquals(len(test_deps), len(recipe_depends))

        # test duplicate
        resp = self.client.post(
            reverse('recipe_collection_handler'),
            json.dumps({
                'recipe': recipe,
                'depends': [],
            }),
            "application/json"
        )
        self.assertEquals(resp.status_code, 409)

        # test wrong format
        resp = self.client.post(
            reverse('recipe_collection_handler'),
            json.dumps({
                'recipe': 'ololo::onotole',
                'depends': []
            }),
            "application/json"
        )
        self.assertEquals(resp.status_code, 400)

        recipe_from_db = Recipe.objects.filter(recipe=recipe)
        self.assertEquals(len(recipe_from_db), 1)
    #def test_attribute_update(self):
        #resp = self.client.put(
            #reverse('attribute_collection_handler'),
            #json.dumps({
                #'attribute': {'a': 'b'},
                #'cookbook': 'cook',
                #'version': '0.1',
            #}), "application/json"
        #)
        #self.assertEquals(resp.status_code, 200)
        #self.assertEquals(resp.content, '1')
        #resp = self.client.put(
            #reverse('attribute_collection_handler'),
            #json.dumps({
                #'attribute': {'a': 'new'},
                #'cookbook': 'cook',
                #'version': '0.1',
            #}), "application/json"
        #)
        #self.assertEquals(resp.status_code, 200)
        #self.assertEquals(resp.content, '1')
        #attrs = Attribute.objects.all()
        #self.assertEquals(len(attrs), 1)
        #self.assertEquals(attrs[0].attribute, {'a': 'new'})

    def test_role_create(self):
        role_name = 'My role 3'
        role_recipes = [
            'nova::compute@0.1.0',
            'nova::monitor@0.1.0'
        ]
        role_release = self.releases[0].id
        role_components = [c.name for c in self.com]
        resp = self.client.post(
            reverse('role_collection_handler'),
            json.dumps({
                'name': role_name,
                'recipes': role_recipes
                'release': role_release,
                'components': role_components
            }),
            "application/json"
        )
@ -442,8 +361,8 @@ class TestHandlers(TestCase):

        roles_from_db = Role.objects.filter(name=role_name)
        self.assertEquals(len(roles_from_db), 1)
        recipes = [r.recipe for r in roles_from_db[0].recipes.all()]
        self.assertEquals(set(role_recipes), set(recipes))
        components = [c.name for c in roles_from_db[0].components.all()]
        self.assertEquals(set(role_components), set(components))

    @mock.patch('nailgun.tasks.deploy_cluster', celery.task.task(lambda: True))
    def test_jsons_created_for_chef_solo(self):

@ -1,6 +1,6 @@
from django.test import TestCase

from nailgun.models import Node, Role, Recipe
from nailgun.models import Node, Role


class TestNodeModel(TestCase):
@ -22,41 +22,3 @@ class TestNodeModel(TestCase):
        self.assertEquals(all_nodes[0].cluster_id, 1)
        self.assertEquals(all_nodes[0].metadata,
                          {'metakey': 'metavalue'})


class TestRolesNodesAssociation(TestCase):

    def test_roles_nodes_association(self):
        recipe = Recipe()
        recipe.recipe = 'cookbook@version::recipe'
        recipe.save()

        role1 = Role()
        role1.save()
        role1.recipes.add(recipe)
        role1.name = "My role"
        role1.save()

        role2 = Role()
        role2.save()
        role2.recipes.add(recipe)
        role2.name = "My role 2"
        role2.save()

        node1 = Node()
        node1.id = "080000000001"
        node1.cluster_id = 1
        node1.name = "test.example.com"
        node1.save()
        node1.roles = [role1]
        node1.save()
        self.assertEquals(node1.roles.all()[0].id, 1)
        self.assertEquals(role1.nodes.all()[0].id, "080000000001")
        self.assertEquals(role1.nodes.all()[0].name, "test.example.com")

        node1.roles.add(role2)
        self.assertEquals(len(node1.roles.all()), 2)

        self.assertEquals(Node.objects.filter(
            roles__name__startswith="My r")[0].id,
            "080000000001")

@ -12,7 +12,6 @@ from nailgun import tasks
from nailgun import models
from nailgun import exceptions
from nailgun import task_helpers
from nailgun.helpers import DeployGenerator


class TestTasks(TestCase):
@ -20,30 +19,15 @@ class TestTasks(TestCase):
    fixtures = ['default_cluster']

    def setUp(self):
        self.node = models.Node(id="080000000001",
                                cluster_id=1,
                                name="test.example.com",
                                ip="127.0.0.1",
                                metadata={})
        self.node.save()

        self.recipe = models.Recipe()
        self.recipe.recipe = 'cookbook::recipe@2.1'
        self.recipe.save()

        self.role = models.Role()
        self.role.save()
        self.role.recipes = [self.recipe]
        self.role.name = "My role"
        self.role.save()

        self.node.roles = [self.role]
        self.node.save()
        self.cluster = models.Cluster.objects.get(pk=1)
        self.nodes = models.Node.objects.all()
        self.node = self.nodes[0]
        self.components = models.Com.objects.all()
        self.component = self.components[0]
        self.roles = models.Role.objects.all()

    def tearDown(self):
        self.node.delete()
        self.role.delete()
        self.recipe.delete()
        pass

    @mock.patch('nailgun.tasks.tcp_ping')
    @mock.patch('nailgun.tasks.SshConnect')
@ -55,7 +39,7 @@ class TestTasks(TestCase):
        tp_mock.return_value = True

        self.assertEquals(self.node.status, "ready")
        res = tasks.bootstrap_node.delay(self.node.id)
        res = tasks.bootstrap_node.delay(self.node.id, self.component.name)
        self.assertEquals(res.state, "SUCCESS")
        node = models.Node.objects.get(id=self.node.id)
        self.assertEquals(node.status, "ready")
@ -70,11 +54,11 @@ class TestTasks(TestCase):
        self.node.save()

        tasks._provision_node = mock.MagicMock(return_value=None)
        tasks.bootstrap_node(self.node.id)
        tasks.bootstrap_node(self.node.id, self.component.name)
        self.assertEquals(tasks._provision_node.call_args_list,
                          [call(self.node.id)])
        self.assertEquals(ssh.run.call_args_list,
                          [call('/opt/nailgun/bin/deploy')])
                          [call('/opt/nailgun/bin/deploy %s' % self.component.name)])

    @mock.patch('nailgun.tasks.tcp_ping')
    @mock.patch('nailgun.tasks.SshConnect')
@ -84,7 +68,7 @@ class TestTasks(TestCase):
        tp_mock.return_value = True
        tasks._provision_node = mock.MagicMock(return_value=None)

        tasks.bootstrap_node(self.node.id)
        tasks.bootstrap_node(self.node.id, self.component.name)
        self.assertEquals(tasks._provision_node.call_args_list, [])

    @mock.patch('nailgun.tasks.tcp_ping')
@ -97,7 +81,7 @@ class TestTasks(TestCase):
        tp_mock.return_value = True

        with self.assertRaises(exceptions.DeployError):
            tasks.bootstrap_node(self.node.id)
            tasks.bootstrap_node(self.node.id, self.component.name)

    @mock.patch('nailgun.tasks.tcp_ping')
    @mock.patch('nailgun.tasks.SshConnect')
@ -109,7 +93,7 @@ class TestTasks(TestCase):
        tp_mock.return_value = True

        self.assertEquals(self.node.status, "ready")
        res = tasks.bootstrap_node.delay(self.node.id)
        res = tasks.bootstrap_node.delay(self.node.id, self.component.name)
        self.assertEquals(res.state, "FAILURE")
        self.assertIsInstance(res.result, exceptions.DeployError)
        self.assertTrue(res.ready)
@ -118,107 +102,113 @@ class TestTasks(TestCase):

    @mock.patch('nailgun.tasks.TaskPool')
    def test_one_recipe_deploy_cluster(self, tp):
        tasks.deploy_cluster('1')
        expected = [
            call(),
            call().push_task(tasks.create_solo, ('1', self.recipe.id)),
            call().push_task([{'args': [self.node.id],
                               'func': tasks.bootstrap_node, 'kwargs': {}}]),
            call().push_task(tasks.update_cluster_status, ('1',)),
            call().apply_async()
        ]
        tasks.deploy_cluster(self.cluster.id)
        expected = [call()]
        for node in self.cluster.nodes.all():
            for role in node.roles.all():
                for component in role.components.all():
                    expected.append(call().push_task([{
                        'args': [node.id, component.name],
                        'func': tasks.bootstrap_node,
                        'kwargs': {}
                    }]))
        expected.append(call().push_task(tasks.update_cluster_status,
                                         (self.cluster.id,)))
        expected.append(call().apply_async())
        self.assertEquals(tasks.TaskPool.mock_calls, expected)

    @mock.patch('nailgun.tasks.TaskPool')
    def test_deploy_cluster_with_recipe_deps(self, tp):
        # 0: 1,2; 1: 2; 2: ; 3: 2
        # Right order: 2,1,0,3
        rcps = [models.Recipe() for x in range(4)]
        for i, rec in enumerate(rcps):
            rec.recipe = 'cookbook::recipe%s@0.1' % i
            rec.save()
    # FIXME(vkramskikh): recipe test, rework using components and points
    # @mock.patch('nailgun.tasks.TaskPool')
    # def test_deploy_cluster_with_recipe_deps(self, tp):
    #     # 0: 1,2; 1: 2; 2: ; 3: 2
    #     # Right order: 2,1,0,3
    #     rcps = [models.Recipe() for x in range(4)]
    #     for i, rec in enumerate(rcps):
    #         rec.recipe = 'cookbook::recipe%s@0.1' % i
    #         rec.save()
    #
    #     rcps[0].depends = [rcps[1], rcps[2]]
    #     rcps[1].depends = [rcps[2]]
    #     rcps[2].depends = []
    #     rcps[3].depends = [rcps[2]]
    #     map(lambda r: r.save(), rcps)
    #
    #     roles = [models.Role() for x in range(3)]
    #     for i, role in enumerate(roles):
    #         role.name = "Role%s" % i
    #         role.save()
    #
    #     roles[0].recipes = [rcps[0], rcps[2]]
    #     roles[1].recipes = [rcps[3]]
    #     roles[2].recipes = [rcps[1]]
    #     map(lambda r: r.save(), roles)
    #
    #     nodes = [models.Node() for x in range(2)]
    #     for i, node in enumerate(nodes):
    #         node.name = "Node-%s" % i
    #         node.id = "FF000000000%s" % i
    #         node.ip = "127.0.0.%s" % i
    #         node.cluster_id = 1
    #         node.save()
    #     nodes[0].roles = [roles[0]]
    #     nodes[1].roles = [roles[1], roles[2]]
    #
    #     tasks.deploy_cluster('1')
    #     expected = [
    #         # init
    #         call(),
    #         # first recipe, no deps, defined in setUp
    #         call().push_task(tasks.create_solo, ('1', self.recipe.id)),
    #         call().push_task([{'args': [self.node.id, self.component.name],
    #                            'func': tasks.bootstrap_node, 'kwargs': {}}]),
    #         # Applying in order 2-> 1-> 0-> 3
    #         call().push_task(tasks.create_solo, ('1', rcps[2].id)),
    #         call().push_task([{'args': [nodes[0].id, self.component.name],
    #                            'func': tasks.bootstrap_node, 'kwargs': {}}]),
    #         call().push_task(tasks.create_solo, ('1', rcps[1].id)),
    #         call().push_task([{'args': [nodes[1].id, self.component.name],
    #                            'func': tasks.bootstrap_node, 'kwargs': {}}]),
    #         call().push_task(tasks.create_solo, ('1', rcps[0].id)),
    #         call().push_task([{'args': [nodes[0].id, self.component.name],
    #                            'func': tasks.bootstrap_node, 'kwargs': {}}]),
    #         call().push_task(tasks.create_solo, ('1', rcps[3].id)),
    #         call().push_task([{'args': [nodes[1].id, self.component.name],
    #                            'func': tasks.bootstrap_node, 'kwargs': {}}]),
    #         # Last task for chord to succeed
    #         call().push_task(tasks.update_cluster_status, ('1',)),
    #         call().apply_async()
    #     ]
    #     self.assertEquals(tasks.TaskPool.mock_calls, expected)

        rcps[0].depends = [rcps[1], rcps[2]]
        rcps[1].depends = [rcps[2]]
        rcps[2].depends = []
        rcps[3].depends = [rcps[2]]
        map(lambda r: r.save(), rcps)

        roles = [models.Role() for x in range(3)]
        for i, role in enumerate(roles):
            role.name = "Role%s" % i
            role.save()

        roles[0].recipes = [rcps[0], rcps[2]]
        roles[1].recipes = [rcps[3]]
        roles[2].recipes = [rcps[1]]
        map(lambda r: r.save(), roles)

        nodes = [models.Node() for x in range(2)]
        for i, node in enumerate(nodes):
            node.name = "Node-%s" % i
            node.id = "FF000000000%s" % i
            node.ip = "127.0.0.%s" % i
            node.cluster_id = 1
            node.save()
        nodes[0].roles = [roles[0]]
        nodes[1].roles = [roles[1], roles[2]]

        tasks.deploy_cluster('1')
        expected = [
            # init
            call(),
            # first recipe, no deps, defined in setUp
            call().push_task(tasks.create_solo, ('1', self.recipe.id)),
            call().push_task([{'args': [self.node.id],
                               'func': tasks.bootstrap_node, 'kwargs': {}}]),
            # Applying in order 2-> 1-> 0-> 3
            call().push_task(tasks.create_solo, ('1', rcps[2].id)),
            call().push_task([{'args': [nodes[0].id],
                               'func': tasks.bootstrap_node, 'kwargs': {}}]),
            call().push_task(tasks.create_solo, ('1', rcps[1].id)),
            call().push_task([{'args': [nodes[1].id],
                               'func': tasks.bootstrap_node, 'kwargs': {}}]),
            call().push_task(tasks.create_solo, ('1', rcps[0].id)),
            call().push_task([{'args': [nodes[0].id],
                               'func': tasks.bootstrap_node, 'kwargs': {}}]),
            call().push_task(tasks.create_solo, ('1', rcps[3].id)),
            call().push_task([{'args': [nodes[1].id],
                               'func': tasks.bootstrap_node, 'kwargs': {}}]),
            # Last task for chord to succeed
            call().push_task(tasks.update_cluster_status, ('1',)),
            call().apply_async()
        ]
        self.assertEquals(tasks.TaskPool.mock_calls, expected)

    def test_deploy_cluster_error_when_recipe_not_in_cluster(self):
        rcps = [models.Recipe() for x in range(4)]
        for i, rec in enumerate(rcps):
            rec.recipe = 'cookbook::recipe%s@0.1' % i
            rec.save()
        rcps[0].depends = [rcps[1], rcps[2]]
        rcps[1].depends = [rcps[2]]
        rcps[2].depends = [rcps[3]]
        rcps[3].depends = []
        map(lambda r: r.save(), rcps)

        roles = [models.Role() for x in range(3)]
        for i, role in enumerate(roles):
            role.name = "Role%s" % i
            role.save()

        roles[0].recipes = [rcps[0], rcps[3]]
        roles[1].recipes = [rcps[2]]
        map(lambda r: r.save(), roles)
        self.node.roles = roles
        self.node.save()

        graph = {}
        for recipe in models.Recipe.objects.filter(
            recipe__in=DeployGenerator.recipes(1)):
            graph[recipe.recipe] = [r.recipe for r in recipe.depends.all()]

        self.assertRaises(exceptions.DeployError, tasks.deploy_cluster, '1')
    # FIXME(vkramskikh): recipe test, rework using components and points
    # def test_deploy_cluster_error_when_recipe_not_in_cluster(self):
    #     rcps = [models.Recipe() for x in range(4)]
    #     for i, rec in enumerate(rcps):
    #         rec.recipe = 'cookbook::recipe%s@0.1' % i
    #         rec.save()
    #     rcps[0].depends = [rcps[1], rcps[2]]
    #     rcps[1].depends = [rcps[2]]
    #     rcps[2].depends = [rcps[3]]
    #     rcps[3].depends = []
    #     map(lambda r: r.save(), rcps)
    #
    #     roles = [models.Role() for x in range(3)]
    #     for i, role in enumerate(roles):
    #         role.name = "Role%s" % i
    #         role.save()
    #
    #     roles[0].recipes = [rcps[0], rcps[3]]
    #     roles[1].recipes = [rcps[2]]
    #     map(lambda r: r.save(), roles)
    #     self.node.roles = roles
    #     self.node.save()
    #
    #     graph = {}
    #     for recipe in models.Recipe.objects.filter(
    #             recipe__in=DeployGenerator.recipes(1)):
    #         graph[recipe.recipe] = [r.recipe for r in recipe.depends.all()]
    #
    #     self.assertRaises(exceptions.DeployError, tasks.deploy_cluster, '1')

    @mock.patch('nailgun.tasks.TaskPool')
    def test_deploy_cluster_takes_right_cluster(self, tp):

@@ -228,71 +218,44 @@ class TestTasks(TestCase):
        # It will be node from other cluster
        node.cluster_id = 2
        node.save()
        node.roles = [self.role]
        node.roles = [self.roles[0]]
        node.save()

        tasks.deploy_cluster('1')
        expected = [
            call(),
            call().push_task(tasks.create_solo, ('1', self.recipe.id)),
            call().push_task([{'args': [self.node.id],
                               'func': tasks.bootstrap_node, 'kwargs': {}}]),
            call().push_task(tasks.update_cluster_status, ('1',)),
            call().apply_async()
        ]
        tasks.deploy_cluster(self.cluster.id)
        expected = [call()]
        for node in self.cluster.nodes.all():
            for role in node.roles.all():
                for component in role.components.all():
                    expected.append(call().push_task([{
                        'args': [node.id, component.name],
                        'func': tasks.bootstrap_node,
                        'kwargs': {}
                    }]))
        expected.append(call().push_task(tasks.update_cluster_status,
                                         (self.cluster.id,)))
        expected.append(call().apply_async())
        self.assertEquals(tasks.TaskPool.mock_calls, expected)

    @mock.patch('nailgun.tasks.TaskPool')
    def test_deploy_cluster_nodes_with_same_recipes_generates_group(self, tp):
        # Adding second node with same recipes/roles
        node = models.Node()
        node.id = "FFF000000007"
        node.ip = "127.0.0.1"
        node.cluster_id = 1
        node.save()
        node.roles = [self.role]
        node.save()

        tasks.deploy_cluster('1')
        expected = [
            call(),
            call().push_task(tasks.create_solo, ('1', self.recipe.id)),
            call().push_task([{'args': [self.node.id],
                               'func': tasks.bootstrap_node, 'kwargs': {}},
                              {'args': [node.id],
                               'func': tasks.bootstrap_node, 'kwargs': {}}]),
            call().push_task(tasks.update_cluster_status, ('1',)),
            call().apply_async()
        ]
        self.assertEquals(tasks.TaskPool.mock_calls, expected)

    def test_create_solo_task(self):
        dummy_list = []
        tasks.create_solo(dummy_list, 1, self.recipe.id)
        with open(
            os.path.join(
                settings.CHEF_CONF_FOLDER,
                "".join([self.node.id, ".json"])
            ),
            "r"
        ) as entity:
            solo = entity.read()

        expected = {
            "cluster_id": 1,
            "components_dict": {
                self.node.id: {
                    "node_ips": {}
                }
            },
            "components_list": [
                {
                    "node_id": self.node.id,
                    "node_ips": {}
                }
            ],
            "run_list": [
                "recipe[%s]" % self.recipe.recipe
            ]
        }
        self.assertEquals(expected, json.loads(solo))
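test_create_solo_task reads back the <node_id>.json attribute file that create_solo drops into settings.CHEF_CONF_FOLDER and compares it with the expected chef-solo attributes. A small illustrative writer for the same file shape (folder, node id, and recipe values here are made up, and this writer is a sketch, not the real create_solo):

```python
import json
import os

CHEF_CONF_FOLDER = "/tmp/chef-conf"  # stands in for settings.CHEF_CONF_FOLDER
NODE_ID = "FF0000000001"             # illustrative node id


def write_solo(node_id, recipe, folder=CHEF_CONF_FOLDER):
    """Write a <node_id>.json attribute file for chef-solo."""
    if not os.path.isdir(folder):
        os.makedirs(folder)
    attrs = {
        "cluster_id": 1,
        "components_dict": {node_id: {"node_ips": {}}},
        "components_list": [{"node_id": node_id, "node_ips": {}}],
        "run_list": ["recipe[%s]" % recipe],
    }
    path = os.path.join(folder, node_id + ".json")
    with open(path, "w") as f:
        json.dump(attrs, f)
    return path


path = write_solo(NODE_ID, "sample-cook::default@0.3.0")
with open(path) as f:
    assert json.load(f)["run_list"] == ["recipe[sample-cook::default@0.3.0]"]
```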
    # FIXME(vkramskikh): recipe test, rework using components
    # def test_deploy_cluster_nodes_with_same_recipes_generates_group(self, tp):
    #     # Adding second node with same recipes/roles
    #     node = models.Node()
    #     node.id = "FFF000000007"
    #     node.ip = "127.0.0.1"
    #     node.cluster_id = 1
    #     node.save()
    #     node.roles = [self.role]
    #     node.save()
    #
    #     tasks.deploy_cluster('1')
    #     expected = [
    #         call(),
    #         call().push_task(tasks.create_solo, ('1', self.recipe.id)),
    #         call().push_task([{'args': [self.node.id, self.component.name],
    #                            'func': tasks.bootstrap_node, 'kwargs': {}},
    #                           {'args': [node.id, self.component.name],
    #                            'func': tasks.bootstrap_node, 'kwargs': {}}]),
    #         call().push_task(tasks.update_cluster_status, ('1',)),
    #         call().apply_async()
    #     ]
    #

@@ -67,7 +67,8 @@ fi

function run_tests {
  clean
  python manage.py test nailgun $noseopts $noseargs
  [ -z "$noseargs" ] && test_args=nailgun || test_args="$noseargs"
  python manage.py test $noseopts $test_args
}

run_tests || exit 1

@@ -63,11 +63,11 @@ baseurl=http://download.fedoraproject.org/pub/epel/$(CENTOS_62_MAJOR)/$(CENTOS_6
enabled=1
gpgcheck=0

[mirantis]
name=Mirantis Packages for CentOS
baseurl=http://moc-ci.srt.mirantis.net/rpm
enabled=1
gpgcheck=0
# [mirantis]
# name=Mirantis Packages for CentOS
# baseurl=http://moc-ci.srt.mirantis.net/rpm
# enabled=1
# gpgcheck=0
endef

$/etc/yum.repos.d/base.repo: export contents:=$(yum_base_repo)

@@ -1,31 +1,30 @@
apache2
apt-utils
chef
cloud-init
cobbler
cobbler-web
dnsmasq
tftpd-hpa
apt-utils
rsync
apache2
libapache2-mod-wsgi
python-dev
python-django
libapache2-svn
python-ipaddr
createrepo
debconf-utils
dnsmasq
libapache2-mod-wsgi
libapache2-svn
libhttpclient-ruby
libjson-ruby
runit
redis-server
python-pip
python-paramiko
python-virtualenv
python-dev
libxml2-dev
cloud-init
ntp
ohai
python-dev
python-django
python-ipaddr
python-paramiko
python-pip
python-virtualenv
redis-server
rsync
rubygems
ruby-json
runit
tftpd-hpa
yum
yum-utils
createrepo
ntp

@@ -1,24 +1,29 @@
ruby
ruby-devel
ruby-ri
ruby-rdoc
ruby-shadow
ruby-mysql
gcc
gcc-c++
automake
autoconf
make
automake
cronie
crontabs
curl
dmidecode
rubygems
openssh-server
gcc
gcc-c++
make
mysql
mysql-server
rabbitmq-server
openstack-keystone
openstack-glance
wget
crontabs
cronie
ntp
numpy
openssh-server
openstack-dashboard
openstack-glance
openstack-keystone
openstack-nova
openstack-nova-novncproxy
novnc
rabbitmq-server
ruby
ruby-devel
rubygems
ruby-mysql
ruby-rdoc
ruby-ri
ruby-shadow
wget

@@ -7,22 +7,103 @@
        {"name": "management", "access": "private"},
        {"name": "storage", "access": "private"}
    ],
    "roles": [{
        "name": "role1",
        "recipes": [
            "sample-cook::compute@0.3.0",
            "sample-cook::monitor@0.3.0"
        ]
    },{
        "name": "role2",
        "recipes": [
            "sample-cook::default@0.3.0"
        ]
    },{
        "name": "mysql_server",
        "recipes": [
            "mysql::server@0.1.0"
        ]
    }
    "roles": [
        {
            "name": "role1",
            "components": [
                "sample_compute",
                "sample_monitor"
            ]
        },
        {
            "name": "role2",
            "components": [
                "sample_default"
            ]
        },
        {
            "name": "mysql_server",
            "components": [
                "mysql_server"
            ]
        }
    ],
    "components": [
        {
            "name": "sample_default",
            "deploy": {
                "driver": "chef-solo",
                "driver_args": {
                    "run_list": [
                        "recipe[sample-cook::default@0.3.0]"
                    ]
                }
            },
            "provides": ["default_endpoint"],
            "requires": ["monitor_endpoint"]
        },
        {
            "name": "sample_compute",
            "deploy": {
                "driver": "chef-solo",
                "driver_args": {
                    "run_list": [
                        "recipe[sample-cook::compute@0.3.0]"
                    ]
                }
            },
            "provides": ["compute_endpoint"],
            "requires": ["default_endpoint", "monitor_endpoint"]
        },
        {
            "name": "sample_monitor",
            "deploy": {
                "driver": "chef-solo",
                "driver_args": {
                    "run_list": [
                        "recipe[sample-cook::monitor@0.3.0]"
                    ]
                }
            },
            "provides": ["monitor_endpoint"],
            "requires": []
        },
        {
            "name": "mysql_server",
            "deploy": {
                "driver": "chef-solo",
                "driver_args": {
                    "run_list": [
                        "recipe[mysql::server@0.1.0]"
                    ]
                }
            },
            "provides": ["mysql_endpoint"],
            "requires": []
        }

    ],
    "points": [
        {
            "name": "mysql_endpoint",
            "scheme": {
            }
        },
        {
            "name": "default_endpoint",
            "scheme": {
            }
        },
        {
            "name": "monitor_endpoint",
            "scheme": {
            }
        },
        {
            "name": "compute_endpoint",
            "scheme": {
            }
        }
    ]

}

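In the new release format a role no longer lists recipes directly: it names components, each component carries a chef-solo run_list under deploy/driver_args, and provides/requires tie components to named points. One plausible way to resolve a role back to a run list (a sketch over a trimmed copy of the data above; the resolver itself is an assumption, not code from this commit):

```python
# Trimmed copy of the release structures above.
release = {
    "roles": [
        {"name": "role1", "components": ["sample_compute", "sample_monitor"]},
    ],
    "components": [
        {"name": "sample_compute",
         "deploy": {"driver": "chef-solo",
                    "driver_args": {
                        "run_list": ["recipe[sample-cook::compute@0.3.0]"]}},
         "provides": ["compute_endpoint"],
         "requires": ["default_endpoint", "monitor_endpoint"]},
        {"name": "sample_monitor",
         "deploy": {"driver": "chef-solo",
                    "driver_args": {
                        "run_list": ["recipe[sample-cook::monitor@0.3.0]"]}},
         "provides": ["monitor_endpoint"],
         "requires": []},
    ],
}

# Index components by name for lookup from a role.
components = dict((c["name"], c) for c in release["components"])


def run_list_for(role):
    """Concatenate the chef-solo run lists of a role's components."""
    run_list = []
    for name in role["components"]:
        run_list.extend(components[name]["deploy"]["driver_args"]["run_list"])
    return run_list


print run_list_for(release["roles"][0])
# ['recipe[sample-cook::compute@0.3.0]', 'recipe[sample-cook::monitor@0.3.0]']
```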
@@ -5,6 +5,7 @@ import logging
import time
import json
import urllib2
import pprint
from unittest import TestCase
from subprocess import Popen, PIPE

@@ -39,6 +40,7 @@ class TestNode(TestCase):
        self.slave_host = None
        self.slave_user = "root"
        self.slave_passwd = "r00tme"
        self.release_id = None

    def setUp(self):
        admin_node = ci.environment.node['admin']
@@ -64,14 +66,46 @@ class TestNode(TestCase):
            COOKBOOKS_PATH,
            SAMPLE_REMOTE_PATH
        )

        attempts = 0
        while True:
            releases = json.loads(self.client.get(
                "http://%s:8000/api/releases/" % self.admin_host
            ))

            for r in releases:
                logging.debug("Found release name: %s" % r["name"])
                if r["name"] == "Sample release":
                    logging.debug("Sample release id: %s" % r["id"])
                    self.release_id = r["id"]
                    break

            if self.release_id:
                break

            if attempts >= 1:
                raise Exception("Release is not found")

            logging.error("Sample release is not found. Trying to upload")
            with self.remote.sudo:
                cmd = "/opt/nailgun/bin/create_release -f %s" % \
                    release_remote_path
                logging.info("Launching command: %s" % cmd)
                res = self.remote.execute(cmd)
                if res['exit_status'] != 0:
                    self.remote.disconnect()
                    raise Exception("Command failed: %s" % str(res))
            attempts += 1

        commands = [
            "/opt/nailgun/bin/install_cookbook %s" % cookbook_remote_path,
            "/opt/nailgun/bin/create_release %s" % release_remote_path
            "/opt/nailgun/bin/install_cookbook %s" % cookbook_remote_path
        ]
        logging.info("Loading cookbooks to database...")
        with self.remote.sudo:
            for cmd in commands:
                logging.info("Launching command: %s" % cmd)
                res = self.remote.execute(cmd)
                logging.debug("Command result: %s" % pprint.pformat(res))
                if res['exit_status'] != 0:
                    self.remote.disconnect()
                    raise Exception("Command failed: %s" % str(res))

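The setUp change retries the release lookup: query /api/releases/, and if "Sample release" is missing, upload it with create_release -f and look again, giving up after the second miss. The same find-or-create-with-bounded-retries shape, pulled out as a standalone helper (a hedged generalization; names and the in-memory data are illustrative):

```python
def find_or_create(find, create, max_attempts=2):
    """Return find()'s result, running create() after a miss;
    give up after max_attempts lookups."""
    for attempt in range(max_attempts):
        result = find()
        if result is not None:
            return result
        create()  # e.g. shell out to create_release, then re-check
    raise Exception("not found after %d attempts" % max_attempts)


# Usage sketch against an in-memory release list.
releases = [{"id": 3, "name": "Sample release"}]

release_id = find_or_create(
    find=lambda: next(
        (r["id"] for r in releases if r["name"] == "Sample release"), None),
    create=lambda: None,  # would upload the release file here
)
assert release_id == 3
```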
@@ -114,7 +148,8 @@ class TestNode(TestCase):
        logging.info("No clusters found - creating test cluster...")
        cluster = self.client.post(
            "http://%s:8000/api/clusters" % self.admin_host,
            data='{ "name": "MyOwnPrivateCluster", "release": 1 }'
            data='{ "name": "MyOwnPrivateCluster", "release": %s }' % \
                self.release_id
        )
        cluster = json.loads(cluster)

@@ -130,12 +165,16 @@ class TestNode(TestCase):
            raise ValueError("Failed to add node into cluster")

        roles_uploaded = json.loads(self.client.get(
            "http://%s:8000/api/roles/" % self.admin_host
            "http://%s:8000/api/roles?release_id=%s" % \
                (self.admin_host, self.release_id)
        ))

        """
        FIXME
        WILL BE CHANGED WHEN RENDERING WILL BE REWRITTEN
        """
        roles_ids = [
            role["id"] for role in roles_uploaded \
                if role["recipes"][0].startswith("sample-cook") \
                or role["recipes"][0].startswith("mysql")
            role["id"] for role in roles_uploaded
        ]

        resp = json.loads(self.client.put(
@@ -153,7 +192,8 @@ class TestNode(TestCase):

        logging.info("Provisioning...")
        task = json.loads(self.client.put(
            "http://%s:8000/api/clusters/1/changes/" % self.admin_host
            "http://%s:8000/api/clusters/1/changes/" % self.admin_host,
            log=True
        ))
        task_id = task['task_id']
        logging.info("Task created: %s" % task_id)
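The changes PUT only returns a task id; a follow-up step would typically poll that task until the deployment settles. A sketch of such polling — note that the /api/tasks/<id>/ endpoint and the "status" field are assumptions, since this commit only shows task creation:

```python
import json
import time
import urllib2


def wait_for_task(host, task_id, timeout=300, interval=5):
    """Poll a task until it leaves the 'running' state or timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        # Hypothetical endpoint; not shown in this commit.
        resp = urllib2.urlopen(
            "http://%s:8000/api/tasks/%s/" % (host, task_id))
        task = json.loads(resp.read())
        if task.get("status") != "running":
            return task
        time.sleep(interval)
    raise Exception("task %s timed out" % task_id)
```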