nailgun reborn

This commit is contained in:
Nikolay Markov 2012-09-03 12:47:29 +04:00 committed by BeachHead Jenkins CI
parent 62d62de2c3
commit 6b2e54eae6
121 changed files with 1293 additions and 5128 deletions

20
nailgun/api/fields.py Normal file
View File

@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-
import json
import sqlalchemy.types as types
class JSON(types.TypeDecorator):
    """SQLAlchemy column type that stores JSON-serialized data in TEXT."""
    impl = types.Text

    def process_bind_param(self, value, dialect):
        """Serialize the Python value to a JSON string before storage."""
        if value is None:
            return None
        return json.dumps(value)

    def process_result_value(self, value, dialect):
        """Deserialize the stored JSON string back into Python objects."""
        if value is None:
            return None
        return json.loads(value)

349
nailgun/api/handlers.py Normal file
View File

@ -0,0 +1,349 @@
# -*- coding: utf-8 -*-
import json
import logging
import web
import ipaddr
import settings
from helpers.vlan import VlanManager
from api.models import Release, Cluster, Node, Role, Network
def check_client_content_type(handler):
    """web.py processor: reject non-JSON requests under /api with 415.

    A missing Content-Type header is treated as "application/json".
    """
    content_type = web.ctx.env.get("CONTENT_TYPE", "application/json")
    if content_type != "application/json" \
        and web.ctx.path.startswith("/api"):
        raise web.unsupportedmediatype
    return handler()
class JSONHandler(object):
    """Base handler: serializes model instances to plain dicts."""
    fields = []

    @classmethod
    def render(cls, instance, fields=None):
        """Return a dict of *instance* attributes named in *fields*
        (falls back to ``cls.fields`` when *fields* is not given)."""
        selected = fields if fields else cls.fields
        return dict((name, getattr(instance, name)) for name in selected)
class ClusterHandler(JSONHandler):
    """REST handler for a single cluster resource."""
    # Scalar fields serialized by the base render().
    fields = (
        "id",
        "name",
        "release_id"
    )

    @classmethod
    def render(cls, instance, fields=None):
        """Serialize a cluster, embedding its nodes as rendered dicts."""
        json_data = JSONHandler.render(instance, fields=cls.fields)
        json_data["nodes"] = map(
            NodeHandler.render,
            instance.nodes
        )
        return json_data

    def GET(self, cluster_id):
        """GET /clusters/<id> -> cluster JSON, or 404."""
        web.header('Content-Type', 'application/json')
        q = web.ctx.orm.query(Cluster)
        cluster = q.filter(Cluster.id == cluster_id).first()
        if not cluster:
            return web.notfound()
        return json.dumps(
            self.render(cluster),
            indent=4
        )

    def PUT(self, cluster_id):
        """PUT /clusters/<id>: set attributes from the JSON body.

        A "nodes" key is treated as a list of node ids which are
        *appended* to the cluster's node list, not replacing it.
        """
        web.header('Content-Type', 'application/json')
        q = web.ctx.orm.query(Cluster).filter(Cluster.id == cluster_id)
        cluster = q.first()
        if not cluster:
            return web.notfound()
        # additional validation needed?
        data = Cluster.validate_json(web.data())
        # /additional validation needed?
        for key, value in data.iteritems():
            if key == "nodes":
                nodes = web.ctx.orm.query(Node).filter(
                    Node.id.in_(value)
                )
                map(cluster.nodes.append, nodes)
            else:
                setattr(cluster, key, value)
        web.ctx.orm.add(cluster)
        web.ctx.orm.commit()
        return json.dumps(
            self.render(cluster),
            indent=4
        )

    def DELETE(self, cluster_id):
        """DELETE /clusters/<id> -> 204 on success, 404 if missing."""
        cluster = web.ctx.orm.query(Cluster).filter(
            Cluster.id == cluster_id
        ).first()
        if not cluster:
            return web.notfound()
        web.ctx.orm.delete(cluster)
        web.ctx.orm.commit()
        raise web.webapi.HTTPError(
            status="204 No Content",
            data=""
        )
class ClusterCollectionHandler(JSONHandler):
    """REST handler for the cluster collection."""

    def GET(self):
        """GET /clusters -> JSON list of all clusters."""
        web.header('Content-Type', 'application/json')
        return json.dumps(map(
            ClusterHandler.render,
            web.ctx.orm.query(Cluster).all()
        ), indent=4)

    def POST(self):
        """POST /clusters: create a cluster, attach the given nodes, and
        carve one free /24 out of the configured address pools for each
        network declared in the release metadata. Responds 201."""
        web.header('Content-Type', 'application/json')
        data = Cluster.validate(web.data())
        release = web.ctx.orm.query(Release).get(data["release"])
        cluster = Cluster(
            name=data["name"],
            release=release
        )
        # TODO: discover how to add multiple objects
        if 'nodes' in data and data['nodes']:
            nodes = web.ctx.orm.query(Node).filter(
                Node.id.in_(data['nodes'])
            )
            map(cluster.nodes.append, nodes)
        web.ctx.orm.add(cluster)
        web.ctx.orm.commit()
        network_objects = web.ctx.orm.query(Network)
        for network in release.networks_metadata:
            # Scan the pools configured for this access mode and take
            # the first /24 that no existing Network row occupies.
            for nw_pool in settings.NETWORK_POOLS[network['access']]:
                nw_ip = ipaddr.IPv4Network(nw_pool)
                new_network = None
                for net in nw_ip.iter_subnets(new_prefix=24):
                    nw_exist = network_objects.filter(
                        Network.network == str(net)
                    ).first()
                    if not nw_exist:
                        new_network = net
                        break
                if new_network:
                    break
            # NOTE(review): if every pool is exhausted, new_network stays
            # None and the indexing below raises — confirm intended.
            nw = Network(
                release=release.id,
                name=network['name'],
                access=network['access'],
                network=str(new_network),
                gateway=str(new_network[1]),
                range_l=str(new_network[3]),
                range_h=str(new_network[-1]),
                vlan_id=VlanManager.generate_id(network['name'])
            )
            web.ctx.orm.add(nw)
            web.ctx.orm.commit()
        raise web.webapi.created(json.dumps(
            ClusterHandler.render(cluster),
            indent=4
        ))
class ReleaseHandler(JSONHandler):
    """REST handler for a single release resource."""
    # NOTE(review): "id" is absent from the rendered fields — confirm
    # clients do not rely on it in single-release responses.
    fields = (
        "name",
        "version",
        "description",
        "networks_metadata"
    )

    def GET(self, release_id):
        """GET /releases/<id> -> release JSON, or 404."""
        web.header('Content-Type', 'application/json')
        q = web.ctx.orm.query(Release)
        release = q.filter(Release.id == release_id).first()
        if not release:
            return web.notfound()
        return json.dumps(
            self.render(release),
            indent=4
        )

    def PUT(self, release_id):
        """PUT /releases/<id>: set attributes from the JSON body."""
        web.header('Content-Type', 'application/json')
        q = web.ctx.orm.query(Release)
        release = q.filter(Release.id == release_id).first()
        if not release:
            return web.notfound()
        # additional validation needed?
        data = Release.validate_json(web.data())
        # /additional validation needed?
        for key, value in data.iteritems():
            setattr(release, key, value)
        web.ctx.orm.commit()
        return json.dumps(
            self.render(release),
            indent=4
        )

    def DELETE(self, release_id):
        """DELETE /releases/<id> -> 204 on success, 404 if missing."""
        release = web.ctx.orm.query(Release).filter(
            Release.id == release_id
        ).first()
        if not release:
            return web.notfound()
        web.ctx.orm.delete(release)
        web.ctx.orm.commit()
        raise web.webapi.HTTPError(
            status="204 No Content",
            data=""
        )
class ReleaseCollectionHandler(JSONHandler):
    """REST handler for the release collection."""

    def GET(self):
        """GET /releases -> JSON list of all releases."""
        web.header('Content-Type', 'application/json')
        return json.dumps(map(
            ReleaseHandler.render,
            web.ctx.orm.query(Release).all()
        ), indent=4)

    def POST(self):
        """POST /releases: create a release from the validated JSON
        body and respond 201 with the rendered release."""
        web.header('Content-Type', 'application/json')
        data = Release.validate(web.data())
        release = Release()
        for key, value in data.iteritems():
            setattr(release, key, value)
        web.ctx.orm.add(release)
        web.ctx.orm.commit()
        raise web.webapi.created(json.dumps(
            ReleaseHandler.render(release),
            indent=4
        ))
class NodeHandler(JSONHandler):
    """REST handler for a single node resource."""
    fields = ('id', 'name', 'roles', 'status', 'mac', 'fqdn', 'ip',
              'manufacturer', 'platform_name', 'redeployment_needed',
              'os_platform')

    def GET(self, node_id):
        """GET /nodes/<id> -> node JSON, or 404."""
        web.header('Content-Type', 'application/json')
        q = web.ctx.orm.query(Node)
        node = q.filter(Node.id == node_id).first()
        if not node:
            return web.notfound()
        return json.dumps(
            self.render(node),
            indent=4
        )

    def PUT(self, node_id):
        """PUT /nodes/<id>: set attributes from the JSON body; 400 on
        an empty/invalid payload, 404 if the node is missing."""
        web.header('Content-Type', 'application/json')
        q = web.ctx.orm.query(Node)
        node = q.filter(Node.id == node_id).first()
        if not node:
            return web.notfound()
        # additional validation needed?
        data = Node.validate_update(web.data())
        if not data:
            raise web.badrequest()
        # /additional validation needed?
        for key, value in data.iteritems():
            setattr(node, key, value)
        web.ctx.orm.commit()
        return json.dumps(
            self.render(node),
            indent=4
        )

    def DELETE(self, node_id):
        """DELETE /nodes/<id> -> 204 on success, 404 if missing."""
        node = web.ctx.orm.query(Node).filter(
            Node.id == node_id
        ).first()
        if not node:
            return web.notfound()
        web.ctx.orm.delete(node)
        web.ctx.orm.commit()
        raise web.webapi.HTTPError(
            status="204 No Content",
            data=""
        )
class NodeCollectionHandler(JSONHandler):
    """REST handler for the node collection."""

    def GET(self):
        """GET /nodes -> JSON list of all nodes."""
        web.header('Content-Type', 'application/json')
        return json.dumps(map(
            NodeHandler.render,
            web.ctx.orm.query(Node).all()
        ), indent=4)

    def POST(self):
        """POST /nodes: create a node (MAC required by Node.validate)
        and respond 201 with the rendered node."""
        web.header('Content-Type', 'application/json')
        data = Node.validate(web.data())
        node = Node()
        for key, value in data.iteritems():
            setattr(node, key, value)
        web.ctx.orm.add(node)
        web.ctx.orm.commit()
        raise web.webapi.created(json.dumps(
            NodeHandler.render(node),
            indent=4
        ))
class RoleCollectionHandler(JSONHandler):
    """REST handler for the role collection.

    The JSON request body may carry "release_id" (filter roles by
    release) or "node_id" (annotate each role with availability).
    """

    def GET(self):
        web.header('Content-Type', 'application/json')
        data = Role.validate_json(web.data())
        if 'release_id' in data:
            # BUGFIX: filter by the Role.release_id foreign key; the
            # original compared the role *primary key* to the release
            # id, returning unrelated roles.
            return json.dumps(map(
                RoleHandler.render,
                web.ctx.orm.query(Role).filter(
                    Role.release_id == data["release_id"]
                )
            ), indent=4)
        roles = web.ctx.orm.query(Role).all()
        if 'node_id' in data:
            result = []
            for role in roles:
                # TODO role filtering
                # use request.form.cleaned_data['node_id'] to filter roles
                if False:
                    continue
                # if the role is suitable for the node, set 'available' field
                # to True. If it is not, set it to False and also describe the
                # reason in 'reason' field of rendered_role
                rendered_role = RoleHandler.render(role)
                rendered_role['available'] = True
                result.append(rendered_role)
            return json.dumps(result)
        else:
            return json.dumps(map(RoleHandler.render, roles))
class RoleHandler(JSONHandler):
    """REST handler for a single role resource."""
    fields = ('id', 'name')

    def GET(self, role_id):
        """GET /roles/<id> -> role JSON, or 404."""
        # Consistency fix: every other handler in this module declares
        # the JSON response Content-Type; this one previously did not.
        web.header('Content-Type', 'application/json')
        q = web.ctx.orm.query(Role)
        role = q.filter(Role.id == role_id).first()
        if not role:
            return web.notfound()
        return json.dumps(
            self.render(role),
            indent=4
        )

178
nailgun/api/models.py Normal file
View File

@ -0,0 +1,178 @@
# -*- coding: utf-8 -*-
import json
import web
import ipaddr
from sqlalchemy import Column, UniqueConstraint, Table
from sqlalchemy import Integer, String, Unicode, Boolean, ForeignKey, Enum
from sqlalchemy import create_engine
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
import settings
from api.fields import JSON
from api.validators import BasicValidator
# Module-level engine and declarative base shared by all models.
engine = create_engine(settings.DATABASE_ENGINE)
Base = declarative_base()


class Release(Base, BasicValidator):
    """An OS release, uniquely identified by (name, version)."""
    __tablename__ = 'releases'
    __table_args__ = (
        UniqueConstraint('name', 'version'),
    )
    id = Column(Integer, primary_key=True)
    name = Column(Unicode(100), nullable=False)
    version = Column(String(30), nullable=False)
    description = Column(Unicode)
    networks_metadata = Column(JSON)
    roles = relationship("Role", backref="release")
    clusters = relationship("Cluster", backref="release")

    @classmethod
    def validate(cls, data):
        """Validate a release-creation payload.

        Requires "name" and "version", rejects duplicates, and checks
        each networks_metadata entry for a name and a known access mode
        (defaulting networks_metadata to [] when absent).
        """
        d = cls.validate_json(data)
        if "name" not in d:
            raise web.webapi.badrequest(
                message="No release name specified"
            )
        if "version" not in d:
            raise web.webapi.badrequest(
                message="No release version specified"
            )
        # BUGFIX: the original combined the two criteria with Python's
        # ``and``, which does not produce a SQL AND over SQLAlchemy
        # column expressions. Passing both criteria to filter() ANDs
        # them in SQL, so only an exact (name, version) match conflicts.
        if web.ctx.orm.query(Release).filter(
            Release.name == d["name"],
            Release.version == d["version"]
        ).first():
            raise web.webapi.conflict
        if "networks_metadata" in d:
            for network in d["networks_metadata"]:
                if "name" not in network or "access" not in network:
                    raise web.webapi.badrequest(
                        message="Invalid network data: %s" % str(network)
                    )
                if network["access"] not in settings.NETWORK_POOLS:
                    raise web.webapi.badrequest(
                        message="Invalid access mode for network"
                    )
        else:
            d["networks_metadata"] = []
        return d
class Role(Base):
    """A deployment role belonging to a release; unique per (name, release)."""
    __tablename__ = 'roles'
    __table_args__ = (
        UniqueConstraint('name', 'release_id'),
    )
    id = Column(Integer, primary_key=True)
    name = Column(Unicode(100), nullable=False)
    release_id = Column(Integer, ForeignKey('releases.id'), nullable=False)
class Cluster(Base, BasicValidator):
    """A named cluster of nodes deployed from one release."""
    __tablename__ = 'clusters'
    id = Column(Integer, primary_key=True)
    name = Column(Unicode(100), unique=True, nullable=False)
    release_id = Column(Integer, ForeignKey('releases.id'), nullable=False)
    nodes = relationship("Node", backref="cluster")

    @classmethod
    def validate(cls, data):
        """Validate a cluster-creation payload.

        Requires a unique "name" and a valid "release" id; returns the
        parsed dict on success, raises 400/409 otherwise.
        """
        d = cls.validate_json(data)
        # Robustness fix: missing keys previously raised KeyError and
        # surfaced as HTTP 500; report a clean 400 instead.
        if not d.get("name"):
            raise web.webapi.badrequest(
                message="No cluster name specified"
            )
        if web.ctx.orm.query(Cluster).filter(
            Cluster.name == d["name"]
        ).first():
            raise web.webapi.conflict
        # The POST handler dereferences d["release"] unconditionally, so
        # treat a missing release id as a client error here.
        if not d.get("release"):
            raise web.webapi.badrequest(message="Invalid release id")
        release = web.ctx.orm.query(Release).get(d["release"])
        if not release:
            raise web.webapi.badrequest(message="Invalid release id")
        return d
# Many-to-many link tables: a node's *current* roles and the *pending*
# roles to apply on the next redeployment.
nodes_roles = Table('nodes_roles', Base.metadata,
    Column('node', Integer, ForeignKey('nodes.id')),
    Column('role', Integer, ForeignKey('roles.id'))
)
nodes_new_roles = Table('nodes_new_roles', Base.metadata,
    Column('node', Integer, ForeignKey('nodes.id')),
    Column('role', Integer, ForeignKey('roles.id'))
)
class Node(Base, BasicValidator):
    """A physical/virtual host that may belong to a cluster."""
    __tablename__ = 'nodes'
    # Allowed values for the ``status`` column.
    NODE_STATUSES = (
        'offline',
        'ready',
        'discover',
        'deploying',
        'error'
    )
    id = Column(Integer, primary_key=True)
    cluster_id = Column(Integer, ForeignKey('clusters.id'))
    name = Column(Unicode(100))
    status = Column(Enum(*NODE_STATUSES), nullable=False, default='ready')
    meta = Column(JSON)
    mac = Column(String(17), nullable=False)
    ip = Column(String(15))
    fqdn = Column(String(255))
    manufacturer = Column(Unicode(50))
    platform_name = Column(String(150))
    os_platform = Column(String(150))
    # Current roles vs. roles queued for the next redeployment.
    roles = relationship("Role",
                         secondary=nodes_roles,
                         backref="nodes")
    new_roles = relationship("Role",
                             secondary=nodes_new_roles)
    redeployment_needed = Column(Boolean, default=False)

    @classmethod
    def validate(cls, data):
        """Creation payload must at least carry a MAC address."""
        d = cls.validate_json(data)
        if not "mac" in d:
            raise web.webapi.badrequest(message="No mac address specified")
        return d

    @classmethod
    def validate_update(cls, data):
        """Update payload: "status", when present, must be a known value."""
        d = cls.validate_json(data)
        if "status" in d and d["status"] not in cls.NODE_STATUSES:
            raise web.webapi.badrequest(message="Invalid status for node")
        return d
class IPAddr(Base):
    """An IP assignment linking a node to a network; its table also
    serves as the node<->network association table below."""
    __tablename__ = 'ip_addrs'
    id = Column(Integer, primary_key=True)
    network = Column(Integer, ForeignKey('networks.id'))
    node = Column(Integer, ForeignKey('nodes.id'))
    ip_addr = Column(String(25))
class Network(Base, BasicValidator):
    """A logical network carved out of a release's address pools."""
    __tablename__ = 'networks'
    id = Column(Integer, primary_key=True)
    release = Column(Integer, ForeignKey('releases.id'), nullable=False)
    name = Column(Unicode(20), nullable=False)
    access = Column(String(20), nullable=False)
    vlan_id = Column(Integer)
    # CIDR string, e.g. "10.0.0.0/24" (see ClusterCollectionHandler.POST).
    network = Column(String(25), nullable=False)
    range_l = Column(String(25))  # low end of the usable address range
    range_h = Column(String(25))  # high end of the usable address range
    gateway = Column(String(25))
    nodes = relationship("Node",
                         secondary=IPAddr.__table__,
                         backref="networks")

    @property
    def netmask(self):
        """Dotted-quad netmask derived from the stored CIDR."""
        return str(ipaddr.IPv4Network(self.network).netmask)

    @property
    def broadcast(self):
        """Broadcast address derived from the stored CIDR."""
        return str(ipaddr.IPv4Network(self.network).broadcast)

18
nailgun/api/urls.py Normal file
View File

@ -0,0 +1,18 @@
#!/usr/bin/env python
import web
from api.handlers import ClusterHandler, ClusterCollectionHandler
from api.handlers import ReleaseHandler, ReleaseCollectionHandler
from api.handlers import NodeHandler, NodeCollectionHandler
# URL routing table for the JSON API (handler names resolved from locals()).
urls = (
    r'/releases/?$', 'ReleaseCollectionHandler',
    r'/releases/(?P<release_id>\d+)/?$', 'ReleaseHandler',
    r'/clusters/?$', 'ClusterCollectionHandler',
    r'/clusters/(?P<cluster_id>\d+)/?$', 'ClusterHandler',
    r'/nodes/?$', 'NodeCollectionHandler',
    r'/nodes/(?P<node_id>\d+)/?$', 'NodeHandler',
)
api_app = web.application(urls, locals())

22
nailgun/api/validators.py Normal file
View File

@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-
import json
import web
class BasicValidator(object):
    """Mixin providing JSON request-body validation for models."""

    @classmethod
    def validate_json(cls, data):
        """Parse *data* as JSON, raising HTTP 400 on malformed input.

        Falsy input (e.g. an empty request body) is returned unchanged.
        """
        if data:
            try:
                res = json.loads(data)
            except ValueError:
                # BUGFIX: only trap JSON parse errors; the original bare
                # ``except:`` also swallowed SystemExit/KeyboardInterrupt
                # and masked genuine programming errors as 400s.
                raise web.webapi.badrequest(
                    message="Invalid json format!"
                )
            return res
        return data

    @classmethod
    def validate(cls, data):
        """Model-specific validation hook; subclasses must override."""
        raise NotImplementedError("You should override this method")

37
nailgun/db.py Normal file
View File

@ -0,0 +1,37 @@
# -*- coding: utf-8 -*-
import web
from sqlalchemy.orm import scoped_session, sessionmaker
from api.models import engine, Release
def load_db_driver(handler):
    """web.py processor: attach a scoped SQLAlchemy session to web.ctx.

    Commits on success and on HTTPError (web.py signals normal outcomes
    such as 201/204 by raising HTTPError, so those must still commit);
    rolls back on any other exception. The ``finally`` commit also runs
    after a rollback, committing the then-empty transaction.
    """
    web.ctx.orm = scoped_session(sessionmaker(bind=engine))
    try:
        return handler()
    except web.HTTPError:
        web.ctx.orm.commit()
        raise
    except:
        web.ctx.orm.rollback()
        raise
    finally:
        web.ctx.orm.commit()
def syncdb():
    """Create all model tables that do not yet exist."""
    from api.models import Base
    Base.metadata.create_all(engine)
def dropdb():
    """Drop every model table (destroys all data)."""
    from api.models import Base
    Base.metadata.drop_all(engine)
def flush():
    """Delete all rows from every table, children first, keeping the schema."""
    from api.models import Base
    session = scoped_session(sessionmaker(bind=engine))
    # Reverse dependency order so foreign keys never dangle mid-flush.
    for table in reversed(Base.metadata.sorted_tables):
        session.execute(table.delete())
    session.commit()

18
nailgun/helpers/vlan.py Normal file
View File

@ -0,0 +1,18 @@
# -*- coding: utf-8 -*-
class VlanManager(object):
    """Static mapping from network name to VLAN id.

    A stub for some real logic in the future.
    """

    vlan_ids = {
        'admin': 100,
        'storage': 200,
        'public': 300,
        'floating': 400,
        'fixed': 500,
    }

    @classmethod
    def generate_id(cls, name):
        """Return the VLAN id assigned to network *name*.

        Raises KeyError for unknown network names.
        """
        return cls.vlan_ids[name]

View File

@ -1,11 +0,0 @@
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # sys.path.insert(0, os.getcwd())
    # Legacy Django entry point: forward CLI arguments to Django's
    # management command dispatcher.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nailgun.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)

View File

@ -1,7 +0,0 @@
# Aggregate unit-test target delegating to the nailgun test runner.
test-unit: test-unit-nailgun

.PHONY: test-unit-nailgun
test-unit-nailgun:
	cd nailgun && ./run_tests.sh

View File

@ -1,120 +0,0 @@
import os
import sys
import time
import signal
import threading
import atexit
import Queue
# Shared state for the source-change monitor thread.
_interval = 1.0          # polling period in seconds
_times = {}              # path -> last observed mtime
_files = []              # extra files registered via track()
_running = False
_queue = Queue.Queue()   # used to wake/stop the monitor thread
_lock = threading.Lock()
def _restart(path):
    """Announce that *path* changed and signal the process to restart."""
    _queue.put(True)
    prefix = 'monitor (pid=%d):' % os.getpid()
    print >> sys.stderr, '%s Change detected to \'%s\'.' % (prefix, path)
    print >> sys.stderr, '%s Triggering process restart.' % prefix
    # SIGINT lets the hosting process shut down and be restarted.
    os.kill(os.getpid(), signal.SIGINT)
def _modified(path):
    """Return True when *path* was modified or removed since last check."""
    try:
        # If path doesn't denote a file and were previously
        # tracking it, then it has been removed or the file type
        # has changed so force a restart. If not previously
        # tracking the file then we can ignore it as probably
        # pseudo reference such as when file extracted from a
        # collection of modules contained in a zip file.
        if not os.path.isfile(path):
            return path in _times
        # Check for when file last modified.
        mtime = os.stat(path).st_mtime
        if path not in _times:
            _times[path] = mtime
        # Force restart when modification time has changed, even
        # if time now older, as that could indicate older file
        # has been restored.
        if mtime != _times[path]:
            return True
    except:
        # If any exception occured, likely that file has been
        # been removed just before stat(), so force a restart.
        return True
    return False
def _monitor():
    """Thread body: poll imported modules and tracked files for changes.

    Returns (ending the thread) either after triggering a restart or
    when something is pushed onto ``_queue`` (shutdown signal).
    """
    while 1:
        # Check modification times on all files in sys.modules.
        for module in sys.modules.values():
            if not hasattr(module, '__file__'):
                continue
            path = getattr(module, '__file__')
            if not path:
                continue
            # Map compiled artifacts back to the .py source file.
            if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']:
                path = path[:-1]
            if _modified(path):
                return _restart(path)
        # Check modification times on files which have
        # specifically been registered for monitoring.
        for path in _files:
            if _modified(path):
                return _restart(path)
        # Go to sleep for specified interval.
        try:
            # A successful get() means shutdown was requested.
            return _queue.get(timeout=_interval)
        except:
            pass
# Daemon worker running the polling loop; started lazily by start().
_thread = threading.Thread(target=_monitor)
_thread.setDaemon(True)


def _exiting():
    """atexit hook: wake the monitor thread and wait for it to finish."""
    try:
        _queue.put(True)
    except:
        pass
    _thread.join()

atexit.register(_exiting)
def track(path):
    """Register an extra file to be watched for modifications."""
    if not path in _files:
        _files.append(path)
def start(interval=1.0):
    """Start the monitor thread once, keeping the smallest interval requested."""
    global _interval
    if interval < _interval:
        _interval = interval
    global _running
    # Lock guards against two callers starting the thread concurrently.
    _lock.acquire()
    if not _running:
        prefix = 'monitor (pid=%d):' % os.getpid()
        print >> sys.stderr, '%s Starting change monitor.' % prefix
        _running = True
        _thread.start()
    _lock.release()

64
nailgun/nailgun.py Executable file
View File

@ -0,0 +1,64 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import logging
import web
import db
from api.handlers import check_client_content_type
from unit_test import TestRunner
from urls import urls
logging.basicConfig(level="DEBUG")

# Global web.py application with DB-session and content-type processors.
app = web.application(urls, locals())
app.add_processor(db.load_db_driver)
app.add_processor(check_client_content_type)

if __name__ == "__main__":
    # CLI front end: run | runwsgi | test | syncdb.
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(
        dest="action", help='actions'
    )
    run_parser = subparsers.add_parser(
        'run', help='run application locally'
    )
    runwsgi_parser = subparsers.add_parser(
        'runwsgi', help='run WSGI application'
    )
    test_parser = subparsers.add_parser(
        'test', help='run unit tests'
    )
    syncdb_parser = subparsers.add_parser(
        'syncdb', help='sync application database'
    )
    params, other_params = parser.parse_known_args()
    # Drop the action argument so web.py's own argv parsing (port, etc.)
    # does not trip over it.
    sys.argv.pop(1)
    if params.action == "syncdb":
        logging.info("Syncing database...")
        db.syncdb()
        logging.info("Done")
    elif params.action == "test":
        logging.info("Running tests...")
        TestRunner.run()
        logging.info("Done")
    elif params.action == "run":
        app.run()
    elif params.action == "runwsgi":
        logging.info("Running WSGI app...")
        server = web.httpserver.WSGIServer(
            ("0.0.0.0", 8080),
            app.wsgifunc()
        )
        try:
            server.start()
        except KeyboardInterrupt:
            logging.info("Stopping WSGI app...")
            server.stop()
            logging.info("Done")
    else:
        parser.print_help()

BIN
nailgun/nailgun.sqlite~ Normal file

Binary file not shown.

View File

@ -1,31 +0,0 @@
import json
import urllib
import httplib
from urlparse import urlparse
def query_api(url, method='GET', params=None):
    """Perform an HTTP request against *url* and decode a JSON response.

    GET/DELETE parameters are appended to the query string; POST/PUT
    parameters are form-encoded into the body.

    Returns a (status_code, decoded_json_or_None) tuple.
    Raises ValueError for unsupported HTTP methods.
    """
    if method not in ('GET', 'POST', 'PUT', 'DELETE'):
        raise ValueError("Invalid method %s" % method)
    # BUGFIX: the default was the mutable literal ``{}`` — a single dict
    # shared across calls; use the None sentinel instead.
    if params is None:
        params = {}
    parsed_url = urlparse(url)
    body = None
    path = parsed_url.path
    if method in ('POST', 'PUT'):
        body = urllib.urlencode(params)
    elif params:
        path = "%s?%s" % (path, urllib.urlencode(params))
    conn = httplib.HTTPConnection(parsed_url.netloc)
    conn.request(method, path, body)
    response = conn.getresponse()
    raw_data = response.read()
    data = None
    try:
        data = json.loads(raw_data)
    except ValueError:
        pass
    return (response.status, data)

View File

@ -1,3 +0,0 @@
import re
from django.db import models
from django import forms

View File

@ -1,160 +0,0 @@
import re
import simplejson as json
from django.core.exceptions import ValidationError
from django import forms
from django.forms.fields import Field, IntegerField, CharField, ChoiceField, \
BooleanField
from django.core.validators import RegexValidator
from nailgun.models import Cluster
from nailgun.models import Node
from nailgun.models import Role
from nailgun.models import Release
from nailgun.models import Network
from nailgun.models import Point
from nailgun.models import Com
import nailgun.api.validators as vld
import logging
logger = logging.getLogger('forms')
class RoleFilterForm(forms.Form):
    """Optional filters for role listings."""
    node_id = Field(required=False, validators=[vld.validate_node_id])
    release_id = Field(required=False, validators=[])
class RoleCreateForm(forms.ModelForm):
    """Creation form for Role."""
    components = Field(validators=[], required=False)

    def clean_components(self):
        # Keep only component names that exist for the submitted release.
        return [c.name for c in Com.objects.filter(
            name__in=self.data['components'],
            release=Release.objects.get(id=self.data['release'])
        )]

    class Meta:
        model = Role
class PointFilterForm(forms.Form):
    """Optional release filter for point listings."""
    release = IntegerField(required=False)


class PointUpdateForm(forms.ModelForm):
    scheme = Field(validators=[])

    class Meta:
        model = Point
        # Identity and relationship fields are immutable on update.
        exclude = ('name', 'release', 'provided_by', 'required_by')


class PointCreateForm(forms.ModelForm):
    scheme = Field(required=False, validators=[])

    class Meta:
        model = Point
        exclude = ('provided_by', 'required_by')


class ComFilterForm(forms.Form):
    """Optional release filter for component listings."""
    release = IntegerField(required=False)
class ComCreateForm(forms.ModelForm):
    """Creation form for Com (component); requires/provides are cleaned
    to the names of Point objects belonging to the submitted release."""
    deploy = Field(validators=[])
    requires = Field(validators=[], required=False)
    provides = Field(validators=[], required=False)

    def clean_requires(self):
        return [p.name for p in Point.objects.filter(
            name__in=self.data['requires'],
            release=Release.objects.get(id=self.data['release'])
        )]

    def clean_provides(self):
        return [p.name for p in Point.objects.filter(
            name__in=self.data['provides'],
            release=Release.objects.get(id=self.data['release'])
        )]

    class Meta:
        model = Com
        # BUGFIX: ('roles') is just the string 'roles', not a tuple —
        # Django iterates exclude, so each *character* was treated as a
        # field name and 'roles' itself was never excluded.
        exclude = ('roles',)
class ClusterForm(forms.Form):
    """Update form for clusters; task modification is forbidden."""
    name = CharField(max_length=100, required=False)
    nodes = Field(required=False, validators=[vld.validate_node_ids])
    task = Field(required=False, validators=[vld.forbid_modifying_tasks])


class ClusterCreationForm(forms.ModelForm):
    """Creation form for clusters."""
    nodes = Field(required=False, validators=[vld.validate_node_ids])
    task = Field(required=False, validators=[vld.forbid_modifying_tasks])

    class Meta:
        model = Cluster
class NodeForm(forms.Form):
    """Update form covering all mutable node attributes."""
    metadata = Field(required=False, validators=[vld.validate_node_metadata])
    status = ChoiceField(required=False, choices=Node.NODE_STATUSES)
    name = CharField(max_length=100, required=False)
    fqdn = CharField(max_length=255, required=False)
    ip = CharField(max_length=15, required=False)
    mac = CharField(max_length=17, required=False)
    manufacturer = CharField(max_length=50, required=False)
    platform_name = CharField(max_length=150, required=False)
    os_platform = CharField(max_length=150, required=False)
    # Direct role edits are forbidden; changes go through new_roles.
    roles = Field(required=False, validators=[vld.forbid_modifying_roles])
    new_roles = Field(required=False, validators=[vld.validate_node_roles])
    redeployment_needed = BooleanField(required=False)


class NodeCreationForm(NodeForm):
    # Creation additionally requires an explicit node id.
    id = CharField(validators=[vld.validate_node_id])


class NodeFilterForm(forms.Form):
    """Optional cluster filter for node listings."""
    cluster_id = IntegerField(required=False)
class ReleaseCreationForm(forms.ModelForm):
    """Creation form for releases."""
    networks_metadata = Field(validators=[vld.validate_networks_metadata])

    class Meta:
        model = Release

    def clean(self):
        return self.cleaned_data


class NetworkCreationForm(forms.ModelForm):
    """Creation form for networks; IP-valued fields are validated."""
    release = CharField()
    network = CharField(validators=[vld.validate_network])
    range_l = CharField(validators=[vld.validate_ip])
    range_h = CharField(validators=[vld.validate_ip])
    gateway = CharField(validators=[vld.validate_ip])

    class Meta:
        model = Network

    def clean_release(self):
        """Resolve the submitted release id to a Release instance."""
        release_id = self.cleaned_data["release"]
        if not release_id:
            raise ValidationError("Release id not specified!")
        try:
            r = Release.objects.get(id=release_id)
        except Release.DoesNotExist:
            raise ValidationError("Invalid release id!")
        #self.instance.release = r
        return r

View File

@ -1,803 +0,0 @@
import os
import copy
import re
import celery
import ipaddr
import json
from piston.handler import BaseHandler, HandlerMetaClass
from piston.utils import rc, validate
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from django.db import models
from nailgun.models import Cluster
from nailgun.models import Release
from nailgun.models import Role
from nailgun.models import Com
from nailgun.models import Point
from nailgun.models import EndPoint
from nailgun.models import Network
from nailgun.models import Node
from nailgun.models import Task
from nailgun.deployment_types import deployment_types
from nailgun.api.validators import validate_json, validate_json_list
from nailgun.api.forms import ClusterForm
from nailgun.api.forms import ClusterCreationForm
from nailgun.api.forms import RoleFilterForm
from nailgun.api.forms import RoleCreateForm
from nailgun.api.forms import PointFilterForm
from nailgun.api.forms import PointUpdateForm
from nailgun.api.forms import PointCreateForm
from nailgun.api.forms import ComFilterForm
from nailgun.api.forms import ComCreateForm
from nailgun.api.forms import NodeCreationForm
from nailgun.api.forms import NodeFilterForm
from nailgun.api.forms import NodeForm
from nailgun.api.forms import ReleaseCreationForm
from nailgun.api.forms import NetworkCreationForm
from nailgun import tasks
import nailgun.api.validators as vld
from nailgun.helpers import DeployManager
from nailgun.helpers import DeployDriver
import logging
logger = logging.getLogger(__name__)
# Registry mapping model class name -> handler class (filled by metaclass).
handlers = {}


class HandlerRegistrator(HandlerMetaClass):
    """Metaclass that registers each handler class under its model's name."""
    def __init__(cls, name, bases, dct):
        super(HandlerRegistrator, cls).__init__(name, bases, dct)
        if hasattr(cls, 'model'):
            key = cls.model.__name__
            if key in handlers:
                raise Exception("Handler for %s already registered" % key)
            handlers[key] = cls
class JSONHandler(BaseHandler):
    """
    Basic JSON handler
    """
    __metaclass__ = HandlerRegistrator
    fields = None

    @classmethod
    def render(cls, item, fields=None):
        """Serialize *item* to a dict.

        Each entry of ``fields`` is either a plain attribute name, or a
        tuple (name, subfield, ...) where '*' means "all of the related
        handler's fields". Related managers render to lists; related
        model instances render via their registered handler or collapse
        to ``.id``; None values are omitted entirely.
        """
        json_data = {}
        use_fields = fields if fields else cls.fields
        if not use_fields:
            raise ValueError("No fields for serialize")
        for field in use_fields:
            if isinstance(field, (tuple,)):
                logger.debug("rendering: field is a tuple: %s" % str(field))
                if field[1] == '*':
                    subfields = None
                else:
                    subfields = field[1:]
                value = getattr(item, field[0])
                if value is None:
                    pass
                elif value.__class__.__name__ in ('ManyRelatedManager',
                                                  'RelatedManager'):
                    try:
                        handler = handlers[value.model.__name__]
                        json_data[field[0]] = [
                            handler.render(o, fields=subfields) \
                            for o in value.all()]
                    except KeyError:
                        raise Exception("No handler for %s" % \
                            value.model.__name__)
                elif value.__class__.__name__ in handlers:
                    handler = handlers[value.__class__.__name__]
                    json_data[field[0]] = handler.render(value,
                                                         fields=subfields)
                else:
                    # No handler registered: fall back to the raw id.
                    json_data[field[0]] = value.id
            else:
                value = getattr(item, field)
                if value is None:
                    pass
                elif value.__class__.__name__ in ('ManyRelatedManager',
                                                  'RelatedManager',):
                    json_data[field] = [getattr(o, 'id') \
                        for o in value.all()]
                elif value.__class__.__name__ in handlers:
                    json_data[field] = value.id
                else:
                    json_data[field] = value
        return json_data
class TaskHandler(JSONHandler):
    """Read-only handler exposing background task status."""
    allowed_methods = ('GET',)
    model = Task

    @classmethod
    def render(cls, task, fields=None):
        """Serialize a task; accumulated errors join into one string."""
        result = {
            'task_id': task.pk,
            'name': task.name,
            'ready': task.ready,
        }
        errors = task.errors
        if len(errors):
            result['error'] = '; '.join(map(lambda e: e.__str__(), errors))
        return result

    def read(self, request, task_id):
        """GET a single task by id, 404 when missing."""
        try:
            task = Task.objects.get(id=task_id)
        except ObjectDoesNotExist:
            return rc.NOT_FOUND
        return TaskHandler.render(task)
class ClusterChangesHandler(BaseHandler):
    """Apply (PUT) or discard (DELETE) a cluster's pending changes."""
    allowed_methods = ('PUT', 'DELETE')

    def update(self, request, cluster_id):
        """Promote pending roles, refresh networks, and start deployment.

        Refuses with DUPLICATE_ENTRY while a previous task is still
        running; a finished task is deleted and replaced.
        """
        try:
            cluster = Cluster.objects.get(id=cluster_id)
        except ObjectDoesNotExist:
            return rc.NOT_FOUND
        logger.debug("Cluster changes: Checking if another task is running")
        if cluster.task:
            if cluster.task.ready:
                cluster.task.delete()
            else:
                response = rc.DUPLICATE_ENTRY
                response.content = "Another task is running"
                return response
        logger.debug("Cluster changes: Updating node roles")
        for node in cluster.nodes.filter(redeployment_needed=True):
            node.roles = node.new_roles.all()
            node.new_roles.clear()
            node.redeployment_needed = False
            node.save()
        logger.debug("Cluster changes: Updating node networks")
        for nw in cluster.release.networks.all():
            for node in cluster.nodes.all():
                nw.update_node_network_info(node)
        logger.debug("Cluster changes: Trying to instantiate cluster")
        dm = DeployManager(cluster_id)
        dm.clean_cluster()
        dm.instantiate_cluster()
        logger.debug("Cluster changes: Trying to deploy cluster")
        task = Task(task_name='deploy_cluster', cluster=cluster)
        task.run(cluster_id)
        response = rc.ACCEPTED
        response.content = TaskHandler.render(task)
        return response

    def delete(self, request, cluster_id):
        """Discard pending role changes without deploying."""
        try:
            cluster = Cluster.objects.get(id=cluster_id)
        except ObjectDoesNotExist:
            return rc.NOT_FOUND
        for node in cluster.nodes.filter(redeployment_needed=True):
            node.new_roles.clear()
            node.redeployment_needed = False
            node.save()
        return rc.DELETED
class DeploymentTypeCollectionHandler(BaseHandler):
    """List the known deployment types for an existing cluster."""
    allowed_methods = ('GET',)

    def read(self, request, cluster_id):
        try:
            cluster = Cluster.objects.get(id=cluster_id)
        except ObjectDoesNotExist:
            return rc.NOT_FOUND
        return map(DeploymentTypeHandler.render, deployment_types.values())


class DeploymentTypeHandler(JSONHandler):
    """Assign a deployment type's roles to a cluster."""
    allowed_methods = ('PUT',)
    fields = ('id', 'name', 'description')

    def update(self, request, cluster_id, deployment_type_id):
        try:
            cluster = Cluster.objects.get(id=cluster_id)
            # NOTE(review): an unknown deployment_type_id raises KeyError,
            # which this except clause does not catch — confirm intended.
            deployment_type = deployment_types[deployment_type_id]
        except ObjectDoesNotExist:
            return rc.NOT_FOUND
        deployment_type.assign_roles(cluster)
        return {}
class EndPointCollectionHandler(BaseHandler):
    """List endpoints, or compute deploy data for one node+component."""
    allowed_methods = ('GET',)

    def read(self, request, node_id=None, component_name=None):
        if not node_id or not component_name:
            return map(EndPointHandler.render,
                       EndPoint.objects.all())
        try:
            node = Node.objects.get(id=node_id)
            component = Com.objects.get(
                name=component_name,
                release=node.cluster.release
            )
            dd = DeployDriver(node, component)
            return dd.deploy_data()
        except:
            # NOTE(review): bare except maps *every* failure to 404,
            # including programming errors — consider narrowing.
            return rc.NOT_FOUND


class EndPointHandler(JSONHandler):
    model = EndPoint

    @classmethod
    def render(cls, endpoint):
        # Endpoints serialize as their raw stored data.
        return endpoint.data
class ClusterCollectionHandler(BaseHandler):
    """Old Django/piston cluster collection handler."""
    allowed_methods = ('GET', 'POST')

    def read(self, request):
        """GET: render every cluster."""
        json_data = map(
            ClusterHandler.render,
            Cluster.objects.all()
        )
        return json_data

    @validate_json(ClusterCreationForm)
    def create(self, request):
        """POST: create a cluster, allocate a /24 per release network
        out of the configured pools, then attach the given nodes."""
        data = request.form.cleaned_data
        try:
            cluster = Cluster.objects.get(
                name=data['name']
            )
            return rc.DUPLICATE_ENTRY
        except Cluster.DoesNotExist:
            pass
        cluster = Cluster()
        # Copy only keys actually submitted; node wiring happens below.
        for key, value in request.form.cleaned_data.items():
            if key in request.form.data:
                if key != 'nodes':
                    setattr(cluster, key, value)
        cluster.save()
        # TODO: solve vlan issues
        vlan_ids = {
            'storage': 200,
            'public': 300,
            'floating': 400,
            'fixed': 500,
            'admin': 100
        }
        for network in cluster.release.networks_metadata:
            access = network['access']
            if access not in settings.NETWORK_POOLS:
                raise Exception("Incorrect access mode for network")
            for nw_pool in settings.NETWORK_POOLS[access]:
                nw_ip = ipaddr.IPv4Network(nw_pool)
                new_network = None
                # Take the first /24 not already held by a Network row.
                for net in nw_ip.iter_subnets(new_prefix=24):
                    try:
                        nw_exist = Network.objects.get(network=net)
                    except Network.DoesNotExist:
                        new_network = net
                        break
                if new_network:
                    break
            nw = Network(
                release=cluster.release,
                name=network['name'],
                access=access,
                network=str(new_network),
                gateway=str(new_network[1]),
                range_l=str(new_network[3]),
                range_h=str(new_network[-1]),
                vlan_id=vlan_ids[network['name']]
            )
            nw.save()
        if 'nodes' in request.form.data:
            nodes = Node.objects.filter(
                id__in=request.form.cleaned_data['nodes']
            )
            cluster.nodes = nodes
        return ClusterHandler.render(cluster)
class ClusterHandler(JSONHandler):
    """Single-cluster API: fetch, update (fields/nodes/task), delete."""

    allowed_methods = ('GET', 'PUT', 'DELETE')
    model = Cluster
    # Tuples denote nested rendering; '*' expands the full related object.
    fields = ('id', 'name',
              ('nodes', '*'),
              ('release', '*'), 'task')

    def read(self, request, cluster_id):
        logger.debug("Cluster reading: id: %s" % cluster_id)
        try:
            cluster = Cluster.objects.get(id=cluster_id)
            return ClusterHandler.render(cluster)
        except ObjectDoesNotExist:
            return rc.NOT_FOUND

    @validate_json(ClusterForm)
    def update(self, request, cluster_id):
        try:
            cluster = Cluster.objects.get(id=cluster_id)
            for key, value in request.form.cleaned_data.items():
                if key in request.form.data:
                    if key == 'nodes':
                        # Replace node membership wholesale.
                        new_nodes = Node.objects.filter(id__in=value)
                        cluster.nodes = new_nodes
                    elif key == 'task':
                        # Any submitted 'task' value drops the current task.
                        cluster.task.delete()
                    else:
                        setattr(cluster, key, value)
            cluster.save()
            return ClusterHandler.render(cluster)
        except ObjectDoesNotExist:
            return rc.NOT_FOUND

    def delete(self, request, cluster_id):
        try:
            cluster = Cluster.objects.get(id=cluster_id)
            cluster.delete()
            return rc.DELETED
        except ObjectDoesNotExist:
            return rc.NOT_FOUND
class NodeCollectionHandler(BaseHandler):
    """Collection API for nodes: list (optionally by cluster), create."""

    allowed_methods = ('GET', 'POST')

    @validate(NodeFilterForm, 'GET')
    def read(self, request):
        queryset = Node.objects.all()
        if 'cluster_id' in request.form.data:
            cluster_id = request.form.cleaned_data['cluster_id']
            queryset = queryset.filter(cluster_id=cluster_id)
        return [NodeHandler.render(node) for node in queryset]

    @validate_json(NodeCreationForm)
    def create(self, request):
        node = Node()
        for key, value in request.form.cleaned_data.items():
            # Only fields actually present in the request are applied;
            # the 'new_roles' relation is never set at creation time.
            if key not in request.form.data or key == 'new_roles':
                continue
            setattr(node, key, value)
        node.save()
        return NodeHandler.render(node)
class NodeHandler(JSONHandler):
    """Single-node API: fetch, upsert via PUT, delete."""

    allowed_methods = ('GET', 'PUT', 'DELETE')
    model = Node
    fields = ('id', 'name', 'info', 'status', 'mac', 'fqdn', 'ip',
              'manufacturer', 'platform_name', 'redeployment_needed',
              ('roles', '*'), ('new_roles', '*'), 'os_platform')

    def read(self, request, node_id):
        try:
            node = Node.objects.get(id=node_id)
            return NodeHandler.render(node)
        except ObjectDoesNotExist:
            return rc.NOT_FOUND

    @validate_json(NodeForm)
    def update(self, request, node_id):
        # PUT is an upsert: an unknown node id creates a new node record.
        node, is_created = Node.objects.get_or_create(id=node_id)
        for key, value in request.form.cleaned_data.items():
            if key in request.form.data:
                if key == 'new_roles':
                    # Replace the pending-roles relation wholesale.
                    new_roles = Role.objects.filter(id__in=value)
                    node.new_roles = new_roles
                else:
                    setattr(node, key, value)
        node.save()
        return NodeHandler.render(node)

    def delete(self, request, node_id):
        try:
            node = Node.objects.get(id=node_id)
            node.delete()
            return rc.DELETED
        except ObjectDoesNotExist:
            return rc.NOT_FOUND
class PointCollectionHandler(BaseHandler):
    """Collection API for points: list (optionally per release), create."""

    allowed_methods = ('GET', 'POST')

    @validate(PointFilterForm, 'GET')
    def read(self, request):
        logger.debug("Getting points from data: %s" %
                     str(request.form.data))
        if 'release' in request.form.data:
            queryset = Point.objects.filter(
                release__id=request.form.cleaned_data['release'])
        else:
            queryset = Point.objects.all()
        return [PointHandler.render(point) for point in queryset]

    @validate_json(PointCreateForm)
    def create(self, request):
        data = request.form.cleaned_data
        logger.debug("Creating Point from data: %s" % str(data))
        try:
            Point.objects.get(name=data['name'], release=data['release'])
        except Point.DoesNotExist:
            pass
        else:
            # A point with this name already exists for the release.
            return rc.DUPLICATE_ENTRY
        point = Point(name=data['name'], release=data['release'])
        point.scheme = data['scheme'] if 'scheme' in data else {}
        point.save()
        return PointHandler.render(point)
class PointHandler(JSONHandler):
    """Single-point API: fetch, and update of the scheme only."""

    allowed_methods = ('GET', 'PUT')
    model = Point
    fields = ('id', 'name', 'scheme', ('release', 'name'),
              ('required_by', 'name'),
              ('provided_by', 'name'))

    def read(self, request, point_id):
        try:
            point = Point.objects.get(id=point_id)
        except ObjectDoesNotExist:
            return rc.NOT_FOUND
        return PointHandler.render(point)

    @validate_json(PointUpdateForm)
    def update(self, request, point_id):
        data = request.form.cleaned_data
        logger.debug("Updating Point from data: %s" % str(data))
        try:
            point = Point.objects.get(id=point_id)
        except ObjectDoesNotExist:
            return rc.NOT_FOUND
        scheme = data.get('scheme', None)
        if scheme:
            point.scheme = scheme
        point.save()
        return PointHandler.render(point)
class ComCollectionHandler(BaseHandler):
    """Collection API for components ("Com"): list (optionally per
    release), create with linked requires/provides points."""

    allowed_methods = ('GET', 'POST')

    @validate(ComFilterForm, 'GET')
    def read(self, request):
        logger.debug("Getting components from data: %s" %
                     str(request.form.data))
        if 'release' in request.form.data:
            components = Com.objects.filter(
                release__id=request.form.cleaned_data['release']
            )
        else:
            components = Com.objects.all()
        return map(ComHandler.render, components)

    def _link_points(self, relation, point_names, release):
        """Attach the named points of the release to the given M2M
        relation. Returns False if any point is missing."""
        for point_name in point_names:
            try:
                point = Point.objects.get(
                    name=point_name,
                    release=release
                )
            except ObjectDoesNotExist:
                return False
            relation.add(point)
        return True

    @validate_json(ComCreateForm)
    def create(self, request):
        data = request.form.cleaned_data
        logger.debug("Creating Com from data: %s" % str(data))
        try:
            Com.objects.get(
                name=data['name'],
                release=data['release']
            )
            return rc.DUPLICATE_ENTRY
        except Com.DoesNotExist:
            pass
        component = Com(
            name=data['name'],
            release=data['release']
        )
        component.deploy = data['deploy']
        component.save()
        # NOTE(review): like the original code, a missing point below
        # leaves the already-saved component behind; consider wrapping
        # creation in a transaction.
        if data.get('requires', None):
            if not self._link_points(component.requires,
                                     data['requires'], data['release']):
                return rc.NOT_FOUND
        if data.get('provides', None):
            if not self._link_points(component.provides,
                                     data['provides'], data['release']):
                return rc.NOT_FOUND
        component.save()
        return ComHandler.render(component)
class ComHandler(JSONHandler):
    """Read-only API for a single component."""

    allowed_methods = ('GET',)
    model = Com
    fields = ('id', 'name', 'deploy', ('release', 'name'),
              ('requires', 'name'), ('provides', 'name'),
              ('roles', 'name'))

    def read(self, request, component_id):
        try:
            component = Com.objects.get(id=component_id)
        except ObjectDoesNotExist:
            return rc.NOT_FOUND
        return ComHandler.render(component)
class RoleCollectionHandler(BaseHandler):
    """Collection API for roles: list with optional release/node filters,
    create with linked components."""

    allowed_methods = ('GET', 'POST')

    @validate(RoleFilterForm, 'GET')
    def read(self, request):
        if 'release_id' in request.form.data:
            return map(
                RoleHandler.render,
                Role.objects.filter(
                    release__id=request.form.data['release_id']
                )
            )
        roles = Role.objects.all()
        if 'node_id' in request.form.data:
            # Per-node view: annotate every role with availability.
            # Filtering is not implemented yet - every role is returned
            # with available=True.
            result = []
            for role in roles:
                # TODO role filtering
                # use request.form.cleaned_data['node_id'] to filter roles
                if False:
                    continue
                # if the role is suitable for the node, set 'available' field
                # to True. If it is not, set it to False and also describe the
                # reason in 'reason' field of rendered_role
                rendered_role = RoleHandler.render(role)
                rendered_role['available'] = True
                result.append(rendered_role)
            return result
        else:
            return map(RoleHandler.render, roles)

    @validate_json(RoleCreateForm)
    def create(self, request):
        data = request.form.cleaned_data
        logger.debug("Creating Role from data: %s" % str(data))
        try:
            role = Role.objects.get(
                name=data['name'],
                release=data['release']
            )
            return rc.DUPLICATE_ENTRY
        except Role.DoesNotExist:
            pass
        role = Role(
            name=data['name'],
            release=data['release']
        )
        role.save()
        # Link existing components of the same release; a missing component
        # aborts with 404 (note: the role itself is already saved by then).
        if data.get('components', None):
            for component_name in data['components']:
                try:
                    component = Com.objects.get(
                        name=component_name,
                        release=data['release']
                    )
                except ObjectDoesNotExist:
                    return rc.NOT_FOUND
                else:
                    role.components.add(component)
        role.save()
        return RoleHandler.render(role)
class RoleHandler(JSONHandler):
    """Read-only API for a single role."""

    allowed_methods = ('GET',)
    model = Role
    fields = ('id', 'name', ('release', 'id', 'name'),
              ('components', 'name'))

    def read(self, request, role_id):
        try:
            role = Role.objects.get(id=role_id)
        except ObjectDoesNotExist:
            return rc.NOT_FOUND
        return RoleHandler.render(role)
class ReleaseCollectionHandler(BaseHandler):
    """Collection API for releases: list all, create one."""

    allowed_methods = ('GET', 'POST')
    model = Release

    def read(self, request):
        return map(ReleaseHandler.render, Release.objects.all())

    @validate_json(ReleaseCreationForm)
    def create(self, request):
        # BUG FIX: "Trying to add release" was previously logged at
        # class-body level, i.e. exactly once at import time; it is now
        # logged per create request.
        logger.warning("Trying to add release")
        data = request.form.cleaned_data
        logger.debug("Creating release from data: %s" % str(data))
        try:
            Release.objects.get(
                name=data['name'],
                version=data['version']
            )
            return rc.DUPLICATE_ENTRY
        except Release.DoesNotExist:
            pass
        release = Release(
            name=data["name"],
            version=data["version"],
            description=data["description"],
            networks_metadata=data["networks_metadata"]
        )
        release.save()
        return ReleaseHandler.render(release)
class ReleaseHandler(JSONHandler):
    """Single-release API: fetch and delete."""

    allowed_methods = ('GET', 'DELETE')
    model = Release
    fields = ('id', 'name', 'version', 'description', 'networks_metadata',
              ('roles', 'name'), ('components', 'name'),
              ('points', 'name'))

    def read(self, request, release_id):
        try:
            release = Release.objects.get(id=release_id)
        except ObjectDoesNotExist:
            return rc.NOT_FOUND
        return ReleaseHandler.render(release)

    def delete(self, request, release_id):
        try:
            release = Release.objects.get(id=release_id)
        except ObjectDoesNotExist:
            return rc.NOT_FOUND
        release.delete()
        return rc.DELETED
class NetworkHandler(JSONHandler):
    """Read-only API for a single network."""

    allowed_methods = ('GET',)
    model = Network
    fields = ('id', 'network', 'name', 'access',
              'vlan_id', 'range_l', 'range_h', 'gateway',
              'release', 'nodes',
              'release_id')

    def read(self, request, network_id):
        try:
            network = Network.objects.get(id=network_id)
        except Network.DoesNotExist:
            return rc.NOT_FOUND
        return NetworkHandler.render(network)
class NetworkCollectionHandler(BaseHandler):
    """Collection API for networks: list all, create one."""

    allowed_methods = ('GET', 'POST')

    def read(self, request):
        return map(NetworkHandler.render, Network.objects.all())

    @validate_json(NetworkCreationForm)
    def create(self, request):
        data = request.form.cleaned_data
        try:
            # FIX: the existence check previously bound its result to an
            # unused variable misleadingly named "release".
            Network.objects.get(
                name=data['name'],
                network=data['network']
            )
            return rc.DUPLICATE_ENTRY
        except Network.DoesNotExist:
            pass
        nw = Network(
            name=data['name'],
            network=data['network'],
            release=data['release'],
            access=data['access'],
            range_l=data['range_l'],
            range_h=data['range_h'],
            gateway=data['gateway'],
            vlan_id=data['vlan_id']
        )
        nw.save()
        return NetworkHandler.render(nw)

View File

@ -1,87 +0,0 @@
from django.conf.urls import patterns, include, url
from piston.resource import Resource
from nailgun.api.handlers import ClusterCollectionHandler, ClusterHandler, \
NodeCollectionHandler, NodeHandler, \
NetworkHandler, NetworkCollectionHandler, \
RoleCollectionHandler, RoleHandler, \
ReleaseCollectionHandler, ReleaseHandler, \
ClusterChangesHandler, \
DeploymentTypeCollectionHandler, \
DeploymentTypeHandler, \
TaskHandler
from nailgun.api.handlers import ComCollectionHandler
from nailgun.api.handlers import ComHandler
from nailgun.api.handlers import PointCollectionHandler
from nailgun.api.handlers import PointHandler
from nailgun.api.handlers import EndPointCollectionHandler
class JsonResource(Resource):
    """Piston Resource that always emits JSON, ignoring request format."""

    def determine_emitter(self, request, *args, **kwargs):
        # This API never serves any other representation.
        return 'json'
# URL map of the nailgun REST API. Node ids are 12 uppercase hex digits
# (a MAC address without separators); task ids are 36-char UUIDs.
urlpatterns = patterns('',
    # Clusters.
    url(r'^clusters/?$',
        JsonResource(ClusterCollectionHandler),
        name='cluster_collection_handler'),
    url(r'^clusters/(?P<cluster_id>\d+)/?$',
        JsonResource(ClusterHandler),
        name='cluster_handler'),
    # Nodes.
    url(r'^nodes/?$',
        JsonResource(NodeCollectionHandler),
        name='node_collection_handler'),
    url(r'^nodes/(?P<node_id>[\dA-F]{12})/?$',
        JsonResource(NodeHandler),
        name='node_handler'),
    # Networks.
    url(r'^networks/?$',
        JsonResource(NetworkCollectionHandler),
        name='network_collection_handler'),
    url(r'^networks/(?P<network_id>\d+)/?$',
        JsonResource(NetworkHandler),
        name='network_handler'),
    # Cluster changes and tasks.
    url(r'^clusters/(?P<cluster_id>\d+)/changes/?$',
        JsonResource(ClusterChangesHandler),
        name='cluster_changes_handler'),
    url(r'^tasks/(?P<task_id>[\da-f\-]{36})/?$',
        JsonResource(TaskHandler),
        name='task_handler'),
    # Roles, components ("coms") and points.
    url(r'^roles/?$',
        JsonResource(RoleCollectionHandler),
        name='role_collection_handler'),
    url(r'^roles/(?P<role_id>\d+)/?$',
        JsonResource(RoleHandler),
        name='role_handler'),
    url(r'^coms/?$',
        JsonResource(ComCollectionHandler),
        name='com_collection_handler'),
    url(r'^coms/(?P<component_id>\d+)/?$',
        JsonResource(ComHandler),
        name='com_handler'),
    url(r'^points/?$',
        JsonResource(PointCollectionHandler),
        name='point_collection_handler'),
    url(r'^points/(?P<point_id>\d+)/?$',
        JsonResource(PointHandler),
        name='point_handler'),
    # Endpoints: the collection handler also serves the per-node view.
    url(r'^endpoints/(?P<node_id>[\dA-F]{12})/(?P<component_name>\w+)/?$',
        JsonResource(EndPointCollectionHandler),
        name='endpoint_handler'),
    url(r'^endpoints/?$',
        JsonResource(EndPointCollectionHandler),
        name='endpoint_collection_handler'),
    # Releases and deployment types.
    url(r'^releases/?$',
        JsonResource(ReleaseCollectionHandler),
        name='release_collection_handler'),
    url(r'^releases/(?P<release_id>\d+)/?$',
        JsonResource(ReleaseHandler),
        name='release_handler'),
    url(r'^clusters/(?P<cluster_id>\d+)/deployment_types/?$',
        JsonResource(DeploymentTypeCollectionHandler),
        name='deployment_type_collection_handler'),
    url(r'^clusters/(?P<cluster_id>\d+)/deployment_types/' \
        r'(?P<deployment_type_id>\w+)/?$',
        JsonResource(DeploymentTypeHandler),
        name='deployment_type_handler'),
)

View File

@ -1,183 +0,0 @@
import json
import re
import ipaddr
from piston.utils import FormValidationError, HttpStatusCode, rc
from piston.decorator import decorator
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from nailgun.models import Cluster
from nailgun.models import Node
from nailgun.models import Role
from nailgun.models import Release
from nailgun.models import Network
import logging
logger = logging.getLogger("validators")
# Handler decorator for JSON validation using forms
def validate_json(v_form):
    """Decorator: validate a JSON request body against form class v_form.

    Rejects non-JSON content types, bodies that are not valid JSON, and
    bodies that are not a dict. On success the bound form is attached to
    the request as request.form; validation failures raise
    FormValidationError.
    """
    @decorator
    def wrap(f, self, request, *a, **kwa):
        logger.debug("Validation json: trying to find out content_type")
        content_type = request.content_type.split(';')[0]
        logger.debug("Validation json: content_type: %s" % content_type)
        if content_type != "application/json":
            response = rc.BAD_REQUEST
            response.content = "Invalid content type, must be application/json"
            raise HttpStatusCode(response)
        try:
            parsed_body = json.loads(request.body)
            logger.debug("Validation json: body: %s" % str(parsed_body))
        except (TypeError, ValueError):
            response = rc.BAD_REQUEST
            response.content = "Invalid JSON object"
            raise HttpStatusCode(response)
        if not isinstance(parsed_body, dict):
            logger.debug("Validation json: parsed_body is not dict")
            response = rc.BAD_REQUEST
            response.content = "Dictionary expected"
            raise HttpStatusCode(response)
        logger.debug("Validation json: trying to construct form from v_form")
        try:
            form = v_form(parsed_body, request.FILES)
        except Exception as e:
            # BUG FIX: the original only logged this error and fell
            # through, hitting an UnboundLocalError on "form" below
            # (HTTP 500). Report a 400 instead.
            logger.debug("Validation json: error: %s" % str(e))
            response = rc.BAD_REQUEST
            response.content = "Cannot validate request data"
            raise HttpStatusCode(response)
        logger.debug("Validation json: form: %s" % str(form))
        if form.is_valid():
            setattr(request, 'form', form)
            return f(self, request, *a, **kwa)
        else:
            raise FormValidationError(form)
    return wrap
def validate_json_list(v_form):
    """Decorator: validate a JSON request body that must be a non-empty
    list; one v_form instance per entry is bound to request.forms."""
    @decorator
    def wrap(f, self, request, *a, **kwa):
        content_type = request.content_type.split(';')[0]
        if content_type != "application/json":
            response = rc.BAD_REQUEST
            response.content = "Invalid content type, must be application/json"
            raise HttpStatusCode(response)
        try:
            parsed_body = json.loads(request.body)
        except:
            response = rc.BAD_REQUEST
            response.content = "Invalid JSON object"
            raise HttpStatusCode(response)
        if not isinstance(parsed_body, list):
            response = rc.BAD_REQUEST
            response.content = "List expected"
            raise HttpStatusCode(response)
        if not len(parsed_body):
            response = rc.BAD_REQUEST
            response.content = "No entries to update"
            raise HttpStatusCode(response)
        forms = []
        for entry in parsed_body:
            # A single invalid entry rejects the whole batch.
            form = v_form(entry, request.FILES)
            if form.is_valid():
                forms.append(form)
            else:
                raise FormValidationError(form)
        setattr(request, 'forms', forms)
        return f(self, request, *a, **kwa)
    return wrap
"""
FORM DATA VALIDATORS
"""
# A node id is 12 uppercase hex digits (a MAC address without separators).
validate_node_id = RegexValidator(regex=re.compile('^[\dA-F]{12}$'))


def validate_node_ids(value):
    """Validate that value is a list of well-formed node ids."""
    if isinstance(value, list):
        for node_id in value:
            validate_node_id(node_id)
    else:
        raise ValidationError('Node list must be a list of node IDs')
def validate_node_metadata(value):
    """Validate node metadata: None (no metadata) or a dict carrying the
    four mandatory hardware sections.

    Raises ValidationError when a section is missing or an empty string.
    """
    if value is None:
        return
    if not isinstance(value, dict):
        raise ValidationError('Node metadata must be a dictionary')
    for field in ('block_device', 'interfaces', 'cpu', 'memory'):
        # TODO(mihgen): We need more comprehensive checks here
        # For example, now, it's possible to store value[field] = []
        if field not in value or value[field] == "":
            # BUG FIX: the old backslash-continued message embedded a run
            # of indentation spaces inside the error text.
            raise ValidationError(
                "Node metadata '%s' field is required" % field)
def validate_node_roles(value):
    """Ensure value is a list whose items are all integers."""
    is_int_list = isinstance(value, list) and all(
        isinstance(item, int) for item in value)
    if not is_int_list:
        raise ValidationError('Role list must be a list of integers')
def validate_release_node_roles(data):
    """Validate a release's roles: a non-empty list of dicts, each with a
    name and a non-empty components list."""
    if not data or not isinstance(data, list):
        raise ValidationError('Invalid roles list')
    if any('name' not in role for role in data):
        raise ValidationError('Role name is empty')
    for role in data:
        if not role.get('components'):
            raise ValidationError(
                'Components list for role "%s" should not be empty'
                % role['name'])
def validate_release_points(data):
    """Validate a release's points: a non-empty list of named dicts."""
    if not data or not isinstance(data, list):
        raise ValidationError('Invalid points list')
    if any('name' not in point for point in data):
        raise ValidationError('Point name is empty')
def validate_release_components(data):
    """Validate a release's components: a non-empty list of named dicts."""
    if not data or not isinstance(data, list):
        raise ValidationError('Invalid components list')
    if any('name' not in component for component in data):
        raise ValidationError('Component name is empty')
def forbid_modifying_roles(value):
    """Validator that unconditionally rejects direct writes to the role
    list."""
    raise ValidationError('Role list cannot be modified directly')
def validate_networks_metadata(data):
    """Networks metadata must be a list."""
    if isinstance(data, list):
        return
    raise ValidationError("There should be a list of network names")
def validate_network(data):
    """Validate that data parses as an IPv4 network (CIDR form)."""
    try:
        ipaddr.IPv4Network(data)
    except (ValueError, TypeError):
        # ipaddr signals malformed input with ValueError subclasses; the
        # old bare "except" also swallowed unrelated errors, and bound
        # the parsed network to an unused local.
        raise ValidationError("Invalid network format!")
def validate_ip(data):
    """Validate that data parses as an IPv4 address."""
    try:
        ipaddr.IPv4Address(data)
    except (ValueError, TypeError):
        # See validate_network: narrow the old bare "except" and drop
        # the unused local binding.
        raise ValidationError("Invalid IP address format!")
def forbid_modifying_tasks(value):
    """Validator that unconditionally rejects direct writes to tasks."""
    raise ValidationError("Tasks cannot be modified directly")

View File

@ -1,51 +0,0 @@
import itertools
# Registry of all deployment types, keyed by their "id" attribute.
deployment_types = {}


class TypeRegistrator(type):
    """Metaclass that records every class defining an ``id`` attribute
    into the module-level ``deployment_types`` registry."""

    def __init__(cls, name, bases, namespace):
        super(TypeRegistrator, cls).__init__(name, bases, namespace)
        # Base classes without an id are deliberately left unregistered.
        if hasattr(cls, 'id'):
            deployment_types[cls.id] = cls
class BaseDeploymentType(object):
    """Common base for deployment types; subclasses that define ``id``
    are auto-registered in ``deployment_types`` by the metaclass."""
    # Python 2 metaclass declaration.
    __metaclass__ = TypeRegistrator
class SimpleDeploymentType(BaseDeploymentType):
    """Round-robin role assignment with no redundancy."""

    id = 'simple'
    name = 'Simple Deployment'
    description = 'No redundancy. Best suited for non-critical ' \
        'OpenStack installations (e.g. dev, staging, QA)'

    @classmethod
    def assign_roles(cls, cluster):
        """Assign each release role to the next cluster node in a cycle
        and mark those nodes for redeployment."""
        roles = cluster.release.roles.all()
        nodes = itertools.cycle(cluster.nodes.all())
        # FIX: dropped the unused "new_roles = {}" local.
        for role in roles:
            node = nodes.next()  # Python 2 iterator protocol
            node.new_roles.add(role)
            node.redeployment_needed = True
            node.save()
class HighAvailabilityDeploymentType(BaseDeploymentType):
    """HA deployment type.

    NOTE(review): assign_roles is currently identical to
    SimpleDeploymentType's round-robin - presumably a placeholder until a
    real HA placement strategy lands.
    """

    id = 'ha'
    name = 'HA Deployment'
    description = 'Built-in redundancy for OpenStack components ' \
        '(database, rabbitmq, nova, swift). ' \
        'Ideal for production deployments'

    @classmethod
    def assign_roles(cls, cluster):
        """Assign each release role to the next cluster node in a cycle
        and mark those nodes for redeployment."""
        roles = cluster.release.roles.all()
        nodes = itertools.cycle(cluster.nodes.all())
        # FIX: dropped the unused "new_roles = {}" local.
        for role in roles:
            node = nodes.next()  # Python 2 iterator protocol
            node.new_roles.add(role)
            node.redeployment_needed = True
            node.save()

View File

@ -1,17 +0,0 @@
# TODO(enchantner): create exceptions for handling different situations
class EmptyListError(LookupError):
    """A lookup unexpectedly produced an empty collection."""
    pass
class NotFound(LookupError):
    """A requested object could not be located."""
    pass
class SSHError(Exception):
    """SSH transport or remote command failure."""
    pass
class DeployError(Exception):
    """Deployment process failure."""
    pass

View File

@ -1,21 +0,0 @@
import os
import os.path
# Log files live in the parent directory of this settings module.
LOGPATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
LOGFILE = os.path.join(LOGPATH, "nailgun.log")
LOGLEVEL = "DEBUG"
CELERYLOGFILE = os.path.join(LOGPATH, "celery.log")
CELERYLOGLEVEL = "DEBUG"
CHEF_CONF_FOLDER = LOGPATH  # For testing purposes
# SSH key paths are only set when a HOME directory is available.
home = os.getenv("HOME")
PATH_TO_SSH_KEY = home and os.path.join(home, ".ssh", "id_rsa") or None
PATH_TO_BOOTSTRAP_SSH_KEY = home and \
    os.path.join(home, ".ssh", "bootstrap.rsa") or None
# Cobbler provisioning service endpoint, credentials and default profile.
COBBLER_URL = "http://localhost/cobbler_api"
COBBLER_USER = "cobbler"
COBBLER_PASSWORD = "cobbler"
COBBLER_PROFILE = "centos-6.3-x86_64"
# Address at which nodes reach the package repository.
REPO_ADDRESS = "127.0.0.1"

View File

@ -1,165 +0,0 @@
[
{
"model": "nailgun.release",
"pk": 1,
"fields": {
"name": "Default Release",
"version": "0.1.0",
"networks_metadata": [
{"name": "floating", "access": "public"},
{"name": "fixed", "access": "private"},
{"name": "admin", "access": "private"}
]
}
},
{
"model": "nailgun.cluster",
"pk": 1,
"fields": {
"name": "Default Cluster",
"release": 1
}
},
{
"model": "nailgun.point",
"pk": 1,
"fields": {
"name": "point0",
"release": 1,
"scheme": {
"attr0": {
"generator": "generator_ip",
"generator_args": "floating",
"attribute": "attr.path0"
}
}
}
},
{
"model": "nailgun.point",
"pk": 2,
"fields": {
"name": "point1",
"release": 1,
"scheme": {
"attr1": {
"generator": "generator_ip",
"generator_args": "floating",
"attribute": "attr.path1"
},
"attr2": {
"generator": "generator_ip",
"generator_args": "floating",
"attribute": "attr.path2"
}
}
}
},
{
"model": "nailgun.com",
"pk": 1,
"fields": {
"name": "component0",
"release": 1,
"deploy": {
"driver": "chef-solo",
"driver_args": {
"run_list": [
"recipe[cookbook0::recipe0@0.1.0]"
]
}
},
"provides": [1],
"requires": []
}
},
{
"model": "nailgun.com",
"pk": 2,
"fields": {
"name": "component1",
"release": 1,
"deploy": {
"driver": "chef-solo",
"driver_args": {
"run_list": [
"recipe[cookbook0::recipe1@0.1.0]",
"recipe[cookbook0::recipe2@0.1.0]"
]
}
},
"provides": [2],
"requires": [1]
}
},
{
"model": "nailgun.com",
"pk": 3,
"fields": {
"name": "component2",
"release": 1,
"deploy": {
"driver": "chef-solo",
"driver_args": {
"run_list": [
"recipe[cookbook1::recipe0@0.1.0]",
"recipe[cookbook2::recipe0@0.1.0]"
]
}
},
"provides": [2],
"provides": [],
"requires": [1, 2]
}
},
{
"model": "nailgun.role",
"pk": 1,
"fields": {
"name": "role1",
"release": 1,
"components": [1,2]
}
},
{
"model": "nailgun.role",
"pk": 2,
"fields": {
"name": "role2",
"release": 1,
"components": [3]
}
},
{
"model": "nailgun.node",
"pk": "080000000001",
"fields": {
"name": "test.example.com",
"ip": "127.0.0.1",
"metadata": {
"block_device": {},
"interfaces": {},
"cpu": {},
"memory": {}
},
"cluster": 1,
"roles": [1]
}
},
{
"model": "nailgun.node",
"pk": "080000000002",
"fields": {
"name": "test2.example.com",
"ip": "127.0.0.2",
"metadata": {
"block_device": {},
"interfaces": {},
"cpu": {},
"memory": {}
},
"cluster": 1,
"roles": [2]
}
}
]

View File

@ -1,384 +0,0 @@
[
{
"pk": 1,
"model": "nailgun.release",
"fields": {
"name": "Essex",
"version": "1.2.3",
"description": "Essex release description",
"networks_metadata": [
{"name": "floating", "access": "public"},
{"name": "fixed", "access": "private"},
{"name": "admin", "access": "private"}
]
}
},
{
"pk": 1,
"model": "nailgun.cluster",
"fields": {
"name": "Production",
"release": 1
}
},
{
"pk": 2,
"model": "nailgun.cluster",
"fields": {
"name": "Staging",
"release": 1
}
},
{
"model": "nailgun.point",
"pk": 1,
"fields": {
"name": "point0",
"release": 1,
"scheme": {
"attr0": {
"generator": "generator_ip",
"generator_args": "floating",
"attribute": "attr.path0"
}
}
}
},
{
"model": "nailgun.point",
"pk": 2,
"fields": {
"name": "point1",
"release": 1,
"scheme": {
"attr1": {
"generator": "generator_ip",
"generator_args": "floating",
"attribute": "attr.path1"
},
"attr2": {
"generator": "generator_ip",
"generator_args": "floating",
"attribute": "attr.path2"
}
}
}
},
{
"model": "nailgun.com",
"pk": 1,
"fields": {
"name": "component0",
"release": 1,
"deploy": {
"driver": "chef-solo",
"driver_args": {
"run_list": [
"recipe[cookbook0::recipe0@0.1.0]"
]
}
},
"provides": [1],
"requires": []
}
},
{
"model": "nailgun.com",
"pk": 2,
"fields": {
"name": "component1",
"release": 1,
"deploy": {
"driver": "chef-solo",
"driver_args": {
"run_list": [
"recipe[cookbook0::recipe1@0.1.0]",
"recipe[cookbook0::recipe2@0.1.0]"
]
}
},
"provides": [2],
"requires": [1]
}
},
{
"model": "nailgun.com",
"pk": 3,
"fields": {
"name": "component2",
"release": 1,
"deploy": {
"driver": "chef-solo",
"driver_args": {
"run_list": [
"recipe[cookbook1::recipe0@0.1.0]",
"recipe[cookbook2::recipe0@0.1.0]"
]
}
},
"provides": [2],
"provides": [],
"requires": [1, 2]
}
},
{
"model": "nailgun.role",
"pk": 1,
"fields": {
"name": "Controller",
"release": 1,
"components": [1,2]
}
},
{
"model": "nailgun.role",
"pk": 2,
"fields": {
"name": "Compute",
"release": 1,
"components": [3]
}
},
{
"pk": "111111111111",
"model": "nailgun.node",
"fields": {
"status": "ready",
"name": "",
"manufacturer": "Dell",
"platform_name": "Model-1",
"roles": [
1
],
"ip": "",
"fqdn": "",
"cluster": 1,
"mac": "C0:8D:DF:52:76:F1",
"metadata": {
"block_device": {
"ram0": {
"removable": "0",
"size": "1228800"
},
"sda": {
"vendor": "ATA",
"removable": "0",
"rev": "0.14",
"state": "running",
"timeout": "30",
"model": "QEMU HARDDISK",
"size": "16777216"
}
},
"interfaces": [
{
"addresses": {
"fe80::5054:ff:fe28:16c3": {
"prefixlen": "64",
"scope": "Link",
"family": "inet6"
},
"52:54:00:28:16:C3": {
"family": "lladdr"
},
"10.20.0.229": {
"prefixlen": "24",
"scope": "Global",
"netmask": "255.255.255.0",
"broadcast": "10.20.0.255",
"family": "inet"
}
},
"name": "eth0"
},
{
"default_interface": "eth0"
},
{
"default_gateway": "10.20.0.2"
}
],
"cpu": {
"real": 0,
"0": {
"family": "6",
"vendor_id": "GenuineIntel",
"mhz": "3192.766",
"stepping": "3",
"cache_size": "4096 KB",
"flags": [
"fpu",
"lahf_lm"
],
"model": "2",
"model_name": "QEMU Virtual CPU version 0.14.1"
},
"total": 1
},
"memory": {
"anon_pages": "16420kB",
"vmalloc_total": "34359738367kB",
"bounce": "0kB",
"active": "28576kB",
"inactive": "20460kB",
"nfs_unstable": "0kB",
"vmalloc_used": "7160kB",
"total": "1019548kB",
"slab": "16260kB",
"buffers": "4888kB",
"slab_unreclaim": "7180kB",
"swap": {
"cached": "0kB",
"total": "0kB",
"free": "0kB"
},
"dirty": "84kB",
"writeback": "0kB",
"vmalloc_chunk": "34359729156kB",
"free": "322008kB",
"page_tables": "1328kB",
"cached": "27728kB",
"commit_limit": "509772kB",
"committed_as": "54864kB",
"mapped": "5380kB",
"slab_reclaimable": "9080kB"
},
"serial": "Unknown",
"networks": {
"floating": {
"access": "public",
"device": "eth0",
"netmask": "255.255.255.0",
"vlan_id": 300,
"address": "172.18.0.2"
},
"admin": {
"access": "private",
"device": "eth0",
"netmask": "255.255.255.0",
"vlan_id": 100,
"address": "10.0.0.2"
},
"storage": {
"access": "private",
"device": "eth0",
"netmask": "255.255.255.0",
"vlan_id": 200,
"address": "10.0.1.2"
}
}
}
}
},
{
"pk": "222222222222",
"model": "nailgun.node",
"fields": {
"status": "error",
"name": "",
"manufacturer": "HP",
"platform_name": "Model-2",
"roles": [
1,
2
],
"ip": "",
"fqdn": "",
"cluster": 1,
"mac": "46:FC:5A:0C:F9:51"
}
},
{
"pk": "333333333333",
"model": "nailgun.node",
"fields": {
"status": "deploying",
"name": "",
"manufacturer": "OpenVZ",
"platform_name": "Model-3",
"roles": [
2
],
"ip": "",
"fqdn": "",
"cluster": 1,
"mac": "2E:04:78:86:69:1F"
}
},
{
"pk": "444444444444",
"model": "nailgun.node",
"fields": {
"status": "offline",
"name": "",
"manufacturer": "",
"platform_name": "No-Manufacturer",
"roles": [],
"ip": "",
"fqdn": "",
"cluster": 1,
"mac": "BC:10:A1:44:94:A0"
}
},
{
"pk": "555555555555",
"model": "nailgun.node",
"fields": {
"status": "ready",
"name": "Node with name",
"manufacturer": "VMWare",
"platform_name": "",
"roles": [],
"ip": "",
"fqdn": "",
"cluster": 1,
"mac": "B6:17:54:39:27:EA"
}
},
{
"pk": "000000000000",
"model": "nailgun.node",
"fields": {
"status": "ready",
"name": "Node without cluster",
"manufacturer": "",
"platform_name": "",
"roles": [],
"ip": "",
"fqdn": "",
"cluster": null,
"mac": "47:33:22:46:9B:92"
}
},
{
"pk": "000000000001",
"model": "nailgun.node",
"fields": {
"status": "ready",
"name": "Another node without cluster",
"manufacturer": "QEMU",
"platform_name": "",
"roles": [],
"ip": "",
"fqdn": "",
"cluster": null,
"mac": "84:67:BA:CA:69:95"
}
},
{
"pk": "000000000001",
"model": "nailgun.node",
"fields": {
"status": "ready",
"name": "node virtualbox",
"manufacturer": "virtualbox",
"platform_name": "",
"roles": [],
"ip": "",
"fqdn": "",
"cluster": null,
"mac": "3A:10:EC:04:9A:DE"
}
}
]

View File

@ -1,469 +0,0 @@
import logging
import socket
import paramiko
import copy
import string
import logging
from random import choice
import re
import time
import socket
import pprint
from nailgun import models
from nailgun import settings
from nailgun.exceptions import EmptyListError, NotFound
logger = logging.getLogger("helpers")
class SshConnect(object):
    """Thin wrapper over a paramiko Transport for running remote commands."""

    def __init__(self, host, user, keyfile=None, password=None):
        # Opens a transport to host:22, authenticating with either a
        # password or a private key file. On any failure the transport is
        # torn down before the exception is re-raised.
        try:
            self.host = host
            self.t = paramiko.Transport((host, 22))
            if password:
                self.t.connect(username=user, password=password)
            elif keyfile:
                self.t.connect(username=user,
                    pkey=paramiko.RSAKey.from_private_key_file(keyfile))
        except:
            self.close()
            raise

    def run(self, cmd, timeout=30):
        """Run cmd in a fresh channel; True iff it exited with status 0."""
        logger.debug("[%s] Running command: %s", self.host, cmd)
        chan = self.t.open_session()
        chan.settimeout(timeout)
        chan.exec_command(cmd)
        return chan.recv_exit_status() == 0

    def close(self):
        # Best-effort teardown; deliberately never raises.
        try:
            if self.t:
                self.t.close()
        except:
            pass
class EndPointDataDriver:
    """Answers per-node network questions (IP, netmask, VLAN) for the
    endpoint generators, backed by the ORM."""

    def __init__(self, node):
        self.node = node

    def node_ip(self, network_name):
        """IP address assigned to this node on the named network, or None
        if no matching assignment exists."""
        for ip_addr in models.IPAddr.objects.filter(node__id=self.node.id):
            network = models.Network.objects.get(id=ip_addr.network.id)
            if network.name == network_name:
                return ip_addr.ip_addr

    def node_netmask(self, network_name):
        """Netmask of the named network within the node's release."""
        release = self.node.cluster.release
        network = models.Network.objects.get(name=network_name,
            release=release)
        return network.netmask

    def node_vlan(self, network_name):
        """VLAN id of the named network within the node's release."""
        release = self.node.cluster.release
        network = models.Network.objects.get(name=network_name,
            release=release)
        return network.vlan_id
class EndPointManager:
    """Builds endpoint data from a scheme of generator directives.

    Each scheme entry names a generator method, its arguments, and one or
    more dotted attribute paths under which the generated value is stored
    in the resulting nested ``data`` dictionary.
    """

    def __init__(self, data_driver, name, scheme):
        self.data_driver = data_driver  # supplies per-node network facts
        self.name = name
        self.scheme = scheme  # {key: {"generator": ..., "generator_args": ..., "attribute": ...}}
        self.data = {}

    def generator_ip_repo(self, args):
        # The repository address is a deployment-wide constant.
        return settings.REPO_ADDRESS

    def generator_ip(self, network_name):
        """Node's IP address on the named network."""
        network_name = str(network_name)
        ip = self.data_driver.node_ip(network_name)
        logger.debug("EndPointManager: generator_ip: %s" % ip)
        return ip

    def generator_netmask(self, network_name):
        """Netmask of the named network."""
        network_name = str(network_name)
        netmask = self.data_driver.node_netmask(network_name)
        logger.debug("EndPointManager: generator_netmask: %s" % netmask)
        return netmask

    def generator_vlan(self, network_name):
        """VLAN id of the named network."""
        network_name = str(network_name)
        vlan_id = self.data_driver.node_vlan(network_name)
        logger.debug("EndPointManager: generator_vlan: %s" % vlan_id)
        return vlan_id

    def generator_url(self, url_args):
        """Build protocol://ip:port[url] from a dict of url parts; the ip
        is resolved from the node's address on url_args['network']."""
        url_args = dict(url_args)
        ip = self.data_driver.node_ip(url_args['network'])
        url = "%s://%s:%s%s" % (url_args['protocol'],
                                ip,
                                url_args['port'],
                                url_args.get('url', ''))
        logger.debug("EndPointManager: generator_url: %s" % url)
        return url

    def generator_transparent(self, args):
        # Pass the configured value through unchanged.
        logger.debug("EndPointManager: generator_transparent: %s" % \
            args)
        return args

    def generator_password(self, length=8):
        # Random alphanumeric password; uses random.choice, so NOT
        # cryptographically strong.
        length = int(length)
        password = ''.join(
            choice(
                ''.join((string.ascii_letters, string.digits))
            ) for _ in xrange(length)
        )
        logger.debug("EndPointManager: generator_password: %s" % \
            password)
        return password

    @classmethod
    def merge_dictionary(cls, dst, src):
        """
        'True' way of merging two dictionaries
        (python dict.update() updates just top-level keys and items)
        """
        # Iterative deep merge: nested dicts are merged; any other value
        # in src overwrites the value in dst. dst is mutated and returned.
        stack = [(dst, src)]
        while stack:
            current_dst, current_src = stack.pop()
            for key in current_src:
                if key not in current_dst:
                    current_dst[key] = current_src[key]
                else:
                    if isinstance(current_src[key], dict) \
                        and isinstance(current_dst[key], dict):
                        stack.append((current_dst[key], current_src[key]))
                    else:
                        current_dst[key] = current_src[key]
        return dst

    @classmethod
    def list2dict(cls, d, k):
        """
        Creating a nested dictionary:
        ['a', 'b', 'c', 'd'] => {'a': {'b': {'c': 'd'}}}
        Merging it with the main dict updates the single key
        """
        # NOTE: k is consumed destructively via pop(0); callers pass a
        # freshly built list.
        _d = copy.deepcopy(d)
        if len(k) > 1:
            _k = k.pop(0)
            _d[_k] = cls.list2dict(d, k)
            return _d
        return k.pop(0)

    def instantiate(self):
        """Run every generator in the scheme and merge its output into
        self.data under the configured dotted attribute path(s)."""
        for k in self.scheme:
            logger.debug("EndPointManager: generating %s" % k)
            generator = getattr(self, self.scheme[k]["generator"])
            generator_args = self.scheme[k]["generator_args"]
            generated = generator(generator_args)
            attributes = self.scheme[k]["attribute"]
            """
            example of attribute:
            ["service.mysql.user", "service.postgresql.user"]
            """
            # A single path is normalized to a one-element list.
            if not isinstance(attributes, (list, tuple)):
                attributes = [attributes]
            for attribute in attributes:
                attribute_keys = re.split(ur'\.', attribute)
                logger.debug("EndPointManager: attribute_keys: %s" % \
                    str(attribute_keys))
                # The generated value becomes the innermost leaf.
                attribute_keys.append(generated)
                logger.debug("EndPointManager: attribute_keys: %s" % \
                    str(attribute_keys))
                attribute_dict = self.list2dict({}, attribute_keys)
                logger.debug("EndPointManager: attribute_dict: %s" % \
                    str(attribute_dict))
                self.merge_dictionary(self.data, attribute_dict)

    def get_data(self):
        """Return the accumulated endpoint data dictionary."""
        logger.debug("EndPointManager: data: %s" % \
            str(self.data))
        return self.data
class DeployManager:
    """
    Resolves, orders and instantiates the components deployed in one
    cluster, working through the Django models (Node, Role, Com, Point,
    EndPoint).
    """
    def __init__(self, cluster_id):
        # cache the ids of all components assigned anywhere in the cluster
        self.cluster_id = cluster_id
        self.cluster_component_ids = [
            c.id for n, r, c in self._cluster_iterator()
        ]
        self.release_id = models.Cluster.objects.get(id=cluster_id).release.id
    def sorted_components(self):
        """Return component names topologically sorted so that every
        component appears after the components that provide its needs."""
        graph = {}
        for component in models.Com.objects.filter(
            id__in=self.cluster_component_ids
        ):
            self._resolve_cluster_deps(graph, component)
        try:
            sorted_components = self._topol_sort(graph)
        except KeyError:
            # KeyError from dfs(): a provider name is missing from graph
            raise Exception("Cluster dependencies cannot be resolved")
        logger.debug("sorted_components: %s" % \
            pprint.pformat(sorted_components))
        return sorted_components
    def _cluster_iterator(self):
        """Yield [node, role, component] for every assignment in the cluster."""
        for node in models.Node.objects.filter(cluster__id=self.cluster_id):
            for role in node.roles.all():
                for component in role.components.all():
                    yield [node, role, component]
    def _resolve_cluster_deps(self, graph, component):
        """Recursively record component-name -> [provider names] edges,
        considering only providers that belong to this cluster."""
        if component.name not in graph:
            graph[component.name] = []
        requires = component.requires.all()
        logger.debug("Resolving cluster: component %s requires: %s" % \
            (component.name,
                str([p.name for p in requires])))
        for provided_by in models.Com.objects.filter(
            id__in=self.cluster_component_ids,
            provides__in=requires
        ):
            graph[component.name].append(provided_by.name)
            self._resolve_cluster_deps(graph, provided_by)
    def _topol_sort(self, graph):
        """ Depth First Traversal algorithm for sorting DAG graph.
        Example graph: 1 depends on 4; 3 depends on 2 and 6; etc.
        Example code:
        .. code-block:: python
        >>> graph = {1: [4], 2: [], 3: [2,6], 4:[2,3], 5: [], 6: [2]}
        >>> topol_sort(graph)
        [2, 6, 3, 4, 1, 5]
        Exception is raised if there is a cycle:
        .. code-block:: python
        >>> graph = {1: [4], 2: [], 3: [2,6], 4:[2,3,1], 5: [], 6: [2]}
        >>> topol_sort(graph)
        ...
        Exception: Graph contains cycles, processed 4 depends on 1
        """
        # colors: white = unvisited, gray = on current DFS path, black = done
        def dfs(v):
            color[v] = "gray"
            for w in graph[v]:
                if color[w] == "black":
                    continue
                elif color[w] == "gray":
                    # back edge to a node on the current path => cycle
                    raise Exception(
                        "Graph contains cycles, processed %s depends on %s" % \
                        (v, w))
                dfs(w)
            color[v] = "black"
            _sorted.append(v)
        _sorted = []
        color = {}
        for j in graph:
            color[j] = "white"
        for i in graph:
            if color[i] == "white":
                dfs(i)
        return _sorted
    def clean_cluster(self):
        """Delete every EndPoint bound to this cluster's nodes."""
        models.EndPoint.objects.filter(
            node__in=models.Node.objects.filter(cluster__id=self.cluster_id)
        ).delete()
    def instantiate_cluster(self):
        """Generate and persist an EndPoint for every point provided by
        every component of every role of every node in the cluster.
        A component or point appearing twice on one node is an error."""
        for node in models.Node.objects.filter(cluster__id=self.cluster_id):
            """
            it is needed to be checked if node have only one component
            assignment of given component and only one given point
            """
            components_used = []
            points_used = []
            data_driver = EndPointDataDriver(node)
            roles = node.roles.all()
            for role in roles:
                components = role.components.all()
                for component in components:
                    if component.name in components_used:
                        raise Exception(
                            "Duplicated component: node: %s com: %s" % \
                            (node.id, component.name))
                    components_used.append(component.name)
                    provides = list(component.provides.all())
                    logger.debug("Com %s provides %s" % \
                        (component.name,
                            str([p.name for p in provides])))
                    for point in provides:
                        if point.name in points_used:
                            raise Exception(
                                "Duplicated point: node: %s point: %s" % \
                                (node.id, point.name))
                        points_used.append(point.name)
                        logger.debug("Instantiating point: %s" % point.name)
                        manager = EndPointManager(
                            data_driver,
                            point.name,
                            point.scheme
                        )
                        manager.instantiate()
                        end_point = models.EndPoint(
                            point=point,
                            node=node,
                            data=manager.get_data()
                        )
                        end_point.save()
class DeployDriver:
    """
    Renders deployment data for one (node, component) pair by merging the
    data of every endpoint the component provides or requires, then
    formatting it for the component's deploy driver ("chef-solo" or
    "puppet").
    """
    def __init__(self, node, component):
        self.node = node
        self.component = component
    @classmethod
    def merge_dictionary(cls, dst, src):
        """
        'True' way of merging two dictionaries
        (python dict.update() updates just top-level keys and items)
        NOTE: duplicate of EndPointManager.merge_dictionary.
        """
        stack = [(dst, src)]
        while stack:
            current_dst, current_src = stack.pop()
            for key in current_src:
                if key not in current_dst:
                    current_dst[key] = current_src[key]
                else:
                    if isinstance(current_src[key], dict) \
                        and isinstance(current_dst[key], dict):
                        stack.append((current_dst[key], current_src[key]))
                    else:
                        current_dst[key] = current_src[key]
        return dst
    def endpoint_iterator(self, node, component):
        """
        Yield every EndPoint relevant to *component* on *node*: first the
        points it provides (which must exist on this node), then the
        points it requires (node-local instance preferred, any instance
        in the cluster as a fallback).  Raises Exception when a point has
        no instance at all.
        """
        logger.debug("endpoint_iterator: node: %s component: %s" % \
            (node.id, component.name))
        for point in component.provides.all():
            logger.debug("endpoint_iterator: component: %s provides: %s" % \
                (component.name, point.name))
            try:
                logger.debug("endpoint_iterator: looking for provided "\
                    "endpoint point: %s node: %s" % \
                    (point.name, node.id))
                ep = models.EndPoint.objects.get(point=point, node=node)
            except ObjectDoesNotExist:
                # a provided point MUST have been instantiated on this node
                logger.debug("endpoint_iterator: provided endpoint "\
                    "is not found point: %s node: %s" % \
                    (point.name, node.id))
                raise Exception("Provided point %s instance is not found" % \
                    point.name)
            except Exception as e:
                logger.debug("Exception: %s" % str(e))
                raise e
            else:
                logger.debug("endpoint_iterator: provided endpoint found " \
                    "point: %s node: %s endpoint: %s" % \
                    (point.name, node.id, ep.id))
                yield ep
        for point in component.requires.all():
            # First try the endpoint instance bound to this node; only if
            # that fails fall back to endpoint instances bound to other
            # nodes in the cluster.
            try:
                ep = models.EndPoint.objects.get(
                    point=point,
                    node=node
                )
            except ObjectDoesNotExist:
                # narrowed from a bare `except:` — only "not found" should
                # trigger the cluster-wide fallback below
                pass
            else:
                yield ep
                # FIX: previously execution fell through here and yielded
                # a SECOND (possibly foreign) instance for the same point
                continue
            eps = models.EndPoint.objects.filter(point=point)
            if eps:
                # FIXME: we need a more intelligent algorithm to choose
                # the most suitable endpoint instance; at the moment we
                # simply take the first one found.
                ep = eps[0]
                logger.debug("endpoint_iterator: required endpoint found " \
                    "point: %s node: %s" % \
                    (point.name, ep.node.id))
                yield ep
            else:
                raise Exception("Required point %s instance is not found" % \
                    point.name)
    def deploy_data(self):
        """
        Merge all relevant endpoint data into self.data and hand it to
        the formatter selected by component.deploy["driver"].
        Raises Exception when endpoint collection fails.
        """
        self.data = {}
        try:
            for endpoint in self.endpoint_iterator(self.node, self.component):
                # NOTE(review): logged at ERROR level in the original;
                # kept as-is, but DEBUG looks more appropriate — confirm.
                logger.error("Found endpoint id: %s for n=%s c=%s" % \
                    (endpoint.id, self.node.id,
                        self.component.name))
                self.merge_dictionary(self.data, endpoint.data)
        except Exception:
            # narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed
            logger.error("Error while getting endpoints for n=%s c=%s" % \
                (self.node.id, self.component.name))
            raise Exception("Getting endpoints failed: node=%s com=%s" % \
                (self.node.id, self.component.name))
        logger.debug("Node: %s com: %s data: %s" % \
            (self.node.id, self.component.name, str(self.data)))
        return {
            "chef-solo": self.chef_solo_data,
            "puppet": self.puppet_data,
        }[self.component.deploy["driver"]]()
    def chef_solo_data(self):
        """Build chef-solo attributes: run_list (+ optional cooks) merged
        with the collected endpoint data."""
        driver_args = self.component.deploy["driver_args"]
        chef_data = {
            "run_list": driver_args["run_list"]
        }
        if driver_args.get("cooks") is not None:
            chef_data["cooks"] = driver_args["cooks"]
        logger.debug("Chef-data: %s" % str(chef_data))
        self.merge_dictionary(chef_data, self.data)
        return chef_data
    def puppet_data(self):
        """Puppet needs no extra wrapping; endpoint data is used as-is."""
        return self.data

View File

@ -1,6 +0,0 @@
import traceback
class ExceptionLoggingMiddleware(object):
    """Django middleware that dumps unhandled exception tracebacks to stdout."""
    def process_exception(self, request, exception):
        # full traceback of the failed request handler (Python 2 print)
        print traceback.format_exc()

View File

@ -1,243 +0,0 @@
import re
import ipaddr
import celery
from django.db import models
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from jsonfield import JSONField
class EndPoint(models.Model):
    """Concrete instance of a Point's generated data, bound to one node."""
    point = models.ForeignKey('Point', related_name='endpoints')
    node = models.ForeignKey('Node', related_name='endpoints')
    data = JSONField()  # produced by EndPointManager.instantiate()
    class Meta:
        # at most one instance of a point per node
        unique_together = ("point", "node")
class Point(models.Model):
    """Named data contract of a release; its scheme drives data generation."""
    name = models.CharField(max_length=100)
    release = models.ForeignKey('Release', related_name='points')
    scheme = JSONField()  # generator/attribute description per key
    class Meta:
        unique_together = ("name", "release")
class Com(models.Model):
    """Deployable component of a release; requires and provides Points."""
    name = models.CharField(max_length=100)
    release = models.ForeignKey('Release', related_name='components')
    requires = models.ManyToManyField(Point, related_name='required_by')
    provides = models.ManyToManyField(Point, related_name='provided_by')
    deploy = JSONField()  # {"driver": ..., "driver_args": ...} for DeployDriver
    class Meta:
        unique_together = ("name", "release")
class Role(models.Model):
    """Named bundle of components that can be assigned to nodes."""
    name = models.CharField(max_length=100)
    release = models.ForeignKey('Release', related_name='roles')
    components = models.ManyToManyField(Com, related_name="roles")
    class Meta:
        unique_together = ("name", "release")
class Release(models.Model):
    """A deployable distribution version (OS release) with its metadata."""
    name = models.CharField(max_length=100)
    version = models.CharField(max_length=30)
    description = models.TextField(null=True, blank=True)
    networks_metadata = JSONField()  # templates for the release's Networks
    class Meta:
        unique_together = ("name", "version")
class Task(models.Model):
    """Tracks the celery task launched for a cluster."""
    # the celery task id (UUID string) doubles as the primary key
    id = models.CharField(max_length=36, primary_key=True)
    cluster = models.OneToOneField('Cluster', related_name='+')
    task_name = models.CharField(max_length=100)
    def _get_celery_task(self):
        # imported lazily to avoid a circular import with nailgun.tasks
        from nailgun import tasks
        return getattr(tasks, self.task_name)
    @property
    def name(self):
        """Registered celery name of the underlying task."""
        return self._get_celery_task().name
    def run(self, *args):
        """Start the celery task and persist its id as our primary key."""
        task_result = self._get_celery_task().delay(*args)
        self.id = task_result.task_id
        self.save()
        return task_result
    @property
    def celery_task_result(self):
        """AsyncResult handle for this task id."""
        return celery.result.AsyncResult(self.id)
    def _flatten_celery_subtasks(self, task=None):
        """Return this task plus all nested subtask results as a flat list."""
        if task is None:
            task = self.celery_task_result
        result = [task]
        if isinstance(task.result, celery.result.ResultSet):
            # a set of subtasks: flatten each member recursively
            result += reduce(list.__add__, \
                map(self._flatten_celery_subtasks, task.result.results))
        elif isinstance(task.result, celery.result.AsyncResult):
            # a single chained subtask
            result += self._flatten_celery_subtasks(task.result)
        return result
    @property
    def ready(self):
        """True when this task and every subtask have finished."""
        tasks = self._flatten_celery_subtasks()
        return all(map(lambda t: t.ready(), tasks))
    @property
    def errors(self):
        """Exceptions raised by this task or any of its subtasks."""
        tasks = self._flatten_celery_subtasks()
        errors = []
        for task in tasks:
            if isinstance(task.result, Exception):
                errors.append(task.result)
        return errors
class Cluster(models.Model):
    """A deployment cluster: a named set of nodes tied to one release."""
    name = models.CharField(max_length=100, unique=True)
    release = models.ForeignKey(Release, related_name='clusters')
    # working around Django issue #10227
    @property
    def task(self):
        """The cluster's Task, or None when none has been created yet."""
        try:
            return Task.objects.get(cluster=self)
        except ObjectDoesNotExist:
            return None
class Node(models.Model):
    """A physical or virtual host known to nailgun."""
    NODE_STATUSES = (
        ('offline', 'offline'),
        ('ready', 'ready'),
        ('discover', 'discover'),
        ('deploying', 'deploying'),
        ('error', 'error'),
    )
    # 12-char identifier used as primary key (presumably MAC-derived — confirm)
    id = models.CharField(max_length=12, primary_key=True)
    cluster = models.ForeignKey(Cluster, related_name='nodes',
        null=True, blank=True, on_delete=models.SET_NULL)
    name = models.CharField(max_length=100, blank=True)
    status = models.CharField(max_length=30, choices=NODE_STATUSES,
        default='ready')
    metadata = JSONField()  # raw hardware/OS facts reported by the agent
    mac = models.CharField(max_length=17)
    ip = models.CharField(max_length=15)
    fqdn = models.CharField(max_length=255)
    manufacturer = models.CharField(max_length=50, blank=True)
    platform_name = models.CharField(max_length=150, blank=True)
    os_platform = models.CharField(max_length=150, blank=True)
    roles = models.ManyToManyField(Role, related_name='nodes')
    new_roles = models.ManyToManyField(Role, related_name='+')
    redeployment_needed = models.BooleanField(default=False)
    @property
    def info(self):
        """ Safely aggregate metadata to provide short info for UI """
        result = {}
        try:
            # assumes memory total ends with a two-char unit suffix
            # (e.g. "12345kB") — TODO confirm agent format
            kilobytes = int(self.metadata['memory']['total'][:-2])
            gigabytes = kilobytes / 1024.0 ** 2
            result['ram'] = gigabytes
        except Exception:
            result['ram'] = None
        try:
            result['cpu'] = self.metadata['cpu']['real']
            result['cores'] = self.metadata['cpu']['total']
        except Exception:
            result['cpu'] = None
            result['cores'] = None
        # FIXME: disk space calculating may be wrong
        try:
            result['hdd'] = 0
            for name, info in self.metadata['block_device'].iteritems():
                # only whole disks sda..sdz, not partitions like "sda1"
                if re.match(r'^sd.$', name):
                    # renamed from `bytes` so the builtin is not shadowed
                    size_bytes = int(info['size']) * 512
                    terabytes = size_bytes / 1024.0 ** 4
                    result['hdd'] += terabytes
        except Exception:
            result['hdd'] = None
        return result
class IPAddr(models.Model):
    """Through-table recording one allocated IP of a network for a node."""
    network = models.ForeignKey('Network')
    node = models.ForeignKey(Node)
    ip_addr = models.CharField(max_length=25)
class Network(models.Model):
    """An IP network of a release with an allocatable address range."""
    release = models.ForeignKey(Release, related_name="networks")
    name = models.CharField(max_length=20)
    access = models.CharField(max_length=20)
    vlan_id = models.PositiveIntegerField()
    network = models.CharField(max_length=25)  # CIDR, e.g. "10.0.0.0/24"
    range_l = models.CharField(max_length=25)  # low end of allocatable range
    range_h = models.CharField(max_length=25)  # high end of allocatable range
    gateway = models.CharField(max_length=25)
    nodes = models.ManyToManyField(Node, through=IPAddr, null=True, blank=True)
    @property
    def netmask(self):
        """Dotted-quad netmask of self.network."""
        return str(ipaddr.IPv4Network(self.network).netmask)
    @property
    def broadcast(self):
        """Broadcast address of self.network."""
        return str(ipaddr.IPv4Network(self.network).broadcast)
    def update_node_network_info(self, node):
        """
        Allocate the first free IP inside [range_l, range_h] for *node*,
        persist it as an IPAddr row, and write the resulting network
        config into node.metadata["networks"][self.name].

        Raises Exception when the range is exhausted.
        """
        nw = ipaddr.IPv4Network(self.network)
        range_l = ipaddr.IPv4Address(self.range_l)
        range_h = ipaddr.IPv4Address(self.range_h)
        new_ip = None
        for host in nw.iterhosts():
            if range_l <= ipaddr.IPv4Address(host) <= range_h:
                try:
                    IPAddr.objects.get(network=self, ip_addr=host)
                except IPAddr.DoesNotExist:
                    # not allocated yet — take it
                    new_ip = host
                    break
        if not new_ip:
            raise Exception("There is no free IP for node %s" % node.id)
        # NOTE(review): new_ip is an IPv4Address object stored in a
        # CharField — relies on implicit str() coercion; confirm.
        new_ip_obj = IPAddr(network=self, ip_addr=new_ip, node=node)
        new_ip_obj.save()
        if "networks" not in node.metadata:
            node.metadata["networks"] = {}
        # FIXME: populate real value
        if 'default_interface' in node.metadata['interfaces']:
            device = node.metadata['interfaces']['default_interface']
        else:
            device = 'eth0'
        node.metadata["networks"][self.name] = {
            "access": self.access,
            "device": device,
            "vlan_id": self.vlan_id,
            "address": str(new_ip),
            "netmask": self.netmask,
            # FIXME: do we need those?
            # "broadcast": self.broadcast,
            # "gateway": self.gateway,
        }
        node.save()

View File

@ -1,48 +0,0 @@
import re
class ProvisionException(Exception):
    """Base error for the provision subsystem."""
    pass
class ProvisionAlreadyExists(ProvisionException):
    """Raised when adding an object that the provision server already has."""
    pass
class ProvisionDoesNotExist(ProvisionException):
    """Raised when editing/removing an object the provision server lacks."""
    pass
class ProvisionConfig:
    """Default provision configuration.

    `cn` is the dotted path of the driver class ProvisionFactory loads.
    """
    cn = 'nailgun.provision.driver.cobbler.Cobbler'
class Provision:
    """Abstract provision driver interface.

    Not constructed directly — use ProvisionFactory.getInstance().
    """
    def __init__(self):
        raise NotImplementedError(
            "Try to use ProvisionFactory.getInstance() method."
        )
    def save_profile(self):
        # implemented by concrete drivers (e.g. Cobbler)
        raise NotImplementedError
    def save_node(self):
        # implemented by concrete drivers (e.g. Cobbler)
        raise NotImplementedError
class ProvisionFactory:
    """Instantiates the provision driver class named by config.cn."""
    @classmethod
    def getInstance(cls, config=None):
        """
        Import and instantiate the driver class given by *config.cn*
        (dotted path, e.g. 'nailgun.provision.driver.cobbler.Cobbler').

        *config* defaults to a fresh ProvisionConfig() per call — the
        previous signature used a single shared default instance
        (mutable-default-argument anti-pattern).
        """
        # local import keeps this edit self-contained
        import importlib
        if config is None:
            config = ProvisionConfig()
        # rsplit replaces the old re.split(ur'\.', ...) round-trip;
        # importlib.import_module replaces py2-only __import__(..., -1)
        module_name, class_name = config.cn.rsplit('.', 1)
        driver_class = getattr(importlib.import_module(module_name),
                               class_name)
        return driver_class(config)

View File

@ -1,367 +0,0 @@
from nailgun.provision import ProvisionException
from nailgun.provision import ProvisionAlreadyExists, ProvisionDoesNotExist
from nailgun.provision import Provision
import logging
import xmlrpclib
class Cobbler(Provision):
    """
    Provision driver backed by a Cobbler server's XML-RPC API.

    All mutating calls authenticate with the token obtained at
    construction time.  The add_*/edit_* pairs share their attribute
    pushing via the private _apply_*_settings helpers (previously the
    same calls were duplicated verbatim in both methods).
    """
    def __init__(self, config):
        """Connect and log in to the Cobbler XML-RPC endpoint.

        Raises AttributeError when config lacks url/user/password, and
        propagates connection/login failures.
        """
        self.logger = logging.getLogger('provision.cobbler')
        try:
            self.url = config.url
            self.user = config.user
            self.password = config.password
        except AttributeError as e:
            self.logger.error(
                'Provision configuration error.' \
                ' Not all necessary attributes are set properly.'
            )
            raise e
        self.logger.debug(
            'Cobbler config: url="%s", user="%s", password="%s"' \
            % (self.url, self.user, self.password)
        )
        try:
            self.server = xmlrpclib.Server(self.url)
            self.token = self.server.login(self.user, self.password)
        except ProvisionException as e:
            self.logger.error(
                'Error occured while connecting to provision server.'
            )
            raise e
    def _get_any_profile(self):
        """Return an arbitrary existing profile, or raise ProvisionException."""
        profiles = self.server.get_profiles(self.token)
        if profiles:
            return profiles[0]
        raise ProvisionException("There is no available profiles")
    def system_by_name(self, name):
        """Return the single system matching *name*, or None."""
        systems = self.server.find_system({'name': name}, self.token)
        if systems:
            if len(systems) > 1:
                self.logger.error(
                    "There are more than one system found by pattern: %s" \
                    % name
                )
                raise ProvisionException(
                    "There are more than one system found by pattern: %s" \
                    % name
                )
            return systems[0]
        return None
    def _apply_system_settings(self, system_id, mac, power, profile, kopts):
        """Push the settings shared by add_system() and edit_system()."""
        self.server.modify_system(
            system_id, 'profile', profile.name, self.token
        )
        self.server.modify_system(
            system_id, 'kopts', kopts, self.token
        )
        self.server.modify_system(
            system_id, 'modify_interface', {
                "macaddress-eth0": mac,
            }, self.token
        )
        self.server.modify_system(
            system_id, 'power_type', power.power_type, self.token
        )
        # optional power attributes are only pushed when set
        for attr in ('power_user', 'power_pass', 'power_id', 'power_address'):
            value = getattr(power, attr)
            if value:
                self.server.modify_system(system_id, attr, value, self.token)
    def add_system(self, name, mac, power, profile, kopts=""):
        """Create a new system; raises ProvisionAlreadyExists if present."""
        if self.system_by_name(name):
            self.logger.error(
                "Trying to add system that already exists: %s" \
                % name
            )
            raise ProvisionAlreadyExists(
                "System with name %s already exists. Try to edit it." \
                % name
            )
        system_id = self.server.new_system(self.token)
        self.server.modify_system(
            system_id, 'name', name, self.token
        )
        self._apply_system_settings(system_id, mac, power, profile, kopts)
        self.server.save_system(system_id, self.token)
        return self.system_by_name(name)
    def edit_system(self, name, mac, power, profile, kopts=""):
        """Update an existing system; raises ProvisionDoesNotExist otherwise."""
        if not self.system_by_name(name):
            self.logger.error(
                "Trying to edit system that does not exist: %s" \
                % name
            )
            # message fixed: a missing system should be *added*, not edited
            raise ProvisionDoesNotExist(
                "System with name %s does not exist. Try to add it." \
                % name
            )
        system_id = self.server.get_system_handle(name, self.token)
        self._apply_system_settings(system_id, mac, power, profile, kopts)
        self.server.save_system(system_id, self.token)
        return self.system_by_name(name)
    def power_system(self, name, power):
        """Send a power command ('on'/'off'/'reboot'/'status') to a system."""
        if not self.system_by_name(name):
            self.logger.error(
                "Trying to power system that does not exist: %s" % name
            )
            raise ProvisionDoesNotExist(
                "System with name %s does not exist. Try to edit it." % name
            )
        if power not in ('on', 'off', 'reboot', 'status'):
            raise ValueError("Power has invalid value")
        system_id = self.server.get_system_handle(name, self.token)
        self.server.power_system(system_id, power, self.token)
        return self.system_by_name(name)
    def handle_system(self, name, mac, power, profile, kopts=""):
        """Upsert: edit the system when it exists, otherwise add it."""
        try:
            self.edit_system(name, mac, power, profile, kopts)
            self.logger.info("Edited system: %s" % name)
        except ProvisionDoesNotExist:
            self.add_system(name, mac, power, profile, kopts)
            self.logger.info("Added system: %s" % name)
    def del_system(self, name):
        """Remove a system; raises ProvisionDoesNotExist when absent."""
        system = self.system_by_name(name)
        if not system:
            self.logger.error(
                "Trying to remove system that does not exist: %s" % name
            )
            raise ProvisionDoesNotExist(
                "There is no system with name %s" % name
            )
        self.server.remove_system(name, self.token)
        self.logger.info("Removed system %s" % name)
    def profile_by_name(self, name):
        """Return the single profile matching *name*, or None."""
        profiles = self.server.find_profile({'name': name}, self.token)
        if profiles:
            if len(profiles) > 1:
                self.logger.error(
                    "There are more than one profile found by pattern: %s" \
                    % name
                )
                raise ProvisionException(
                    "There are more than one profile found by pattern: %s" \
                    % name
                )
            return profiles[0]
        return None
    def _apply_profile_settings(self, profile_id, distro, kickstart):
        """Push the settings shared by add_profile() and edit_profile()."""
        self.server.modify_profile(profile_id, 'distro', distro, self.token)
        self.server.modify_profile(
            profile_id, 'kickstart', kickstart, self.token
        )
    def add_profile(self, name, distro, kickstart):
        """Create a new profile; raises ProvisionAlreadyExists if present."""
        if self.profile_by_name(name):
            self.logger.error(
                "Trying to add profile that already exists: %s" % name
            )
            raise ProvisionAlreadyExists(
                "Profile with name %s already exists. Try to edit it." \
                % name
            )
        profile_id = self.server.new_profile(self.token)
        self.server.modify_profile(profile_id, 'name', name, self.token)
        self._apply_profile_settings(profile_id, distro, kickstart)
        self.server.save_profile(profile_id, self.token)
        return self.profile_by_name(name)
    def edit_profile(self, name, distro, kickstart):
        """Update an existing profile; raises ProvisionDoesNotExist otherwise."""
        if not self.profile_by_name(name):
            self.logger.error(
                "Trying to edit profile that does not exist: %s" % name
            )
            raise ProvisionDoesNotExist(
                "Profile with name %s does not exist. Try to add it." % name
            )
        profile_id = self.server.get_profile_handle(name, self.token)
        self._apply_profile_settings(profile_id, distro, kickstart)
        self.server.save_profile(profile_id, self.token)
        return self.profile_by_name(name)
    def handle_profile(self, name, distro, seed):
        """Upsert: edit the profile when it exists, otherwise add it."""
        try:
            self.edit_profile(name, distro, seed)
            self.logger.info("Edited profile: %s" % name)
        except ProvisionDoesNotExist:
            self.add_profile(name, distro, seed)
            self.logger.info("Added profile: %s" % name)
    def del_profile(self, name):
        """Remove a profile; raises ProvisionDoesNotExist when absent."""
        profile = self.profile_by_name(name)
        if not profile:
            self.logger.error(
                "Trying to remove profile that does not exist: %s" % name
            )
            raise ProvisionDoesNotExist(
                "There is no profile with name %s" % name
            )
        self.server.remove_profile(name, self.token)
        self.logger.info("Removed profile: %s" % name)
    def distro_by_name(self, name):
        """Return the single distro matching *name*, or None."""
        distros = self.server.find_distro({'name': name}, self.token)
        if distros:
            if len(distros) > 1:
                self.logger.error(
                    "There are more than one distro found by pattern: %s" \
                    % name
                )
                raise ProvisionException(
                    "There are more than one distro found by pattern %s" \
                    % name
                )
            return distros[0]
        return None
    def _apply_distro_settings(self, distro_id, kernel, initrd, arch,
                               breed, osversion):
        """Push the settings shared by add_distro() and edit_distro()."""
        self.server.modify_distro(distro_id, 'kernel', kernel, self.token)
        self.server.modify_distro(distro_id, 'initrd', initrd, self.token)
        self.server.modify_distro(distro_id, 'arch', arch, self.token)
        self.server.modify_distro(distro_id, 'breed', breed, self.token)
        self.server.modify_distro(
            distro_id, 'os_version', osversion, self.token
        )
    def add_distro(self, name, kernel, initrd, arch, breed, osversion):
        """Create a new distro; raises ProvisionAlreadyExists if present."""
        if self.distro_by_name(name):
            self.logger.error(
                "Trying to add distro that already exists: %s" \
                % name
            )
            raise ProvisionAlreadyExists(
                "Distro with name %s already exists. Try to edit it." \
                % name
            )
        distro_id = self.server.new_distro(self.token)
        self.server.modify_distro(distro_id, 'name', name, self.token)
        self._apply_distro_settings(distro_id, kernel, initrd, arch,
                                    breed, osversion)
        self.server.save_distro(distro_id, self.token)
        return self.distro_by_name(name)
    def edit_distro(self, name, kernel, initrd, arch, breed, osversion):
        """Update an existing distro; raises ProvisionDoesNotExist otherwise."""
        if not self.distro_by_name(name):
            self.logger.error(
                "Trying to edit distro that does not exist: %s" % name
            )
            raise ProvisionDoesNotExist(
                "Distro with name %s does not exist. Try to add it." \
                % name
            )
        distro_id = self.server.get_distro_handle(name, self.token)
        self._apply_distro_settings(distro_id, kernel, initrd, arch,
                                    breed, osversion)
        self.server.save_distro(distro_id, self.token)
        return self.distro_by_name(name)
    def handle_distro(self, name, kernel, initrd, arch, os, osversion):
        """Upsert: edit the distro when it exists, otherwise add it."""
        try:
            self.edit_distro(name, kernel, initrd, arch, os, osversion)
            self.logger.info("Edited distro: %s" % name)
        except ProvisionDoesNotExist:
            self.add_distro(name, kernel, initrd, arch, os, osversion)
            self.logger.info("Added distro: %s" % name)
    def del_distro(self, name):
        """Remove a distro; raises ProvisionDoesNotExist when absent."""
        distro = self.distro_by_name(name)
        if not distro:
            self.logger.error(
                "Trying to remove distro that does not exist: %s" % name
            )
            raise ProvisionDoesNotExist(
                "There is no distro with name %s" % name
            )
        self.server.remove_distro(name, self.token)
        self.logger.info("Removed distro %s" % name)
    # API
    def save_profile(self, profile):
        """Upsert the distro and profile for a Profile model object."""
        self.handle_distro(profile.name,
                           profile.kernel,
                           profile.initrd,
                           profile.arch,
                           profile.os,
                           profile.osversion)
        self.handle_profile(profile.name,
                            profile.name,
                            profile.seed)
    def save_node(self, node):
        """Upsert the Cobbler system for a Node model object."""
        self.handle_system(node.name,
                           node.mac,
                           node.power,
                           node.profile,
                           node.kopts,
                           )
    def power_on(self, node):
        """Power the node's system on."""
        self.power_system(node.name, 'on')
    def power_off(self, node):
        """Power the node's system off."""
        self.power_system(node.name, 'off')
    def power_reboot(self, node):
        """Reboot the node's system."""
        self.power_system(node.name, 'reboot')
    def power_status(self, node):
        """Not implemented: querying power status is unsupported."""
        raise NotImplementedError

View File

@ -1,81 +0,0 @@
import re
from nailgun.provision import ProvisionException
import logging
class ModelObject(object):
    """Base class for provision model objects; carries the driver handle."""
    # set via the `driver` property before calling save()/power_*()
    _driver = None
    @property
    def driver(self):
        """The provision driver; raises ProvisionException when unset."""
        if self._driver is None:
            raise ProvisionException("Driver is not set properly.")
        return self._driver
    @driver.setter
    def driver(self, driver):
        self._driver = driver
class Validator:
    """Whitelist-based validation of provision model values."""
    _supported_os = (
        "ubuntu",
        "redhat",
    )
    _supported_osversion = (
        "precise",
        "rhel6",
    )
    _supported_arch = (
        "x86_64",
    )
    _supported_platform = (
        ("ubuntu", "precise", "x86_64"),
        ("redhat", "rhel6", "x86_64"),
    )
    _supported_powertypes = (
        "virsh",
        "ssh",
    )
    # compiled once (was rebuilt on every is_mac_valid() call);
    # case-insensitive colon-separated MAC, e.g. "c8:0a:a9:a6:ff:28"
    _mac_rex = re.compile(r'^([0-9abcdef]{2}:){5}[0-9abcdef]{2}$', re.I)
    @classmethod
    def is_mac_valid(cls, mac):
        """Return a truthy match object when *mac* is a valid MAC, else None."""
        return cls._mac_rex.match(mac)
    @classmethod
    def is_os_valid(cls, os):
        """True when *os* is a supported operating system name."""
        return os in cls._supported_os
    @classmethod
    def is_osversion_valid(cls, osversion):
        """True when *osversion* is a supported OS version codename."""
        return osversion in cls._supported_osversion
    @classmethod
    def is_arch_valid(cls, arch):
        """True when *arch* is a supported CPU architecture."""
        return arch in cls._supported_arch
    @classmethod
    def is_platform_valid(cls, os, osversion, arch):
        """True when the (os, osversion, arch) combination is supported."""
        return (os, osversion, arch) in cls._supported_platform
    # FIXME
    # IT IS NEEDED TO BE CHECKED IF PROVISION ALREADY HAS THAT PROFILE
    # IF NOT THEN PROFILE IS OBVIOUSLY INVALID
    @classmethod
    def is_profile_valid(cls, profile):
        return True
    @classmethod
    def is_powertype_valid(cls, powertype):
        """True when *powertype* is a supported power management type."""
        return powertype in cls._supported_powertypes
    # FIXME
    # IT IS NEEDED TO BE CHECKED IF POWER IS VALID
    @classmethod
    def is_power_valid(cls, power):
        return True

View File

@ -1,89 +0,0 @@
import logging
from nailgun.provision import ProvisionException
from . import ModelObject, Validator
class Node(ModelObject):
    """Provision-side model of a single node (system) to be deployed."""
    _mac = None
    _profile = None
    _kopts = ""
    _pxe = False
    _power = None
    def __init__(self, name):
        self.name = name
        self.logger = logging.getLogger('provision.model.node')
    def save(self):
        """Persist this node through the provision driver."""
        self.driver.save_node(self)
    @property
    def mac(self):
        """MAC address; raises ProvisionException when never set."""
        if not self._mac:
            raise ProvisionException("Mac is not set properly")
        return self._mac
    @mac.setter
    def mac(self, mac):
        if not Validator.is_mac_valid(mac):
            raise ProvisionException("Mac is not valid")
        self._mac = mac
    @property
    def profile(self):
        """Provision profile; raises ProvisionException when never set."""
        if not self._profile:
            raise ProvisionException("Profile is not set properly")
        return self._profile
    @profile.setter
    def profile(self, profile):
        if not Validator.is_profile_valid(profile):
            raise ProvisionException("Profile is not valid")
        self._profile = profile
    @property
    def kopts(self):
        """Extra kernel options string (may be empty)."""
        self.logger.debug("Node kopts getter: %s" % self._kopts)
        return self._kopts
    @kopts.setter
    def kopts(self, kopts):
        self.logger.debug("Node kopts setter: %s" % kopts)
        self._kopts = kopts
    @property
    def pxe(self):
        """Whether the node should boot over PXE (always a bool)."""
        self.logger.debug("Node pxe getter: %s" % str(self._pxe))
        return self._pxe
    @pxe.setter
    def pxe(self, pxe):
        self.logger.debug("Node pxe setter: %s" % str(pxe))
        # simplified from an if/else pair; coerces any value to True/False
        self._pxe = bool(pxe)
    @property
    def power(self):
        """Power settings; raises ProvisionException when never set."""
        if not self._power:
            raise ProvisionException("Power is not set properly")
        return self._power
    @power.setter
    def power(self, power):
        if not Validator.is_power_valid(power):
            raise ProvisionException("Power is not valid")
        self._power = power
    def power_on(self):
        """Power the node on via the driver."""
        self.driver.power_on(self)
    def power_off(self):
        """Power the node off via the driver."""
        self.driver.power_off(self)
    def power_reboot(self):
        """Reboot the node via the driver."""
        self.driver.power_reboot(self)
    def power_status(self):
        """Query power status via the driver (driver may not support it)."""
        self.driver.power_status(self)

View File

@ -1,52 +0,0 @@
import logging
from nailgun.provision import ProvisionException
from . import Validator
class Power:
    """Power-management settings (type plus optional credentials) for a node."""
    _power_user = None
    _power_pass = None
    _power_address = None
    _power_id = None
    def __init__(self, power_type):
        # power_type must be one of Validator._supported_powertypes
        if Validator.is_powertype_valid(power_type):
            self._power_type = power_type
        else:
            raise ProvisionException("Power type is not valid")
    @property
    def power_type(self):
        """Power management backend name (e.g. 'virsh', 'ssh'); read-only."""
        return self._power_type
    @property
    def power_user(self):
        return self._power_user
    @power_user.setter
    def power_user(self, power_user):
        self._power_user = power_user
    @property
    def power_pass(self):
        return self._power_pass
    @power_pass.setter
    def power_pass(self, power_pass):
        self._power_pass = power_pass
    @property
    def power_address(self):
        return self._power_address
    @power_address.setter
    def power_address(self, power_address):
        self._power_address = power_address
    @property
    def power_id(self):
        return self._power_id
    @power_id.setter
    def power_id(self, power_id):
        self._power_id = power_id

View File

@ -1,102 +0,0 @@
import logging
from . import ModelObject, Validator
from nailgun.provision import ProvisionException
class Profile(ModelObject):
    """Provision-side model of an installable profile (distro + seed)."""
    _arch = None
    _kernel = None
    _initrd = None
    _os = None
    _osversion = None
    _seed = None
    _kopts = ""
    def __init__(self, name):
        self.name = name
        self.logger = logging.getLogger('provision.model.profile')
    def save(self):
        """Validate the (os, osversion, arch) platform and persist via driver."""
        if not Validator.is_platform_valid(
            self._os, self._osversion, self._arch
        ):
            raise ProvisionException("Platform is not valid")
        self.driver.save_profile(self)
    @property
    def arch(self):
        """CPU architecture; raises ProvisionException when never set."""
        if not self._arch:
            raise ProvisionException("Arch is not set properly")
        return self._arch
    @arch.setter
    def arch(self, arch):
        if not Validator.is_arch_valid(arch):
            raise ProvisionException("Arch is not valid")
        self._arch = arch
    @property
    def kernel(self):
        """Kernel image path/name; raises ProvisionException when never set."""
        if not self._kernel:
            raise ProvisionException("Kernel is not set properly")
        return self._kernel
    @kernel.setter
    def kernel(self, kernel):
        self._kernel = kernel
    @property
    def initrd(self):
        """Initrd image path/name; raises ProvisionException when never set."""
        if not self._initrd:
            raise ProvisionException("Initrd is not set properly")
        return self._initrd
    @initrd.setter
    def initrd(self, initrd):
        self._initrd = initrd
    @property
    def os(self):
        """Operating system name; raises ProvisionException when never set."""
        if not self._os:
            raise ProvisionException("Os is not set properly")
        return self._os
    @os.setter
    def os(self, os):
        if not Validator.is_os_valid(os):
            raise ProvisionException("Os is not valid")
        self._os = os
    @property
    def osversion(self):
        """OS version codename; raises ProvisionException when never set."""
        if not self._osversion:
            raise ProvisionException("Osversion is not set properly")
        return self._osversion
    @osversion.setter
    def osversion(self, osversion):
        if not Validator.is_osversion_valid(osversion):
            raise ProvisionException("Osversion is not valid")
        self._osversion = osversion
    @property
    def seed(self):
        """Kickstart/preseed file; raises ProvisionException when never set."""
        if not self._seed:
            raise ProvisionException("Seed is not set properly")
        self.logger.debug("Profile seed getter: %s" % self._seed)
        return self._seed
    @seed.setter
    def seed(self, seed):
        self.logger.debug("Profile seed setter: %s" % seed)
        self._seed = seed
    @property
    def kopts(self):
        """Extra kernel options string (may be empty)."""
        self.logger.debug("Profile kopts getter: %s" % self._kopts)
        return self._kopts
    @kopts.setter
    def kopts(self, kopts):
        self.logger.debug("Profile kopts setter: %s" % kopts)
        self._kopts = kopts

View File

@ -1,128 +0,0 @@
from model import Validator
from model.profile import Profile
from model.node import Node
from model.power import Power
from nose.tools import eq_
class TestValidator:
    """Checks that Validator accepts the canonical supported values."""
    def setUp(self):
        # known-good fixture values from the supported whitelists
        self.mac = "c8:0a:a9:a6:ff:28"
        self.platform = ("ubuntu", "precise", "x86_64")
        self.os = "ubuntu"
        self.osversion = "precise"
        self.arch = "x86_64"
    def test_is_mac_valid(self):
        assert Validator.is_mac_valid(self.mac)
    def test_is_platform_valid(self):
        assert Validator.is_platform_valid(
            self.platform[0],
            self.platform[1],
            self.platform[2]
        )
    def test_is_os_valid(self):
        assert Validator.is_os_valid(self.os)
    def test_is_osversion_valid(self):
        assert Validator.is_osversion_valid(self.osversion)
    def test_is_arch_valid(self):
        assert Validator.is_arch_valid(self.arch)
class TestProfile:
    """Round-trip checks for every Profile property setter/getter pair."""
    def setUp(self):
        self.profile = Profile('profile')
        self.arch = "x86_64"
        self.os = "ubuntu"
        self.osversion = "precise"
        self.kernel = "kernel"
        self.initrd = "initrd"
        self.seed = "seed"
        self.kopts = "kopts"
    def test_arch(self):
        self.profile.arch = self.arch
        eq_(self.profile.arch, self.arch)
    def test_os(self):
        self.profile.os = self.os
        eq_(self.profile.os, self.os)
    def test_osversion(self):
        self.profile.osversion = self.osversion
        eq_(self.profile.osversion, self.osversion)
    def test_kernel(self):
        self.profile.kernel = self.kernel
        eq_(self.profile.kernel, self.kernel)
    def test_initrd(self):
        self.profile.initrd = self.initrd
        eq_(self.profile.initrd, self.initrd)
    def test_seed(self):
        self.profile.seed = self.seed
        eq_(self.profile.seed, self.seed)
    def test_kopts(self):
        self.profile.kopts = self.kopts
        eq_(self.profile.kopts, self.kopts)
class TestNode:
    """Round-trip checks for Node attributes."""

    def setUp(self):
        self.node = Node('node')
        self.mac = "c8:0a:a9:a6:ff:28"
        self.profile = Profile('profile')
        self.kopts = "kopts"
        self.pxe = True
        self.power = Power('ssh')

    def _check_roundtrip(self, attr, value):
        # Assign through the property and read the same value back.
        setattr(self.node, attr, value)
        eq_(getattr(self.node, attr), value)

    def test_mac(self):
        self._check_roundtrip("mac", self.mac)

    def test_profile(self):
        self._check_roundtrip("profile", self.profile)

    def test_kopts(self):
        self._check_roundtrip("kopts", self.kopts)

    def test_pxe(self):
        self._check_roundtrip("pxe", self.pxe)

    def test_power(self):
        self._check_roundtrip("power", self.power)
class TestPower:
    """Round-trip checks for Power credentials and addressing."""

    def setUp(self):
        self.power = Power('ssh')
        self.power_user = "user"
        self.power_pass = "pass"
        self.power_address = "localhost"
        self.power_id = "localhost"

    def _check_roundtrip(self, attr, value):
        # Assign through the property and read the same value back.
        setattr(self.power, attr, value)
        eq_(getattr(self.power, attr), value)

    def test_power_user(self):
        self._check_roundtrip("power_user", self.power_user)

    def test_power_pass(self):
        self._check_roundtrip("power_pass", self.power_pass)

    def test_power_address(self):
        self._check_roundtrip("power_address", self.power_address)

    def test_power_id(self):
        self._check_roundtrip("power_id", self.power_id)

View File

@ -1,174 +0,0 @@
import os
from nailgun.extrasettings import *
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
PROJECT_ROOT = os.path.dirname(SITE_ROOT)

# NOTE(review): DEBUG is hard-coded on; this file must not ship to
# production as-is.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)

MANAGERS = ADMINS

# Development database: a local sqlite file next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(PROJECT_ROOT, 'nailgun.sqlite'),
        'USER': '',  # Not used with sqlite3.
        'PASSWORD': '',  # Not used with sqlite3.
        'HOST': '',  # Not used with sqlite3.
        'PORT': '',  # Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'

STATIC_DOC_ROOT = os.path.abspath(os.path.join(SITE_ROOT, 'static'))

# Additional locations of static files
STATICFILES_DIRS = (
    STATIC_DOC_ROOT,
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)

# Make this unique, and don't share it with anybody.
# NOTE(review): the "&amp;" below looks like an HTML-escaping artifact
# picked up in transit -- the original key probably contained a bare "&".
# Confirm before changing, since rotating the key invalidates sessions
# and signed data.
SECRET_KEY = 'tqn)wkzzoisx7kl4l&amp;4wjr!w0o7nr_eg0+oho0$x4dp5y$gr71'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

# Extra exception logging for development runs only.
if DEBUG:
    MIDDLEWARE_CLASSES += ('nailgun.middleware.ExceptionLoggingMiddleware',)

ROOT_URLCONF = 'nailgun.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'nailgun.wsgi.application'

TEMPLATE_DIRS = (
    os.path.abspath(os.path.join(SITE_ROOT, 'templates')),
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'djcelery',
    'nailgun',
    'nailgun.api',
    'nailgun.webui',
    'django_nose',
    # Uncomment the next line to enable the admin:
    # 'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
)

# Combined nose + celery test runner (defined in nailgun/testrunner.py).
TEST_RUNNER = 'nailgun.testrunner.MyRunner'

# LOGFILE and LOGLEVEL come from the star import of nailgun.extrasettings
# at the top of this module.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'simple': {
            'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        },
    },
    'handlers': {
        'file': {
            'class': 'logging.FileHandler',
            'filename': LOGFILE,
            'formatter': 'simple',
        },
    },
    'root': {
        'level': LOGLEVEL,
        'handlers': ['file'],
    },
}

# Celery settings
import djcelery
djcelery.setup_loader()

BROKER_URL = "redis://localhost:6379/0"
CELERY_RESULT_BACKEND = "redis"
CELERY_IMPORTS = ("nailgun.tasks",)
CELERY_DISABLE_RATE_LIMITS = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = False

CHEF_NODES_DATABAG_NAME = "nodes"
PISTON_IGNORE_DUPE_MODELS = True

# CIDR pools that networks are allocated from.
NETWORK_POOLS = {
    'public': ['172.18.0.0/16'],
    'private': ['10.1.0.0/16']
}

View File

@ -1,142 +0,0 @@
import logging
from functools import wraps
from celery.task import task, chord, TaskSet
from nailgun.models import Cluster, Node
logger = logging.getLogger(__name__)
def topol_sort(graph):
    """Topologically sort a DAG given as ``{node: [dependencies]}``.

    Returns a list in which every node appears after all of its
    dependencies, using a depth-first traversal over the keys in their
    iteration order.

    .. code-block:: python

        >>> graph = {1: [4], 2: [], 3: [2,6], 4:[2,3], 5: [], 6: [2]}
        >>> topol_sort(graph)
        [2, 6, 3, 4, 1, 5]

    Raises Exception when the graph contains a cycle.
    """
    WHITE, GRAY, BLACK = "white", "gray", "black"
    state = dict((node, WHITE) for node in graph)
    ordering = []

    def visit(node):
        # GRAY marks "on the current DFS path": meeting a GRAY
        # dependency again means we walked a cycle.
        state[node] = GRAY
        for dep in graph[node]:
            if state[dep] == BLACK:
                continue
            elif state[dep] == GRAY:
                raise Exception(
                    "Graph contains cycles, processed %s depends on %s" % \
                    (node, dep))
            visit(dep)
        state[node] = BLACK
        ordering.append(node)

    for node in graph:
        if state[node] == WHITE:
            visit(node)
    return ordering
# This code is inspired by
# https://github.com/NetAngels/celery-tasktree/blob/master/celery_tasktree.py
def task_with_callbacks(func=None, **options):
    """Decorator "task with callbacks".

    Produces a celery task whose run function honours a ``callback``
    kwarg (see run_with_callbacks): the callback(s) fire after the
    function, regardless of the subtask's return status.

    Works both as ``@task_with_callbacks`` and as
    ``@task_with_callbacks(**options)``.
    """
    def _decorate(target):
        return task(run_with_callbacks(target), **options)

    return _decorate(func) if func else _decorate
def run_with_callbacks(func):
    """Decorator "run with callbacks".

    Wraps *func* so that an optional ``callback`` keyword argument is
    popped before the call and, when present, applied asynchronously
    afterwards; in that case the callback's async result is returned
    instead of *func*'s own return value.
    """
    @wraps(func)
    def _inner(*args, **kwargs):
        next_task = kwargs.pop('callback', None)
        result = func(*args, **kwargs)
        if not next_task:
            return result
        return next_task.apply_async()
    return _inner
class TaskPool(object):
    """Assembles a chain of celery tasks/chords and launches it.

    Tasks are pushed in execution order; apply_async() wires the pool
    back-to-front so that each task triggers its successor through the
    ``callback`` kwarg handled by run_with_callbacks.  Pushing a *list*
    of task descriptors produces a chord that runs them in parallel
    before the chain continues.
    """

    def __init__(self):
        # Ordered task descriptors: {'func': ..., 'args': ..., 'kwargs': ...}
        self.pool = []

    def push_task(self, func, args=None, kwargs=None):
        """Append a task (or a list of tasks, for a chord) to the chain.

        ``kwargs`` defaults to None instead of ``{}``: the stored dict is
        later mutated in _get_head_task() (a ``callback`` entry is
        injected), so a shared mutable default would leak state between
        unrelated pushes.
        """
        task = {'func': func, 'args': args, 'kwargs': kwargs or {}}
        # TODO(mihgen): check that list of func has correct args
        self.pool.append(task)

    @task_with_callbacks
    def _chord_task(*args):
        # Chord may prepend the previous taskset's results as args[0];
        # in that case the payload we need starts at index 1.
        if len(args) == 3:
            taskset, clbk = args[1], args[2]
        else:
            taskset, clbk = args[0], args[1]
        logger.error("TaskPool._chord_task: args: %s" % str(args))
        logger.error("TaskPool._chord_task: args length: %s" % len(args))
        logger.error("TaskPool._chord_task: taskset: %s" % str(taskset))
        logger.error("TaskPool._chord_task: clbk: %s" % str(clbk))
        # We have to create separate subtask that contains chord expression
        # because otherwise chord functions get applied synchronously
        return chord([
            tsk['func'].subtask(args=tsk['args'], kwargs=tsk['kwargs']) \
            for tsk in taskset])(clbk)

    def _get_head_task(self):
        """Wire the pool back-to-front; return the first task to run."""
        prev_task = None
        for t in reversed(self.pool):
            if isinstance(t['func'], list):
                task = self._chord_task.subtask((t['func'], prev_task))
            else:
                kwargs = t['kwargs'] or {}
                if prev_task:
                    kwargs['callback'] = prev_task
                task = t['func'].subtask(args=t['args'], kwargs=kwargs)
            prev_task = task
        # Was a bare py2 "print" debug statement; use the module logger
        # for consistency with the rest of this module.
        logger.debug("Returning head task: %s", task)
        return task

    def apply_async(self):
        # We need only head task. When it's execution is done,
        # run_with_callbacks will call it's subtask
        async_result = self._get_head_task().apply_async()
        return async_result

View File

@ -1,256 +0,0 @@
import os
import os.path
import copy
import string
import logging
from random import choice
import re
import time
import socket
import json
import paramiko
import tarfile
import shutil
from django.conf import settings
from nailgun.models import Cluster, Node, Role, Com
from nailgun.helpers import SshConnect, DeployManager
from nailgun.task_helpers import task_with_callbacks, TaskPool, topol_sort
from nailgun.exceptions import SSHError, EmptyListError, DeployError
from nailgun.provision import ProvisionConfig
from nailgun.provision import ProvisionFactory
from nailgun.provision.model.profile import Profile as ProvisionProfile
from nailgun.provision.model.node import Node as ProvisionNode
from nailgun.provision.model.power import Power as ProvisionPower
from celery import current_app
from celery.utils import LOG_LEVELS
from celery.log import Logging
# Route celery worker logging through the level/file configured in the
# project settings (CELERYLOGLEVEL is a level *name*, mapped through
# celery's LOG_LEVELS table).
current_app.conf.CELERYD_LOG_LEVEL = LOG_LEVELS[settings.CELERYLOGLEVEL]
celery_logging = Logging(current_app)
celery_logging.setup_logger(logfile=settings.CELERYLOGFILE)
# Module-wide logger used by every task below.
logger = celery_logging.get_default_logger()
@task_with_callbacks
def update_cluster_status(*args):
    """Final task of a deployment chain; returns the cluster id.

    Takes *args because celery's chord prepends the previous taskset's
    results (a list) as the first positional argument.
    """
    # FIXME(mihgen):
    # We have to do this ugly trick because chord precedes first argument
    if isinstance(args[0], list):
        args = args[1:]
    cluster_id = args[0]
    return cluster_id
def node_set_error_status(node_id):
    """Flag the node as failed and persist the change immediately."""
    failed_node = Node.objects.get(id=node_id)
    failed_node.status = "error"
    failed_node.save()
@task_with_callbacks
def deploy_cluster(cluster_id):
    """Build and launch the task chain that deploys a whole cluster.

    Components are walked in the order DeployManager.sorted_components()
    yields them; for each component a parallel taskset bootstraps every
    node of the cluster that carries one of the component's roles.  The
    chain ends with update_cluster_status.
    """
    deploy_manager = DeployManager(cluster_id)
    release = Cluster.objects.get(id=cluster_id).release
    logger.debug("deploy_cluster: Cluster release: %s" % release.id)
    tree = TaskPool()
    # first element in sorted_recipes is the first recipe we have to apply
    # NOTE(review): `installed` is never used below -- dead variable.
    installed = []
    logger.debug("deploy_cluster: sorted_components: %s" % \
        deploy_manager.sorted_components())
    for component_name in deploy_manager.sorted_components():
        logger.debug("deploy_cluster: Com: %s" % component_name)
        component = Com.objects.get(
            release=release,
            name=component_name)
        roles = component.roles.all()
        nodes = Node.objects.filter(roles__in=roles, cluster__id=cluster_id)
        taskset = []
        for node in nodes:
            logger.debug("deploy_cluster: task: node: %s com: %s" % \
                (node.id, component.name))
            bootstrap_args = [node.id, component.name]
            taskset.append({'func': bootstrap_node, 'args': bootstrap_args,
                            'kwargs': {}})
        # FIXME(mihgen): it there are no taskset items,
        # we included recipes which are not applied on nodes.
        # We have to include only recipes which are assigned to nodes
        if taskset:
            tree.push_task(taskset)
    tree.push_task(update_cluster_status, (cluster_id,))
    res = tree.apply_async()
    return res
def tcp_ping(host, port, timeout=5):
    """Return True when a TCP connection to host:port can be opened
    within *timeout* seconds, False otherwise."""
    address = (str(host), int(port))
    try:
        connection = socket.create_connection(address, timeout)
    except socket.error:
        return False
    connection.close()
    return True
@task_with_callbacks
def bootstrap_node(node_id, component_name):
    """Provision (if needed) and deploy one component on one node.

    Status flow: "discover"/"offline" -> provisioned -> "deploying" ->
    "ready" on success, or "error" on any ssh/deploy failure.  Nodes
    already "ready" skip provisioning but are still re-deployed.
    Raises DeployError for an invalid starting status or a failed deploy
    script, SSHError when the ssh connection cannot be established.
    """
    node = Node.objects.get(id=node_id)
    if node.status not in ["ready", "discover", "offline"]:
        raise DeployError(
            "Invalid node status '%s' - deployment aborted." \
            % node.status
        )
    if node.status == "ready":
        logger.debug("Provisioning skipped - node %s \
is already installed" % node_id)
    elif node.status in ["discover", "offline"]:
        logger.debug("Trying to provision node %s..." % node_id)
        _provision_node(node_id)
    logger.debug("Turning node %s status into 'deploying'" % node_id)
    node.status = "deploying"
    node.save()
    # FIXME
    # node.ip had been got from bootstrap agent
    # there is no guarantee that installed slave node has
    # the same ip as bootstrap node had
    # it is necessary to install and launch agent on slave node
    logger.debug("Waiting for node %s listen to %s:%s ..." \
        % (node_id, str(node.ip), "22"))
    # NOTE(review): no upper bound -- an unreachable node blocks this
    # task forever; confirm whether a deadline is enforced elsewhere.
    while True:
        if tcp_ping(node.ip, 22):
            break
        time.sleep(5)
    logger.debug("Trying to connect to node %s over ssh" % node_id)
    try:
        ssh = SshConnect(node.ip, 'root', settings.PATH_TO_SSH_KEY)
    except (paramiko.AuthenticationException,
            paramiko.PasswordRequiredException,
            paramiko.SSHException):
        logger.error("Error occured while ssh connecting to node %s" % node_id)
        message = "Task %s failed:" \
                  "Can't connect to IP=%s" \
                  % (bootstrap_node.request.id, node.ip)
        node_set_error_status(node.id)
        raise SSHError(message)
    except Exception, error:
        message = "Task %s failed:" \
                  "Error during ssh/deploy IP=%s: %s" \
                  % (bootstrap_node.request.id, node.ip, str(error))
        node_set_error_status(node.id)
        raise SSHError(message)
    else:
        logger.debug("Trying to launch deploy script on node %s" % node_id)
        # Returns True if succeeded
        exit_status = ssh.run("/opt/nailgun/bin/deploy %s" % component_name)
        ssh.close()
    # ssh.run returns True, if command executed successfully
    # FIXME(mihgen): rename it/refactor, it's unclear
    if not exit_status:
        logger.error("Error occured while deploying node %s" % node_id)
        message = "Task %s failed: " \
                  "Deployment exited with non-zero exit code. IP=%s" \
                  % (bootstrap_node.request.id, node.ip)
        node_set_error_status(node.id)
        raise DeployError(message)
    logger.debug("Turning node %s status into 'ready'" % node_id)
    node.status = "ready"
    node.save()
    return exit_status
def _is_node_bootstrap(node):
    """Heuristic: the node is considered to be running the bootstrap
    image iff an ssh connection using the bootstrap key succeeds.

    Any connection failure (auth or otherwise) is treated as "not
    bootstrap"; this function never raises.
    """
    ssh_user = 'root'
    ssh_key = settings.PATH_TO_BOOTSTRAP_SSH_KEY
    logger.debug(
        "Checking if node %s is booted with bootstrap image" \
        % node.id
    )
    try:
        # NOTE(review): the two adjacent string literals concatenate
        # without a separator ("...bootstrap keyip: ..."); the message
        # is garbled in logs but behavior is otherwise unaffected.
        logger.debug(
            "Trying to establish ssh connection using bootstrap key" \
            "ip: %s key: %s user: %s" % \
            (node.ip,
             ssh_key,
             ssh_user)
        )
        ssh = SshConnect(
            node.ip,
            ssh_user,
            ssh_key
        )
    except (paramiko.AuthenticationException,
            paramiko.PasswordRequiredException):
        logger.debug("Auth error while ssh using bootstrap rsa key")
        return False
    except Exception:
        logger.debug("Unknown error while ssh using bootstrap rsa key")
        return False
    else:
        logger.debug("Ssh connection succeeded: key: %s" % \
            ssh_key)
        ssh.close()
        return True
# Call to Cobbler to make node ready.
def _provision_node(node_id):
    """Register the node in the Cobbler provisioning system and reboot
    it so installation starts.

    The power driver reaches the node over ssh; the key used depends on
    whether the node is currently booted with the bootstrap image (see
    _is_node_bootstrap).
    """
    node = Node.objects.get(id=node_id)
    pc = ProvisionConfig()
    pc.cn = "nailgun.provision.driver.cobbler.Cobbler"
    pc.url = settings.COBBLER_URL
    pc.user = settings.COBBLER_USER
    pc.password = settings.COBBLER_PASSWORD
    pd = ProvisionFactory.getInstance(pc)
    pf = ProvisionProfile(settings.COBBLER_PROFILE)
    ndp = ProvisionPower("ssh")
    ndp.power_user = "root"
    # Pick the ssh key matching what the node is currently booted with.
    if _is_node_bootstrap(node):
        logger.info("Node %s seems booted with bootstrap image" % node_id)
        ndp.power_pass = "rsa:%s" % settings.PATH_TO_BOOTSTRAP_SSH_KEY
    else:
        logger.info("Node %s seems booted with real system" % node_id)
        ndp.power_pass = "rsa:%s" % settings.PATH_TO_SSH_KEY
    ndp.power_address = node.ip
    nd = ProvisionNode(node_id)
    nd.driver = pd
    nd.mac = node.mac
    nd.profile = pf
    nd.pxe = True
    nd.kopts = ""
    nd.power = ndp
    logger.debug(
        "Trying to save node %s into provision system: profile: %s " % \
        (node_id, pf.name)
    )
    nd.save()
    logger.debug(
        "Trying to reboot node %s using %s in order to launch provisioning" % \
        (node_id, ndp.power_type)
    )
    nd.power_reboot()

View File

@ -1,11 +0,0 @@
from django_nose import NoseTestSuiteRunner
from djcelery.contrib.test_runner import CeleryTestSuiteRunner
from django.conf import settings
class MyRunner(NoseTestSuiteRunner, CeleryTestSuiteRunner):
    # Combined runner: nose-style discovery plus celery's test
    # environment setup (eager task execution).
    def setup_test_environment(self, **kwargs):
        super(MyRunner, self).setup_test_environment(**kwargs)
        # As we don't have it in production, it should not be used in tests
        settings.CELERY_EAGER_PROPAGATES_EXCEPTIONS = False

View File

@ -1,472 +0,0 @@
import simplejson as json
import mock
import celery
from django import http
from django.test import TestCase
from django.db.models import Model
from django.core.urlresolvers import reverse, NoReverseMatch
from piston.emitters import Emitter
from nailgun import models
from nailgun.models import Cluster
from nailgun.models import Node
from nailgun.models import Role
from nailgun.models import Release
from nailgun.models import Com
from nailgun.models import Point
from nailgun.models import EndPoint
from nailgun.api import urls as api_urls
from nailgun import tasks
# monkey patch!
def _construct_monkey(func):
    """Wrap Emitter.construct so a handler that returns a bare Django
    model fails fast instead of being serialized silently."""
    def wrapped(self=None, *args, **kwargs):
        if not isinstance(self.data, Model):
            return func(self, *args, **kwargs)
        raise NotImplementedError("Don't return model from handler!")
    return wrapped

Emitter.construct = _construct_monkey(Emitter.construct)
class TestHandlers(TestCase):
    """HTTP-level tests for the nailgun REST API handlers."""

    # default_cluster supplies the clusters/nodes/roles referenced below.
    fixtures = ['default_cluster']

    def setUp(self):
        self.request = http.HttpRequest()
        # Minimal metadata accepted by the node handler; every key is
        # required (see the 400-on-missing-attr tests below).
        self.new_meta = {'block_device': 'new-val',
                         'interfaces': 'd',
                         'cpu': 'u',
                         'memory': 'a'
                         }
        self.clusters = models.Cluster.objects.all()
        self.releases = models.Release.objects.all()
        self.roles = models.Role.objects.all()
        self.nodes = models.Node.objects.all()
        self.points = models.Point.objects.all()
        self.com = models.Com.objects.all()
        self.node_url = reverse('node_handler',
                                kwargs={'node_id': self.nodes[0].id})
        self.meta_json = json.dumps(self.new_meta)

    def tearDown(self):
        pass

    def test_all_api_urls_500(self):
        # Smoke-test every registered API url: GET must never return 5xx.
        test_urls = {}
        for pattern in api_urls.urlpatterns:
            test_urls[pattern.name] = pattern.callback.handler.allowed_methods
        # Concrete kwargs needed to reverse each parameterized url.
        url_ids = {
            'cluster_handler': {'cluster_id': self.clusters[0].id},
            'node_handler': {'node_id': 'A' * 12},
            'task_handler': {'task_id': 'a' * 36},
            'network_handler': {'network_id': 1},
            'release_handler': {'release_id': self.releases[0].id},
            'role_handler': {'role_id': self.roles[0].id},
            'endpoint_handler': {'node_id': self.nodes[0].id,
                                 'component_name': 'abc'},
            'point_handler': {'point_id': self.points[0].id},
            'com_handler': {'component_id': self.com[0].id},
            'node_role_available': {
                'node_id': 'A' * 12,
                'role_id': self.roles[0].id
            },
            'deployment_type_collection_handler': {
                'cluster_id': self.clusters[0].id
            },
        }
        skip_urls = [
            'task_handler'
        ]
        for url, methods in test_urls.iteritems():
            if url in skip_urls:
                continue
            kw = {}
            if url in url_ids:
                kw = url_ids[url]
            if 'GET' in methods:
                test_url = reverse(url, kwargs=kw)
                resp = self.client.get(test_url)
                self.assertNotEqual(str(resp.status_code)[0], '5')

    def test_cluster_creation(self):
        yet_another_cluster_name = 'Yet another cluster'
        resp = self.client.post(
            reverse('cluster_collection_handler'),
            json.dumps({
                'name': yet_another_cluster_name,
                'release': 1,
                'nodes': [self.nodes[0].id],
            }),
            "application/json"
        )
        self.assertEquals(resp.status_code, 200)
        clusters_from_db = Cluster.objects.filter(
            name=yet_another_cluster_name
        )
        self.assertEquals(len(clusters_from_db), 1)
        cluster = clusters_from_db[0]
        self.assertEquals(cluster.nodes.all()[0].id, self.nodes[0].id)
        # The new cluster's release is expected to expose three networks.
        self.assertEquals(len(cluster.release.networks.all()), 3)
        # test delete
        resp = self.client.delete(
            reverse('cluster_handler', kwargs={'cluster_id': cluster.id}),
            "",
            "application/json"
        )
        self.assertEquals(resp.status_code, 204)

    def test_cluster_update(self):
        updated_name = 'Updated cluster'
        clusters_before = len(Cluster.objects.all())
        resp = self.client.put(
            reverse('cluster_handler',
                    kwargs={'cluster_id': self.clusters[0].id}),
            json.dumps({'name': updated_name}),
            "application/json"
        )
        self.assertEquals(resp.status_code, 200)
        clusters_from_db = Cluster.objects.filter(name=updated_name)
        self.assertEquals(len(clusters_from_db), 1)
        self.assertEquals(clusters_from_db[0].name, updated_name)
        # PUT must rename in place, not create an extra cluster.
        clusters_after = len(Cluster.objects.all())
        self.assertEquals(clusters_before, clusters_after)

    def test_cluster_node_list_update(self):
        # PUT with a node list *replaces* the cluster's node set.
        resp = self.client.put(
            reverse('cluster_handler', kwargs={'cluster_id': 1}),
            json.dumps({'nodes': [self.nodes[0].id]}),
            "application/json"
        )
        self.assertEquals(resp.status_code, 200)
        nodes_from_db = Node.objects.filter(cluster_id=1)
        self.assertEquals(len(nodes_from_db), 1)
        self.assertEquals(nodes_from_db[0].id, self.nodes[0].id)
        resp = self.client.put(
            reverse('cluster_handler', kwargs={'cluster_id': 1}),
            json.dumps({'nodes': [self.nodes[1].id]}),
            "application/json"
        )
        self.assertEquals(resp.status_code, 200)
        nodes_from_db = Node.objects.filter(cluster_id=1)
        self.assertEquals(len(nodes_from_db), 1)
        self.assertEquals(nodes_from_db[0].id, self.nodes[1].id)

    def test_node_creation(self):
        node_id = '080000000003'
        resp = self.client.post(
            reverse('node_collection_handler'),
            json.dumps({'id': node_id}),
            "application/json")
        self.assertEquals(resp.status_code, 200)
        nodes_from_db = Node.objects.filter(id=node_id)
        self.assertEquals(len(nodes_from_db), 1)
        # test delete
        resp = self.client.delete(
            reverse('node_handler', kwargs={'node_id': node_id}),
            "",
            "application/json"
        )
        self.assertEquals(resp.status_code, 204)

    def test_node_creation_using_put(self):
        # PUT to a non-existent node id creates the node (upsert).
        node_id = '080000000002'
        resp = self.client.put(
            reverse('node_handler', kwargs={'node_id': node_id}),
            json.dumps({}),
            "application/json")
        self.assertEquals(resp.status_code, 200)
        nodes_from_db = Node.objects.filter(id=node_id)
        self.assertEquals(len(nodes_from_db), 1)

    def test_node_valid_metadata_gets_updated(self):
        resp = self.client.put(self.node_url,
                               json.dumps({'metadata': self.new_meta}),
                               "application/json")
        self.assertEquals(resp.status_code, 200)
        nodes_from_db = Node.objects.filter(id=self.nodes[0].id)
        self.assertEquals(len(nodes_from_db), 1)
        self.assertEquals(nodes_from_db[0].metadata, self.new_meta)

    def test_node_valid_status_gets_updated(self):
        params = {'status': 'error'}
        resp = self.client.put(self.node_url, json.dumps(params),
                               "application/json")
        self.assertEquals(resp.status_code, 200)

    def test_node_valid_list_of_new_roles_gets_updated(self):
        resp = self.client.put(self.node_url,
                               json.dumps({
                                   'new_roles': [self.roles[1].id],
                                   'redeployment_needed': True
                               }), "application/json"
                               )
        self.assertEquals(resp.status_code, 200)
        node_from_db = Node.objects.get(id=self.nodes[0].id)
        self.assertEquals(node_from_db.redeployment_needed, True)
        self.assertEquals(len(node_from_db.roles.all()), 1)
        self.assertEquals(len(node_from_db.new_roles.all()), 1)
        self.assertEquals(node_from_db.new_roles.all()[0].id,
                          self.roles[1].id)

    def test_put_returns_400_if_no_body(self):
        resp = self.client.put(self.node_url, None, "application/json")
        self.assertEquals(resp.status_code, 400)

    def test_put_returns_400_if_wrong_content_type(self):
        params = {'metadata': self.meta_json}
        resp = self.client.put(self.node_url, json.dumps(params), "plain/text")
        self.assertEquals(resp.status_code, 400)

    def test_put_returns_400_if_wrong_status(self):
        params = {'status': 'invalid_status'}
        resp = self.client.put(self.node_url, json.dumps(params),
                               "application/json")
        self.assertEquals(resp.status_code, 400)

    # Each metadata key is mandatory: dropping (or blanking) one must be
    # rejected with 400 and must leave the stored metadata untouched.
    def test_put_returns_400_if_no_block_device_attr(self):
        old_meta = self.nodes[0].metadata
        new_meta = self.new_meta.copy()
        del new_meta['block_device']
        resp = self.client.put(self.node_url,
                               json.dumps({'metadata': new_meta}),
                               "application/json")
        self.assertEquals(resp.status_code, 400)
        node_from_db = Node.objects.get(id=self.nodes[0].id)
        self.assertEquals(node_from_db.metadata, old_meta)

    def test_put_returns_400_if_no_interfaces_attr(self):
        old_meta = self.nodes[0].metadata
        new_meta = self.new_meta.copy()
        del new_meta['interfaces']
        resp = self.client.put(self.node_url,
                               json.dumps({'metadata': new_meta}),
                               "application/json")
        self.assertEquals(resp.status_code, 400)
        node_from_db = Node.objects.get(id=self.nodes[0].id)
        self.assertEquals(node_from_db.metadata, old_meta)

    def test_put_returns_400_if_interfaces_empty(self):
        old_meta = self.nodes[0].metadata
        new_meta = self.new_meta.copy()
        new_meta['interfaces'] = ""
        resp = self.client.put(self.node_url,
                               json.dumps({'metadata': new_meta}),
                               "application/json")
        self.assertEquals(resp.status_code, 400)
        node_from_db = Node.objects.get(id=self.nodes[0].id)
        self.assertEquals(node_from_db.metadata, old_meta)

    def test_put_returns_400_if_no_cpu_attr(self):
        old_meta = self.nodes[0].metadata
        new_meta = self.new_meta.copy()
        del new_meta['cpu']
        resp = self.client.put(self.node_url,
                               json.dumps({'metadata': new_meta}),
                               "application/json")
        self.assertEquals(resp.status_code, 400)
        node_from_db = Node.objects.get(id=self.nodes[0].id)
        self.assertEquals(node_from_db.metadata, old_meta)

    def test_put_returns_400_if_no_memory_attr(self):
        old_meta = self.nodes[0].metadata
        new_meta = self.new_meta.copy()
        del new_meta['memory']
        resp = self.client.put(self.node_url,
                               json.dumps({'metadata': new_meta}),
                               "application/json")
        self.assertEquals(resp.status_code, 400)
        node_from_db = Node.objects.get(id=self.nodes[0].id)
        self.assertEquals(node_from_db.metadata, old_meta)

    # (mihgen): Disabled - we don't have attributes anymore
    #def test_attribute_create(self):
    #resp = self.client.put(
    #reverse('attribute_collection_handler'),
    #json.dumps({
    #'attribute': {'a': 'av'},
    #'cookbook': 'cook_name',
    #'version': '0.1',
    #}), "application/json"
    #)
    #self.assertEquals(resp.status_code, 200)
    #self.assertEquals(resp.content, '1')

    #def test_attribute_update(self):
    #resp = self.client.put(
    #reverse('attribute_collection_handler'),
    #json.dumps({
    #'attribute': {'a': 'b'},
    #'cookbook': 'cook',
    #'version': '0.1',
    #}), "application/json"
    #)
    #self.assertEquals(resp.status_code, 200)
    #self.assertEquals(resp.content, '1')
    #resp = self.client.put(
    #reverse('attribute_collection_handler'),
    #json.dumps({
    #'attribute': {'a': 'new'},
    #'cookbook': 'cook',
    #'version': '0.1',
    #}), "application/json"
    #)
    #self.assertEquals(resp.status_code, 200)
    #self.assertEquals(resp.content, '1')
    #attrs = Attribute.objects.all()
    #self.assertEquals(len(attrs), 1)
    #self.assertEquals(attrs[0].attribute, {'a': 'new'})

    def test_role_create(self):
        role_name = 'My role 3'
        role_release = self.releases[0].id
        role_components = [c.name for c in self.com]
        resp = self.client.post(
            reverse('role_collection_handler'),
            json.dumps({
                'name': role_name,
                'release': role_release,
                'components': role_components
            }),
            "application/json"
        )
        self.assertEquals(resp.status_code, 200)
        roles_from_db = Role.objects.filter(name=role_name)
        self.assertEquals(len(roles_from_db), 1)
        components = [c.name for c in roles_from_db[0].components.all()]
        self.assertEquals(set(role_components), set(components))

    @mock.patch('nailgun.tasks.deploy_cluster', celery.task.task(lambda: True))
    def test_jsons_created_for_chef_solo(self):
        # Deployment itself is stubbed out; only the async bookkeeping
        # of the changes handler is exercised here.
        url = reverse('cluster_changes_handler', kwargs={'cluster_id': 1})
        resp = self.client.put(url)
        self.assertEquals(resp.status_code, 202)
        resp_json = json.loads(resp.content)
        # Celery task ids are 36-character uuid strings.
        self.assertEquals(len(resp_json['task_id']), 36)
        self.assertFalse(resp_json.get('error'))

    def test_release_create(self):
        release_name = "OpenStack"
        release_version = "1.0.0"
        release_description = "This is test release"
        release_roles = [{
            "name": "compute",
            "recipes": [
                "nova::compute@0.1.0",
                "nova::monitor@0.1.0"
            ]
        }, {
            "name": "controller",
            "recipes": [
                "cookbook::recipe@2.1"
            ]
        }
        ]
        resp = self.client.post(
            reverse('release_collection_handler'),
            json.dumps({
                'name': release_name,
                'version': release_version,
                'description': release_description,
                'roles': release_roles,
                'networks_metadata': [
                    {"name": "floating", "access": "public"},
                    {"name": "fixed", "access": "private"},
                    {"name": "storage", "access": "private"}
                ]
            }),
            "application/json"
        )
        self.assertEquals(resp.status_code, 200)
        # test duplicate release
        resp = self.client.post(
            reverse('release_collection_handler'),
            json.dumps({
                'name': release_name,
                'version': release_version,
                'description': release_description,
                'roles': release_roles,
                'networks_metadata': [
                    {"name": "fixed", "access": "private"}
                ]
            }),
            "application/json"
        )
        self.assertEquals(resp.status_code, 409)
        release_from_db = Release.objects.filter(
            name=release_name,
            version=release_version,
            description=release_description
        )
        self.assertEquals(len(release_from_db), 1)
        roles = []
        for rl in release_from_db[0].roles.all():
            roles.append({
                'name': rl.name,
                'recipes': [i.recipe for i in rl.recipes.all()]
            })
        for a, b in zip(sorted(roles), sorted(release_roles)):
            self.assertEquals(a, b)

    def test_network_create(self):
        network_data = {
            "name": "test_network",
            "network": "10.0.0.0/24",
            "range_l": "10.0.0.5",
            "range_h": "10.0.0.10",
            "gateway": "10.0.0.1",
            "vlan_id": 100,
            "release": 1,
            "access": "public"
        }
        resp = self.client.post(
            reverse('network_collection_handler'),
            json.dumps(network_data),
            "application/json"
        )
        self.assertEquals(resp.status_code, 200)
        # Duplicate network -> conflict.
        resp = self.client.post(
            reverse('network_collection_handler'),
            json.dumps(network_data),
            "application/json"
        )
        self.assertEquals(resp.status_code, 409)
        # Malformed CIDR -> bad request.
        network_data["network"] = "test_fail"
        resp = self.client.post(
            reverse('network_collection_handler'),
            json.dumps(network_data),
            "application/json"
        )
        self.assertEqual(resp.status_code, 400)

View File

@ -1,24 +0,0 @@
from django.test import TestCase
from nailgun.models import Node, Role
class TestNodeModel(TestCase):
    """Smoke test: a Node instance persists all assigned fields."""

    def test_creating_new_node_and_save_to_db(self):
        node = Node()
        node.id = "080000000001"
        node.cluster_id = 1
        node.name = "0-test_server.name.com"
        node.metadata = {'metakey': 'metavalue'}
        node.save()
        saved_nodes = Node.objects.all()
        self.assertEquals(len(saved_nodes), 1)
        persisted = saved_nodes[0]
        self.assertEquals(persisted, node)
        self.assertEquals(persisted.name, "0-test_server.name.com")
        self.assertEquals(persisted.cluster_id, 1)
        self.assertEquals(persisted.metadata,
                          {'metakey': 'metavalue'})

View File

@ -1,9 +0,0 @@
from django.test import TestCase
class TestSampleEnvironmentFixtureLoad(TestCase):
    # Loading the fixture IS the test: Django's fixture setup fails if
    # sample_environment no longer matches the current models.
    fixtures = ['sample_environment']

    def test(self):
        pass

View File

@ -1,261 +0,0 @@
import os
import json
import mock
from mock import call
from django.test import TestCase
from django.db.models import Model
from django.conf import settings
from celery.task import task
from nailgun import tasks
from nailgun import models
from nailgun import exceptions
from nailgun import task_helpers
class TestTasks(TestCase):
fixtures = ['default_cluster']
    def setUp(self):
        # Fixture-backed objects reused by every test method below.
        self.cluster = models.Cluster.objects.get(pk=1)
        self.nodes = models.Node.objects.all()
        self.node = self.nodes[0]
        self.components = models.Com.objects.all()
        self.component = self.components[0]
        self.roles = models.Role.objects.all()
    def tearDown(self):
        # Nothing to clean up; the TestCase transaction rollback suffices.
        pass
    @mock.patch('nailgun.tasks.tcp_ping')
    @mock.patch('nailgun.tasks.SshConnect')
    @mock.patch('nailgun.tasks._provision_node')
    def test_bootstrap_node(self, pn_mock, ssh_mock, tp_mock):
        # With ssh/ping/provision all succeeding, a "ready" node ends
        # the task still in "ready" state.
        ssh = ssh_mock.return_value
        ssh.run.return_value = True
        pn_mock.return_value = True
        tp_mock.return_value = True
        self.assertEquals(self.node.status, "ready")
        res = tasks.bootstrap_node.delay(self.node.id, self.component.name)
        self.assertEquals(res.state, "SUCCESS")
        node = models.Node.objects.get(id=self.node.id)
        self.assertEquals(node.status, "ready")
    @mock.patch('nailgun.tasks.tcp_ping')
    @mock.patch('nailgun.tasks.SshConnect')
    def test_bootstrap_calls_provision_and_ssh(self, ssh_mock, tp_mock):
        # A "discover" node must be provisioned once, then the deploy
        # script launched over ssh exactly once.
        ssh = ssh_mock.return_value
        ssh.run = mock.MagicMock(return_value=True)
        tp_mock.return_value = True
        self.node.status = "discover"
        self.node.save()
        tasks._provision_node = mock.MagicMock(return_value=None)
        tasks.bootstrap_node(self.node.id, self.component.name)
        self.assertEquals(tasks._provision_node.call_args_list,
                          [call(self.node.id)])
        self.assertEquals(ssh.run.call_args_list,
                          [call('/opt/nailgun/bin/deploy %s' % self.component.name)])
@mock.patch('nailgun.tasks.tcp_ping')
@mock.patch('nailgun.tasks.SshConnect')
def test_bootstrap_does_not_call_provision(self, ssh_mock, tp_mock):
ssh = ssh_mock.return_value
ssh.run.return_value = True
tp_mock.return_value = True
tasks._provision_node = mock.MagicMock(return_value=None)
tasks.bootstrap_node(self.node.id, self.component.name)
self.assertEquals(tasks._provision_node.call_args_list, [])
@mock.patch('nailgun.tasks.tcp_ping')
@mock.patch('nailgun.tasks.SshConnect')
@mock.patch('nailgun.tasks._provision_node')
def test_bootstrap_raises_deploy_error(self, pn_mock, ssh_mock, tp_mock):
ssh = ssh_mock.return_value
ssh.run.return_value = False
pn_mock.return_value = True
tp_mock.return_value = True
with self.assertRaises(exceptions.DeployError):
tasks.bootstrap_node(self.node.id, self.component.name)
@mock.patch('nailgun.tasks.tcp_ping')
@mock.patch('nailgun.tasks.SshConnect')
@mock.patch('nailgun.tasks._provision_node')
def test_bootstrap_puts_error_in_task(self, pn_mock, ssh_mock, tp_mock):
ssh = ssh_mock.return_value
ssh.run.return_value = False
pn_mock.return_value = True
tp_mock.return_value = True
self.assertEquals(self.node.status, "ready")
res = tasks.bootstrap_node.delay(self.node.id, self.component.name)
self.assertEquals(res.state, "FAILURE")
self.assertIsInstance(res.result, exceptions.DeployError)
self.assertTrue(res.ready)
node = models.Node.objects.get(id=self.node.id)
self.assertEquals(node.status, "error")
@mock.patch('nailgun.tasks.TaskPool')
def test_one_recipe_deploy_cluster(self, tp):
tasks.deploy_cluster(self.cluster.id)
expected = [call()]
for node in self.cluster.nodes.all():
for role in node.roles.all():
for component in role.components.all():
expected.append(call().push_task([{
'args': [node.id, component.name],
'func': tasks.bootstrap_node,
'kwargs': {}
}]))
expected.append(call().push_task(tasks.update_cluster_status,
(self.cluster.id,)))
expected.append(call().apply_async())
self.assertEquals(tasks.TaskPool.mock_calls, expected)
# FIXME(vkramskikh): recipe test, rework using components and points
# @mock.patch('nailgun.tasks.TaskPool')
# def test_deploy_cluster_with_recipe_deps(self, tp):
# # 0: 1,2; 1: 2; 2: ; 3: 2
# # Rigth order: 2,1,0,3
# rcps = [models.Recipe() for x in range(4)]
# for i, rec in enumerate(rcps):
# rec.recipe = 'cookbook::recipe%s@0.1' % i
# rec.save()
#
# rcps[0].depends = [rcps[1], rcps[2]]
# rcps[1].depends = [rcps[2]]
# rcps[2].depends = []
# rcps[3].depends = [rcps[2]]
# map(lambda r: r.save(), rcps)
#
# roles = [models.Role() for x in range(3)]
# for i, role in enumerate(roles):
# role.name = "Role%s" % i
# role.save()
#
# roles[0].recipes = [rcps[0], rcps[2]]
# roles[1].recipes = [rcps[3]]
# roles[2].recipes = [rcps[1]]
# map(lambda r: r.save(), roles)
#
# nodes = [models.Node() for x in range(2)]
# for i, node in enumerate(nodes):
# node.name = "Node-%s" % i
# node.id = "FF000000000%s" % i
# node.ip = "127.0.0.%s" % i
# node.cluster_id = 1
# node.save()
# nodes[0].roles = [roles[0]]
# nodes[1].roles = [roles[1], roles[2]]
#
# tasks.deploy_cluster('1')
# expected = [
# # init
# call(),
# # first recipe, no deps, defined in setUp
# call().push_task(tasks.create_solo, ('1', self.recipe.id)),
# call().push_task([{'args': [self.node.id, self.component.name],
# 'func': tasks.bootstrap_node, 'kwargs': {}}]),
# # Applying in order 2-> 1-> 0-> 3
# call().push_task(tasks.create_solo, ('1', rcps[2].id)),
# call().push_task([{'args': [nodes[0].id, self.component.name],
# 'func': tasks.bootstrap_node, 'kwargs': {}}]),
# call().push_task(tasks.create_solo, ('1', rcps[1].id)),
# call().push_task([{'args': [nodes[1].id, self.component.name],
# 'func': tasks.bootstrap_node, 'kwargs': {}}]),
# call().push_task(tasks.create_solo, ('1', rcps[0].id)),
# call().push_task([{'args': [nodes[0].id, self.component.name],
# 'func': tasks.bootstrap_node, 'kwargs': {}}]),
# call().push_task(tasks.create_solo, ('1', rcps[3].id)),
# call().push_task([{'args': [nodes[1].id, self.component.name],
# 'func': tasks.bootstrap_node, 'kwargs': {}}]),
# # Last task for chord to succeed
# call().push_task(tasks.update_cluster_status, ('1',)),
# call().apply_async()
# ]
# self.assertEquals(tasks.TaskPool.mock_calls, expected)
# FIXME(vkramskikh): recipe test, rework using components and points
# def test_deploy_cluster_error_when_recipe_not_in_cluster(self):
# rcps = [models.Recipe() for x in range(4)]
# for i, rec in enumerate(rcps):
# rec.recipe = 'cookbook::recipe%s@0.1' % i
# rec.save()
# rcps[0].depends = [rcps[1], rcps[2]]
# rcps[1].depends = [rcps[2]]
# rcps[2].depends = [rcps[3]]
# rcps[3].depends = []
# map(lambda r: r.save(), rcps)
#
# roles = [models.Role() for x in range(3)]
# for i, role in enumerate(roles):
# role.name = "Role%s" % i
# role.save()
#
# roles[0].recipes = [rcps[0], rcps[3]]
# roles[1].recipes = [rcps[2]]
# map(lambda r: r.save(), roles)
# self.node.roles = roles
# self.node.save()
#
# graph = {}
# for recipe in models.Recipe.objects.filter(
# recipe__in=DeployGenerator.recipes(1)):
# graph[recipe.recipe] = [r.recipe for r in recipe.depends.all()]
#
# self.assertRaises(exceptions.DeployError, tasks.deploy_cluster, '1')
@mock.patch('nailgun.tasks.TaskPool')
def test_deploy_cluster_takes_right_cluster(self, tp):
node = models.Node()
node.id = "010000000007"
node.ip = "127.0.0.1"
# It will be node from other cluster
node.cluster_id = 2
node.save()
node.roles = [self.roles[0]]
node.save()
tasks.deploy_cluster(self.cluster.id)
expected = [call()]
for node in self.cluster.nodes.all():
for role in node.roles.all():
for component in role.components.all():
expected.append(call().push_task([{
'args': [node.id, component.name],
'func': tasks.bootstrap_node,
'kwargs': {}
}]))
expected.append(call().push_task(tasks.update_cluster_status,
(self.cluster.id,)))
expected.append(call().apply_async())
self.assertEquals(tasks.TaskPool.mock_calls, expected)
# FIXME(vkramskikh): recipe test, rework using components
# def test_deploy_cluster_nodes_with_same_recipes_generates_group(self, tp):
# # Adding second node with same recipes/roles
# node = models.Node()
# node.id = "FFF000000007"
# node.ip = "127.0.0.1"
# node.cluster_id = 1
# node.save()
# node.roles = [self.role]
# node.save()
#
# tasks.deploy_cluster('1')
# expected = [
# call(),
# call().push_task(tasks.create_solo, ('1', self.recipe.id)),
# call().push_task([{'args': [self.node.id, self.component.name],
# 'func': tasks.bootstrap_node, 'kwargs': {}},
# {'args': [node.id, self.component.name],
# 'func': tasks.bootstrap_node, 'kwargs': {}}]),
# call().push_task(tasks.update_cluster_status, ('1',)),
# call().apply_async()
# ]
#

View File

@ -1,10 +0,0 @@
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# Route /api/* to the REST API application and everything else to the
# web UI. Order matters: the catch-all r'^' pattern must stay last.
urlpatterns = patterns('',
    (r'^api/', include('nailgun.api.urls')),
    (r'^', include('nailgun.webui.urls')),
)

View File

@ -1 +0,0 @@
# Filesystem path of the virtualenv whose site-packages should be added
# at WSGI startup (consumed via the nailgun.venv.VENV check in the WSGI
# config); None disables virtualenv activation.
VENV = None

View File

@ -1,14 +0,0 @@
from django.conf.urls import patterns, include, url
from django.conf import settings
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# Serve the single-page UI entry point (index.html) from the static root.
# NOTE(review): django.views.static.serve is intended for development use;
# confirm production deployments serve static files via the web server.
urlpatterns = patterns('',
    url(r'^$', 'django.views.static.serve',
        {
            'document_root': settings.STATIC_DOC_ROOT,
            'path': 'index.html'
        }, name='index'),
)

View File

@ -1,46 +0,0 @@
"""
WSGI config for ngui project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
import site
import nailgun.venv
if nailgun.venv.VENV:
prev_sys_path = list(sys.path)
site.addsitedir(nailgun.venv.VENV)
new_sys_path = []
for item in list(sys.path):
if item not in prev_sys_path:
new_sys_path.append(item)
sys.path.remove(item)
sys.path[:0] = new_sys_path
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nailgun.settings")
import monitor
monitor.start(interval=1.0)
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)

View File

@ -1,80 +0,0 @@
#!/bin/bash
# Test runner: executes the nailgun Django test suite and a PEP8 check.

# Print usage information and exit.
function usage {
    echo "Usage: $0 [OPTION]..."
    echo "Run tests"
    echo ""
    echo "  -p, --pep8    Just run PEP8 and HACKING compliance check"
    echo "  -x, --xunit   Generate reports (useful in Jenkins environment)"
    echo "  -P, --no-pep8 Don't run static code checks"
    echo "  -c, --clean   Only clean *.log, *.json, *.pyc, *.pid files, doesn't run tests"
    echo "  -h, --help    Print this usage message"
    echo ""
    echo "By default it runs tests and pep8 check."
    exit
}

# Classify a single command-line token: known flags set variables,
# unknown options accumulate into noseopts, everything else into noseargs.
function process_option {
    case "$1" in
        -h|--help) usage;;
        -p|--pep8) just_pep8=1;;
        -P|--no-pep8) no_pep8=1;;
        -x|--xunit) xunit=1;;
        -c|--clean) clean=1;;
        -*) noseopts="$noseopts $1";;
        *) noseargs="$noseargs $1";;
    esac
}

just_pep8=0
no_pep8=0
xunit=0
clean=0
noseargs=
noseopts=

# Bug fix: quote "$arg" so arguments containing whitespace or glob
# characters are passed through intact instead of being re-split.
for arg in "$@"; do
    process_option "$arg"
done

# Remove test/build residue from the working tree.
function clean {
    echo "cleaning *.pyc, *.json, *.log, *.pid files"
    find . -type f -name "*.pyc" -delete
    rm -f *.json
    rm -f *.log
    rm -f *.pid
}

if [ "$clean" -eq 1 ]; then
    clean
    exit 0
fi

# If enabled, tell nose to create xunit report.
# Bug fix: append to noseopts instead of overwriting it, so options
# already collected from unrecognized "-*" arguments are preserved.
if [ "$xunit" -eq 1 ]; then
    noseopts="$noseopts --with-xunit"
fi

# Run the static style check; fail if pep8 reports any problem.
function run_pep8 {
    pep8 --show-source --show-pep8 --count . || return 1
    echo "PEP8 check passed successfully."
}

if [ "$just_pep8" -eq 1 ]; then
    run_pep8 || exit 1
    exit
fi

# Run the Django test suite. Default target is the nailgun app unless
# explicit test arguments were supplied on the command line.
function run_tests {
    clean
    [ -z "$noseargs" ] && test_args=nailgun || test_args="$noseargs"
    # $noseopts and $test_args are intentionally unquoted: each may hold
    # several whitespace-separated tokens that must word-split here.
    python manage.py test $noseopts $test_args
}

run_tests || exit 1

# Unless explicit test targets were requested, finish with the PEP8 check
# (skipped when -P/--no-pep8 was given).
if [ -z "$noseargs" ]; then
    if [ "$no_pep8" -eq 0 ]; then
        run_pep8
    fi
fi

9
nailgun/settings.py Normal file
View File

@ -0,0 +1,9 @@
# -*- coding: utf-8 -*-
# Location of the SQLite database file, relative to the working directory.
DATABASE_PATH = 'nailgun.sqlite'

# SQLAlchemy engine URL pointing at the file above.
DATABASE_ENGINE = 'sqlite:///' + DATABASE_PATH

# CIDR address pools from which cluster networks are allocated.
NETWORK_POOLS = {
    'public': ['172.18.0.0/16'],
    'private': ['10.1.0.0/16'],
}

View File

Before

Width:  |  Height:  |  Size: 127 KiB

After

Width:  |  Height:  |  Size: 127 KiB

View File

Before

Width:  |  Height:  |  Size: 14 KiB

After

Width:  |  Height:  |  Size: 14 KiB

View File

Before

Width:  |  Height:  |  Size: 14 KiB

After

Width:  |  Height:  |  Size: 14 KiB

View File

Before

Width:  |  Height:  |  Size: 8.6 KiB

After

Width:  |  Height:  |  Size: 8.6 KiB

View File

Before

Width:  |  Height:  |  Size: 14 KiB

After

Width:  |  Height:  |  Size: 14 KiB

View File

Before

Width:  |  Height:  |  Size: 2.8 KiB

After

Width:  |  Height:  |  Size: 2.8 KiB

View File

Before

Width:  |  Height:  |  Size: 2.8 KiB

After

Width:  |  Height:  |  Size: 2.8 KiB

View File

Before

Width:  |  Height:  |  Size: 3.6 KiB

After

Width:  |  Height:  |  Size: 3.6 KiB

View File

Before

Width:  |  Height:  |  Size: 2.8 KiB

After

Width:  |  Height:  |  Size: 2.8 KiB

View File

Before

Width:  |  Height:  |  Size: 1.5 KiB

After

Width:  |  Height:  |  Size: 1.5 KiB

View File

Before

Width:  |  Height:  |  Size: 4.0 KiB

After

Width:  |  Height:  |  Size: 4.0 KiB

View File

Before

Width:  |  Height:  |  Size: 1.7 KiB

After

Width:  |  Height:  |  Size: 1.7 KiB

View File

Before

Width:  |  Height:  |  Size: 1.1 KiB

After

Width:  |  Height:  |  Size: 1.1 KiB

View File

Before

Width:  |  Height:  |  Size: 5.2 KiB

After

Width:  |  Height:  |  Size: 5.2 KiB

View File

Before

Width:  |  Height:  |  Size: 1.1 KiB

After

Width:  |  Height:  |  Size: 1.1 KiB

View File

Before

Width:  |  Height:  |  Size: 1.8 KiB

After

Width:  |  Height:  |  Size: 1.8 KiB

View File

Before

Width:  |  Height:  |  Size: 10 KiB

After

Width:  |  Height:  |  Size: 10 KiB

Some files were not shown because too many files have changed in this diff Show More