Merge pull request #1518 from Mirantis/custom-attributes-for-provisioning

Extract provisioning serialization to separate class
This commit is contained in:
Dmitry Pyzhov 2013-09-18 01:56:32 -07:00
commit f13add2b68
15 changed files with 437 additions and 286 deletions

View File

@ -37,7 +37,6 @@ from nailgun.db import db
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.network.manager import NetworkManager
from nailgun import orchestrator
from nailgun.task.manager import ClusterDeletionManager
from nailgun.task.manager import DeploymentTaskManager
@ -371,69 +370,3 @@ class ClusterAttributesDefaultsHandler(JSONHandler):
' editable attributes for cluster_id %s were reset'
' to default' % cluster_id)
return {"editable": cluster.attributes.editable}
class ClusterDefaultOrchestratorData(JSONHandler):
    """Expose the default (computed) data which will be passed
    to orchestrator for a cluster.
    """

    @content_json
    def GET(self, cluster_id):
        """:returns: JSONized default data which will be passed to orchestrator
        :http: * 200 (OK)
               * 404 (cluster not found in db)
        """
        # Recomputed on every request; nothing is cached on the cluster.
        target = self.get_object_or_404(Cluster, cluster_id)
        return orchestrator.serializers.serialize(target)
class ClusterOrchestratorData(JSONHandler):
    """CRUD access to the user-replaced orchestrator facts of a cluster.

    When ``cluster.facts`` is non-empty it is used instead of the
    computed serialization at deployment time.
    """

    @content_json
    def GET(self, cluster_id):
        """:returns: JSONized data which will be passed to orchestrator
        :http: * 200 (OK)
               * 404 (cluster not found in db)
        """
        cluster = self.get_object_or_404(Cluster, cluster_id)
        return cluster.facts

    @content_json
    def PUT(self, cluster_id):
        """Replace the stored orchestrator facts with the request body.

        :returns: JSONized data which will be passed to orchestrator
        :http: * 200 (OK)
               * 400 (wrong data specified)
               * 404 (cluster not found in db)
        """
        cluster = self.get_object_or_404(
            Cluster,
            cluster_id,
            log_404=(
                "warning",
                "Error: there is no cluster "
                "with id '{0}' in DB.".format(cluster_id)
            )
        )
        data = self.checked_data()
        cluster.facts = data
        db().commit()
        # Bug fix: this log line previously reported the wrong handler name
        # ('ClusterDefaultOrchestratorData'), which made log grepping
        # misleading.
        logger.debug('ClusterOrchestratorData:'
                     ' facts for cluster_id {0} were uploaded'
                     .format(cluster_id))
        return data

    @content_json
    def DELETE(self, cluster_id):
        """Reset the stored facts; deployment falls back to defaults.

        :returns: {}
        :http: * 202 (orchestrator data deletion process launched)
               * 400 (failed to execute orchestrator data deletion process)
               * 404 (cluster not found in db)
        """
        cluster = self.get_object_or_404(Cluster, cluster_id)
        cluster.facts = {}
        db().commit()
        raise web.accepted(data="{}")

View File

@ -0,0 +1,123 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import web
from nailgun.api.handlers.base import content_json
from nailgun.api.handlers.base import JSONHandler
from nailgun.api.models import Cluster
from nailgun.db import db
from nailgun.logger import logger
from nailgun.orchestrator import deployment_serializers
from nailgun.orchestrator import provisioning_serializers
class DefaultOrchestratorInfo(JSONHandler):
    """Base class for default orchestrator data.

    Subclasses must redefine the ``serializer`` class attribute with a
    module or object exposing ``serialize(cluster)``.
    """

    # Overridden in subclasses (DefaultProvisioningInfo /
    # DefaultDeploymentInfo).
    serializer = None

    @content_json
    def GET(self, cluster_id):
        """:returns: JSONized default data which will be passed to orchestrator
        :http: * 200 (OK)
               * 404 (cluster not found in db)
        """
        cluster = self.get_object_or_404(Cluster, cluster_id)
        # Bug fix: the bare name `serializer` is undefined at module scope,
        # so the original line raised NameError at request time; the class
        # attribute must be looked up through the instance.
        return self.serializer.serialize(cluster)
class OrchestratorInfo(JSONHandler):
    """Base class for replaced data.

    Concrete subclasses wire the two accessor hooks below to a
    particular cluster attribute.
    """

    def get_orchestrator_info(self, cluster):
        """Hook: return the data which will be passed to orchestrator."""
        raise NotImplementedError('Please Implement this method')

    def update_orchestrator_info(self, cluster, data):
        """Hook: override the data which will be passed to orchestrator."""
        raise NotImplementedError('Please Implement this method')

    @content_json
    def GET(self, cluster_id):
        """:returns: JSONized data which will be passed to orchestrator
        :http: * 200 (OK)
               * 404 (cluster not found in db)
        """
        found = self.get_object_or_404(Cluster, cluster_id)
        return self.get_orchestrator_info(found)

    @content_json
    def PUT(self, cluster_id):
        """:returns: JSONized data which will be passed to orchestrator
        :http: * 200 (OK)
               * 400 (wrong data specified)
               * 404 (cluster not found in db)
        """
        found = self.get_object_or_404(Cluster, cluster_id)
        payload = self.checked_data()
        self.update_orchestrator_info(found, payload)
        logger.debug(
            'OrchestratorInfo: facts for cluster_id {0} were uploaded'.format(
                cluster_id))
        return payload

    @content_json
    def DELETE(self, cluster_id):
        """:returns: {}
        :http: * 202 (orchestrator data deletion process launched)
               * 400 (failed to execute orchestrator data deletion process)
               * 404 (cluster not found in db)
        """
        found = self.get_object_or_404(Cluster, cluster_id)
        # Replacing with {} restores the default serialization behaviour.
        self.update_orchestrator_info(found, {})
        raise web.accepted(data="{}")
class DefaultProvisioningInfo(DefaultOrchestratorInfo):
    # Serializer module used to compute the default provisioning payload.
    serializer = provisioning_serializers
class DefaultDeploymentInfo(DefaultOrchestratorInfo):
    # Serializer module used to compute the default deployment payload.
    serializer = deployment_serializers
class ProvisioningInfo(OrchestratorInfo):
    """Read/write access to user-replaced provisioning data."""

    def get_orchestrator_info(self, cluster):
        # An empty dict means "not replaced"; defaults are used instead.
        return cluster.replaced_provisioning_info

    def update_orchestrator_info(self, cluster, data):
        cluster.replaced_provisioning_info = data
        db().commit()
        return cluster.replaced_provisioning_info
class DeploymentInfo(OrchestratorInfo):
    """Read/write access to user-replaced deployment data."""

    def get_orchestrator_info(self, cluster):
        # An empty dict means "not replaced"; defaults are used instead.
        return cluster.replaced_deployment_info

    def update_orchestrator_info(self, cluster, data):
        cluster.replaced_deployment_info = data
        db().commit()
        return cluster.replaced_deployment_info

View File

@ -123,7 +123,6 @@ class Cluster(Base):
STATUSES = ('new', 'deployment', 'operational', 'error', 'remove')
NET_MANAGERS = ('FlatDHCPManager', 'VlanManager')
GROUPING = ('roles', 'hardware', 'both')
facts = Column(JSON, default={})
id = Column(Integer, primary_key=True)
mode = Column(
Enum(*MODES, name='cluster_mode'),
@ -162,6 +161,8 @@ class Cluster(Base):
notifications = relationship("Notification", backref="cluster")
network_groups = relationship("NetworkGroup", backref="cluster",
cascade="delete")
replaced_deployment_info = Column(JSON, default={})
replaced_provisioning_info = Column(JSON, default={})
@property
def is_ha_mode(self):
@ -227,12 +228,21 @@ class Cluster(Base):
TaskHelper.update_slave_nodes_fqdn(nodes)
nodes_ids = sorted([n.id for n in nodes])
netmanager = NetworkManager()
if nodes_ids:
netmanager = NetworkManager()
netmanager.assign_ips(nodes_ids, 'management')
netmanager.assign_ips(nodes_ids, 'public')
netmanager.assign_ips(nodes_ids, 'storage')
    def prepare_for_provisioning(self):
        """Assign admin-network IPs to every node awaiting provisioning.

        One IP is requested per discovered interface of each node.
        """
        # NOTE(review): imported locally, presumably to avoid a circular
        # import with the models module — confirm.
        from nailgun.network.manager import NetworkManager
        from nailgun.task.helpers import TaskHelper
        netmanager = NetworkManager()
        for node in TaskHelper.nodes_to_provision(self):
            netmanager.assign_admin_ips(
                node.id, len(node.meta.get('interfaces', [])))
class Node(Base):
__tablename__ = 'nodes'
@ -281,6 +291,10 @@ class Node(Base):
interfaces = relationship("NodeNICInterface", backref="node",
cascade="delete")
    @property
    def offline(self):
        # Convenience inverse of the persisted `online` flag.
        return not self.online
@property
def network_data(self):
# It is required for integration tests; to get info about nets

View File

@ -20,9 +20,11 @@ from nailgun.api.handlers.cluster import ClusterHandler
from nailgun.api.handlers.cluster import ClusterCollectionHandler
from nailgun.api.handlers.cluster import ClusterChangesHandler
from nailgun.api.handlers.cluster import ClusterAttributesHandler
from nailgun.api.handlers.cluster import ClusterOrchestratorData
from nailgun.api.handlers.cluster import ClusterAttributesDefaultsHandler
from nailgun.api.handlers.cluster import ClusterDefaultOrchestratorData
from nailgun.api.handlers.orchestrator import DefaultProvisioningInfo
from nailgun.api.handlers.orchestrator import DefaultDeploymentInfo
from nailgun.api.handlers.orchestrator import ProvisioningInfo
from nailgun.api.handlers.orchestrator import DeploymentInfo
from nailgun.api.handlers.network_configuration \
import NetworkConfigurationHandler
@ -86,10 +88,15 @@ urls = (
'NetworkConfigurationHandler',
r'/clusters/(?P<cluster_id>\d+)/network_configuration/verify/?$',
'NetworkConfigurationVerifyHandler',
r'/clusters/(?P<cluster_id>\d+)/orchestrator/?$',
'ClusterOrchestratorData',
r'/clusters/(?P<cluster_id>\d+)/orchestrator/defaults/?$',
'ClusterDefaultOrchestratorData',
r'/clusters/(?P<cluster_id>\d+)/orchestrator/deployment/?$',
'DeploymentInfo',
r'/clusters/(?P<cluster_id>\d+)/orchestrator/deployment/defaults/?$',
'DefaultDeploymentInfo',
r'/clusters/(?P<cluster_id>\d+)/orchestrator/provisioning/?$',
'ProvisioningInfo',
r'/clusters/(?P<cluster_id>\d+)/orchestrator/provisioning/defaults/?$',
'DefaultProvisioningInfo',
r'/nodes/?$',
'NodeCollectionHandler',

View File

@ -14,7 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
"""Serializers for orchestrator"""
"""Deployment serializers for orchestrator"""
from nailgun.api.models import NetworkGroup
from nailgun.api.models import Node

View File

@ -0,0 +1,163 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provisioning serializers for orchestrator"""
import json
from nailgun.api.models import IPAddr
from nailgun.db import db
from nailgun.logger import logger
from nailgun.network.manager import NetworkManager
from nailgun.settings import settings
from nailgun.task.helpers import TaskHelper
class ProvisioningSerializer(object):
    """Provisioning serializer.

    Builds the cobbler-oriented message consumed by the provisioning
    orchestrator: engine credentials plus one record per node to
    provision.
    """

    @classmethod
    def serialize(cls, cluster):
        """Serialize cluster for provisioning."""
        serialized_nodes = cls.serialize_nodes(cluster)
        return {
            'engine': {
                'url': settings.COBBLER_URL,
                'username': settings.COBBLER_USER,
                'password': settings.COBBLER_PASSWORD},
            'nodes': serialized_nodes}

    @classmethod
    def serialize_nodes(cls, cluster):
        """Serialize nodes scheduled for provisioning."""
        nodes_to_provision = TaskHelper.nodes_to_provision(cluster)
        # Cluster-wide attributes (cobbler profile, auth_key, ...) are
        # shared by every node record.
        cluster_attrs = cluster.attributes.merged_attrs_values()
        serialized_nodes = []
        for node in nodes_to_provision:
            serialized_node = cls.serialize_node(cluster_attrs, node)
            serialized_nodes.append(serialized_node)
        return serialized_nodes

    @classmethod
    def serialize_node(cls, cluster_attrs, node):
        """Serialize a single node into a cobbler system record."""
        serialized_node = {
            'power_address': node.ip,
            'name': TaskHelper.make_slave_name(node.id),
            'hostname': node.fqdn,
            # Power management is done over ssh; the key path depends on
            # whether the node still runs the bootstrap image.
            'power_pass': cls.get_ssh_key_path(node),

            'profile': cluster_attrs['cobbler']['profile'],
            'power_type': 'ssh',
            'power_user': 'root',
            # Values are wrapped in literal quotes for cobbler's templating.
            'name_servers': '\"%s\"' % settings.DNS_SERVERS,
            'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
            'netboot_enabled': '1',
            'ks_meta': {
                # Volume layout is passed as an escaped JSON string so it
                # survives cobbler ks_meta quoting.
                'ks_spaces': "\"%s\"" % json.dumps(
                    node.attributes.volumes).replace("\"", "\\\""),
                'puppet_auto_setup': 1,
                'puppet_master': settings.PUPPET_MASTER_HOST,
                'puppet_version': settings.PUPPET_VERSION,
                'puppet_enable': 0,
                'mco_auto_setup': 1,
                'install_log_2_syslog': 1,
                'mco_pskey': settings.MCO_PSKEY,
                'mco_vhost': settings.MCO_VHOST,
                'mco_host': settings.MCO_HOST,
                'mco_user': settings.MCO_USER,
                'mco_password': settings.MCO_PASSWORD,
                'mco_connector': settings.MCO_CONNECTOR,
                'mco_enable': 1,
                'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', '')}}

        serialized_node.update(cls.serialize_interfaces(node))

        return serialized_node

    @classmethod
    def serialize_interfaces(cls, node):
        """Build 'interfaces' / 'interfaces_extra' sections for a node."""
        interfaces = {}
        interfaces_extra = {}
        admin_ips = cls.get_admin_ips(node)

        for interface in node.meta.get('interfaces', []):
            name = interface['name']

            interfaces[name] = {
                'mac_address': interface['mac'],
                'static': '0',
                'netmask': settings.ADMIN_NETWORK['netmask'],
                # NOTE(review): set.pop() returns an arbitrary element, so
                # the IP-to-interface pairing is nondeterministic; raises
                # KeyError if there are fewer admin IPs than interfaces —
                # confirm assign_admin_ips always provides one per interface.
                'ip_address': admin_ips.pop()}

            # interfaces_extra field in cobbler ks_meta
            # means some extra data for network interfaces
            # configuration. It is used by cobbler snippet.
            # For example, cobbler interface model does not
            # have 'peerdns' field, but we need this field
            # to be configured. So we use interfaces_extra
            # branch in order to set this unsupported field.
            interfaces_extra[name] = {
                'peerdns': 'no',
                'onboot': 'no'}

            # We want node to be able to PXE boot via any of its
            # interfaces. That is why we add all discovered
            # interfaces into cobbler system. But we want
            # assigned fqdn to be resolved into one IP address
            # because we don't completely support multiinterface
            # configuration yet.
            if interface['mac'] == node.mac:
                interfaces[name]['dns_name'] = node.fqdn
                interfaces_extra[name]['onboot'] = 'yes'

        return {
            'interfaces': interfaces,
            'interfaces_extra': interfaces_extra}

    @classmethod
    def get_admin_ips(cls, node):
        """Return the set of admin-network IPs assigned to the node."""
        netmanager = NetworkManager()
        admin_net_id = netmanager.get_admin_network_id()
        admin_ips = set([
            i.ip_addr for i in db().query(IPAddr).
            filter_by(node=node.id).
            filter_by(network=admin_net_id)])
        return admin_ips

    @classmethod
    def get_ssh_key_path(cls, node):
        """Return the ssh key path (cobbler power_pass) for node state.

        Nodes still in 'discover' are assumed to run the bootstrap image.
        """
        if node.status == "discover":
            logger.info(
                u'Node %s seems booted with bootstrap image', node.full_name)
            return settings.PATH_TO_BOOTSTRAP_SSH_KEY

        logger.info(u'Node %s seems booted with real system', node.full_name)
        return settings.PATH_TO_SSH_KEY
def serialize(cluster):
    """Serialize cluster for provisioning."""
    # IP assignment must happen before serialization so that admin-network
    # addresses exist for every interface being serialized.
    cluster.prepare_for_provisioning()
    message = ProvisioningSerializer.serialize(cluster)
    return message

View File

@ -161,7 +161,8 @@ class DeploymentTaskManager(TaskManager):
)
db().add(supertask)
db().commit()
if not self.cluster.facts:
if not self.cluster.replaced_provisioning_info \
and not self.cluster.replaced_deployment_info:
try:
self.check_before_deployment(supertask)
except errors.CheckBeforeDeploymentError:

View File

@ -14,7 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import json
import shlex
import subprocess
@ -22,7 +21,6 @@ import netaddr
from sqlalchemy.orm import ColumnProperty
from sqlalchemy.orm import object_mapper
from nailgun.api.models import IPAddr
from nailgun.api.models import NetworkGroup
from nailgun.api.models import Node
from nailgun.api.models import NodeNICInterface
@ -31,7 +29,8 @@ from nailgun.db import db
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.network.manager import NetworkManager
from nailgun.orchestrator.serializers import serialize
from nailgun.orchestrator import deployment_serializers
from nailgun.orchestrator import provisioning_serializers
import nailgun.rpc as rpc
from nailgun.settings import settings
from nailgun.task.fake import FAKE_THREADS
@ -121,19 +120,16 @@ class DeploymentTask(object):
db().add(n)
db().commit()
message = {
# here we replace provisioning data if user redefined them
serialized_cluster = task.cluster.replaced_deployment_info or \
deployment_serializers.serialize(task.cluster)
return {
'method': 'deploy',
'respond_to': 'deploy_resp',
'args': {
'task_uuid': task.uuid,
# if task.cluster.facts not empty dict, it will be used
# instead of computing cluster facts through serialize
'deployment_info': task.cluster.facts
or serialize(task.cluster)
}
}
return message
'deployment_info': serialized_cluster}}
@classmethod
def execute(cls, task):
@ -146,148 +142,37 @@ class DeploymentTask(object):
class ProvisionTask(object):
@classmethod
def message(cls, task):
logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
# this variable is used to set 'auth_key' in cobbler ks_meta
cluster_attrs = task.cluster.attributes.merged_attrs_values()
nodes = TaskHelper.nodes_to_provision(task.cluster)
netmanager = NetworkManager()
USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP
# TODO(NAME): For now we send nodes data to orchestrator
# which is cobbler oriented. But for future we
# need to use more abstract data structure.
nodes_data = []
for node in nodes:
if not node.online:
if not USE_FAKE:
raise errors.NodeOffline(
u"Node '%s' (id=%s) is offline."
" Remove it from environment and try again." %
(node.name, node.id)
)
else:
logger.warning(
u"Node '%s' (id=%s) is offline."
" Remove it from environment and try again." %
(node.name, node.id)
)
if USE_FAKE:
continue
cobbler_profile = cluster_attrs['cobbler']['profile']
if node.offline:
raise errors.NodeOffline(
u'Node "%s" is offline.'
' Remove it from environment and try again.' %
node.full_name)
node_data = {
'profile': cobbler_profile,
'power_type': 'ssh',
'power_user': 'root',
'power_address': node.ip,
'name': TaskHelper.make_slave_name(node.id),
'hostname': node.fqdn,
'name_servers': '\"%s\"' % settings.DNS_SERVERS,
'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
'netboot_enabled': '1',
'ks_meta': {
'puppet_auto_setup': 1,
'puppet_master': settings.PUPPET_MASTER_HOST,
'puppet_version': settings.PUPPET_VERSION,
'puppet_enable': 0,
'mco_auto_setup': 1,
'install_log_2_syslog': 1,
'mco_pskey': settings.MCO_PSKEY,
'mco_vhost': settings.MCO_VHOST,
'mco_host': settings.MCO_HOST,
'mco_user': settings.MCO_USER,
'mco_password': settings.MCO_PASSWORD,
'mco_connector': settings.MCO_CONNECTOR,
'mco_enable': 1,
'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
'ks_spaces': "\"%s\"" % json.dumps(
node.attributes.volumes).replace("\"", "\\\"")
}
}
TaskHelper.prepare_syslog_dir(node)
node.status = 'provisioning'
db().commit()
if node.status == "discover":
logger.info(
"Node %s seems booted with bootstrap image",
node.id
)
node_data['power_pass'] = settings.PATH_TO_BOOTSTRAP_SSH_KEY
else:
# If it's not in discover, we expect it to be booted
# in target system.
# TODO(NAME): Get rid of expectations!
logger.info(
"Node %s seems booted with real system",
node.id
)
node_data['power_pass'] = settings.PATH_TO_SSH_KEY
# FIXME: move this code (updating) into receiver.provision_resp
if not USE_FAKE:
node.status = "provisioning"
db().add(node)
db().commit()
# here we assign admin network IPs for node
# one IP for every node interface
netmanager.assign_admin_ips(
node.id,
len(node.meta.get('interfaces', []))
)
admin_net_id = netmanager.get_admin_network_id()
admin_ips = set([i.ip_addr for i in db().query(IPAddr).
filter_by(node=node.id).
filter_by(network=admin_net_id)])
for i in node.meta.get('interfaces', []):
if 'interfaces' not in node_data:
node_data['interfaces'] = {}
node_data['interfaces'][i['name']] = {
'mac_address': i['mac'],
'static': '0',
'netmask': settings.ADMIN_NETWORK['netmask'],
'ip_address': admin_ips.pop(),
}
# interfaces_extra field in cobbler ks_meta
# means some extra data for network interfaces
# configuration. It is used by cobbler snippet.
# For example, cobbler interface model does not
# have 'peerdns' field, but we need this field
# to be configured. So we use interfaces_extra
# branch in order to set this unsupported field.
if 'interfaces_extra' not in node_data:
node_data['interfaces_extra'] = {}
node_data['interfaces_extra'][i['name']] = {
'peerdns': 'no',
'onboot': 'no'
}
# We want node to be able to PXE boot via any of its
# interfaces. That is why we add all discovered
# interfaces into cobbler system. But we want
# assigned fqdn to be resolved into one IP address
# because we don't completely support multiinterface
# configuration yet.
if i['mac'] == node.mac:
node_data['interfaces'][i['name']]['dns_name'] = node.fqdn
node_data['interfaces_extra'][i['name']]['onboot'] = 'yes'
nodes_data.append(node_data)
if not USE_FAKE:
TaskHelper.prepare_syslog_dir(node)
serialized_cluster = task.cluster.replaced_provisioning_info or \
provisioning_serializers.serialize(task.cluster)
message = {
'method': 'provision',
'respond_to': 'provision_resp',
'args': {
'task_uuid': task.uuid,
'engine': {
'url': settings.COBBLER_URL,
'username': settings.COBBLER_USER,
'password': settings.COBBLER_PASSWORD,
},
'nodes': nodes_data
}
}
'provisioning_info': serialized_cluster}}
return message
@classmethod

View File

@ -269,14 +269,12 @@ class TestHandlers(BaseIntegrationTest):
'respond_to': 'provision_resp',
'args': {
'task_uuid': provision_task_uuid,
'engine': {
'url': settings.COBBLER_URL,
'username': settings.COBBLER_USER,
'password': settings.COBBLER_PASSWORD,
},
'nodes': provision_nodes,
}
}
'provisioning_info': {
'engine': {
'url': settings.COBBLER_URL,
'username': settings.COBBLER_USER,
'password': settings.COBBLER_PASSWORD},
'nodes': provision_nodes}}}
args, kwargs = nailgun.task.manager.rpc.cast.call_args
self.assertEquals(len(args), 2)
@ -320,7 +318,7 @@ class TestHandlers(BaseIntegrationTest):
# provision method call [1][0][1][0]
n_rpc_provision = nailgun.task.manager.rpc.cast. \
call_args_list[1][0][1][0]['args']['nodes']
call_args_list[1][0][1][0]['args']['provisioning_info']['nodes']
# Nodes will be appended in provision list if
# they 'pending_deletion' = False and
# 'status' in ('discover', 'provisioning') or

View File

@ -188,56 +188,27 @@ class TestHandlers(BaseIntegrationTest):
@fake_tasks(fake_rpc=False, mock_rpc=False)
@patch('nailgun.rpc.cast')
def test_cluster_orchestrator_data(self, mocked_rpc):
def test_replaced_orchestrator_info_should_passed(self, mocked_rpc):
# creating cluster with nodes
self.env.create(
cluster_kwargs={
'mode': 'ha_compact'
},
'mode': 'multinode'},
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True},
{'roles': ['controller'], 'pending_addition': True},
{'roles': ['controller', 'cinder'], 'pending_addition': True},
{'roles': ['compute', 'cinder'], 'pending_addition': True},
{'roles': ['compute'], 'pending_addition': True},
{'roles': ['cinder'], 'pending_addition': True}])
{'roles': ['compute'], 'pending_addition': True}])
cluster = self.env.clusters[0]
orchestrator_data = {"field": "test"}
new_deployment_info = {"field": "deployment_info"}
new_provisioning_info = {"field": "provisioning_info"}
# assigning facts to cluster
cluster.facts = orchestrator_data
cluster.replaced_deployment_info = new_deployment_info
cluster.replaced_provisioning_info = new_provisioning_info
self.db.commit()
self.env.launch_deployment()
# intercepting arguments with which rpc.cast was called
args, kwargs = nailgun.task.manager.rpc.cast.call_args
self.datadiff(orchestrator_data, args[1][1]["args"]["deployment_info"])
def test_cluster_orchestrator_data_handler(self):
# creating cluster, cluster.facts default value is {}
cluster = self.env.create_cluster(api=False)
# updating facts
orchestrator_data = {"field": "test"}
orchestrator_data_json = json.dumps(orchestrator_data)
put_resp = self.app.put(
reverse('ClusterOrchestratorData',
kwargs={'cluster_id': cluster.id}),
orchestrator_data_json,
headers=self.default_headers
)
self.assertEquals(put_resp.status, 200)
self.assertEquals(cluster.facts, orchestrator_data)
# getting facts
get_resp = self.app.get(
reverse('ClusterOrchestratorData',
kwargs={'cluster_id': cluster.id}),
headers=self.default_headers
)
self.assertEquals(get_resp.status, 200)
self.datadiff(orchestrator_data, json.loads(get_resp.body))
# deleting facts
delete_resp = self.app.delete(
reverse('ClusterOrchestratorData',
kwargs={'cluster_id': cluster.id}),
headers=self.default_headers
)
self.assertEquals(delete_resp.status, 202)
self.assertEqual(cluster.facts, {})
self.datadiff(
new_provisioning_info, args[1][0]['args']['provisioning_info'])
self.datadiff(
new_deployment_info, args[1][1]['args']['deployment_info'])

View File

@ -375,7 +375,7 @@ class TestNetworkManager(BaseIntegrationTest):
self.env.launch_deployment()
rpc_nodes_provision = nailgun.task.manager.rpc.cast. \
call_args_list[0][0][1][0]['args']['nodes']
call_args_list[0][0][1][0]['args']['provisioning_info']['nodes']
map(
lambda (x, y): self.assertIn(

View File

@ -0,0 +1,67 @@
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import reverse
class TestHandlers(BaseIntegrationTest):
    """Tests for the replaced orchestrator-info handlers."""

    def setUp(self):
        super(TestHandlers, self).setUp()
        self.cluster = self.env.create_cluster(api=False)

    def check_info_handler(self, handler_name, get_info):
        """Exercise PUT/GET/DELETE for one orchestrator-info handler."""
        url = reverse(handler_name, kwargs={'cluster_id': self.cluster.id})
        orchestrator_data = {"field": "test"}

        # PUT stores the new info and echoes it back.
        put_resp = self.app.put(
            url,
            json.dumps(orchestrator_data),
            headers=self.default_headers)
        self.assertEquals(put_resp.status, 200)
        self.assertEquals(get_info(), orchestrator_data)

        # GET returns the stored info.
        get_resp = self.app.get(url, headers=self.default_headers)
        self.assertEquals(get_resp.status, 200)
        self.datadiff(orchestrator_data, json.loads(get_resp.body))

        # DELETE resets the stored info to an empty dict.
        delete_resp = self.app.delete(url, headers=self.default_headers)
        self.assertEquals(delete_resp.status, 202)
        self.assertEqual(get_info(), {})

    def test_cluster_provisioning_info(self):
        self.check_info_handler(
            'ProvisioningInfo',
            lambda: self.cluster.replaced_provisioning_info)

    def test_cluster_deployment_info(self):
        self.check_info_handler(
            'DeploymentInfo',
            lambda: self.cluster.replaced_deployment_info)

View File

@ -20,8 +20,10 @@ from nailgun.api.models import IPAddrRange
from nailgun.api.models import NetworkGroup
from nailgun.api.models import Node
from nailgun.db import db
from nailgun.orchestrator.serializers import OrchestratorHASerializer
from nailgun.orchestrator.serializers import OrchestratorSerializer
from nailgun.orchestrator.deployment_serializers \
import OrchestratorHASerializer
from nailgun.orchestrator.deployment_serializers \
import OrchestratorSerializer
from nailgun.settings import settings
from nailgun.test.base import BaseIntegrationTest

View File

@ -15,7 +15,6 @@
# under the License.
import json
import unittest
from nailgun.api.models import Release
from nailgun.test.base import BaseIntegrationTest
@ -49,17 +48,3 @@ class TestHandlers(BaseIntegrationTest):
headers=self.default_headers,
expect_errors=True)
self.assertEquals(resp.status, 400)
@unittest.skip("Database constrains do not allow to delete release now")
def test_release_delete(self):
release = self.env.create_release(api=False)
resp = self.app.delete(
reverse('ReleaseHandler', kwargs={'release_id': release.id}),
params=json.dumps({
'name': 'Another test release',
'version': '1.0'
}),
headers=self.default_headers
)
self.assertEquals(204, resp.status)
self.assertEquals('', resp.body)

View File

@ -76,7 +76,9 @@ module Naily
Naily.logger.info("'provision' method called with data: #{data.inspect}")
reporter = Naily::Reporter.new(@producer, data['respond_to'], data['args']['task_uuid'])
@orchestrator.fast_provision(reporter, data['args']['engine'], data['args']['nodes'])
@orchestrator.fast_provision(reporter,
data['args']['provisioning_info']['engine'],
data['args']['provisioning_info']['nodes'])
end
def deploy(data)