Separate provisioning and deployment handlers in nailgun api
* handler for provisioning selected nodes:
  PUT /clusters/1/provision/?nodes=1,2,3
* handler for deploying selected nodes:
  PUT /clusters/1/deploy/?nodes=1,2,3
* added nodes parameter for default deployment info:
  GET /clusters/1/orchestrator/deployment/defaults/?nodes=1,2
* added nodes parameter for default provisioning info:
  GET /clusters/1/orchestrator/provisioning/defaults/?nodes=1,2

Implements: blueprint nailgun-separate-provisioning-and-deployment-handlers
Change-Id: I2d6320fe15918f42c2071c62a65490cd7ad5d08c
parent 1fc5cfc4c4
commit 89e73fa2ae
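Note: the new endpoints are plain PUT/GET calls against the Nailgun REST API. A minimal sketch of how they would be exercised, assuming Nailgun listens on localhost:8000 with no authentication (both assumptions, and the third-party requests library is used here purely for illustration):

# Sketch only: host, port and the absence of auth are assumptions.
import requests

BASE = 'http://localhost:8000/api'  # assumed Nailgun API root

# Provision only nodes 1, 2 and 3 of cluster 1; a JSONized Task
# object is returned on HTTP 200.
resp = requests.put(BASE + '/clusters/1/provision/?nodes=1,2,3', data='')
task = resp.json()

# Preview the default deployment facts for a subset of nodes.
defaults = requests.get(
    BASE + '/clusters/1/orchestrator/deployment/defaults/?nodes=1,2').json()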
@@ -96,10 +96,13 @@ class JSONHandler(object):
     def checked_data(self, validate_method=None, **kwargs):
         try:
+            data = kwargs.pop('data', web.data())
             if validate_method:
-                data = validate_method(web.data(), **kwargs)
+                method = validate_method
             else:
-                data = self.validator.validate(web.data(), **kwargs)
+                method = self.validator.validate

+            valid_data = method(data, **kwargs)
         except (
             errors.InvalidInterfacesInfo,
             errors.InvalidMetadata
@@ -117,7 +120,7 @@ class JSONHandler(object):
             Exception
         ) as exc:
             raise web.badrequest(message=str(exc))
-        return data
+        return valid_data

     def get_object_or_404(self, model, *args, **kwargs):
         # should be in ('warning', 'Log message') format
@@ -133,8 +136,25 @@ class JSONHandler(object):
         if not obj:
             if log_404:
                 getattr(logger, log_404[0])(log_404[1])
-            raise web.notfound()
+            raise web.notfound('{0} not found'.format(model.__name__))
         else:
             if log_get:
                 getattr(logger, log_get[0])(log_get[1])
         return obj

+    def get_objects_list_or_404(self, model, ids):
+        """Get list of objects
+
+        :param model: model object
+        :param ids: list of ids
+
+        :raises: web.notfound
+        :returns: query object
+        """
+        node_query = db.query(model).filter(model.id.in_(ids))
+        objects_count = node_query.count()
+
+        if len(set(ids)) != objects_count:
+            raise web.notfound('{0} not found'.format(model.__name__))
+
+        return node_query
@@ -38,8 +38,8 @@ from nailgun.api.validators.cluster import ClusterValidator
 from nailgun.db import db
 from nailgun.errors import errors
 from nailgun.logger import logger
+from nailgun.task.manager import ApplyChangesTaskManager
 from nailgun.task.manager import ClusterDeletionManager
-from nailgun.task.manager import DeploymentTaskManager


 class ClusterHandler(JSONHandler):
@@ -272,7 +272,7 @@ class ClusterChangesHandler(JSONHandler):
                 json.dumps(network_info, indent=4)
             )
         )
-        task_manager = DeploymentTaskManager(
+        task_manager = ApplyChangesTaskManager(
             cluster_id=cluster.id
         )
         task = task_manager.execute()
@@ -324,9 +324,7 @@ class NodeCollectionHandler(JSONHandler):
         :http: * 200 (nodes are successfully updated)
                * 400 (invalid nodes data specified)
         """
-        data = self.checked_data(
-            self.validator.validate_collection_update
-        )
+        data = self.checked_data(self.validator.validate_collection_update)

         q = db().query(Node)
         nodes_updated = []
@@ -14,18 +14,48 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+import traceback
 import web

 from nailgun.api.handlers.base import content_json
 from nailgun.api.handlers.base import JSONHandler
+from nailgun.api.handlers.tasks import TaskHandler
 from nailgun.api.models import Cluster
+from nailgun.api.models import Node
+from nailgun.api.validators.node import NodesFilterValidator
 from nailgun.db import db
 from nailgun.logger import logger
 from nailgun.orchestrator import deployment_serializers
 from nailgun.orchestrator import provisioning_serializers
+from nailgun.task.helpers import TaskHelper
+from nailgun.task.manager import DeploymentTaskManager
+from nailgun.task.manager import ProvisioningTaskManager


-class DefaultOrchestratorInfo(JSONHandler):
+class NodesFilterMixin(object):
+    validator = NodesFilterValidator
+
+    def get_default_nodes(self, cluster):
+        """Method should be overriden and
+        return list of nodes
+        """
+        raise NotImplementedError('Please Implement this method')
+
+    def get_nodes(self, cluster):
+        """If nodes selected in filter
+        then returns them, else returns
+        default nodes.
+        """
+        nodes = web.input(nodes=None).nodes
+
+        if nodes:
+            node_ids = self.checked_data(data=nodes)
+            return self.get_objects_list_or_404(Node, node_ids)
+
+        return self.get_default_nodes(cluster)
+
+
+class DefaultOrchestratorInfo(NodesFilterMixin, JSONHandler):
     """Base class for default orchestrator data.
     Need to redefine serializer variable
     """
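To illustrate the mixin contract introduced above: a handler mixes NodesFilterMixin in and overrides get_default_nodes(); get_nodes() then prefers the ?nodes= filter when one is supplied. A minimal standalone sketch of that contract (FakeNodesFilterMixin and DummyProvisionHandler are hypothetical stand-ins, and the web.input/checked_data plumbing is stubbed out with a plain parameter):

# Standalone sketch; names and the stubbed plumbing are assumptions.
class FakeNodesFilterMixin(object):
    def get_default_nodes(self, cluster):
        raise NotImplementedError('Please Implement this method')

    def get_nodes(self, cluster, nodes_param=None):
        # nodes_param plays the role of web.input(nodes=None).nodes
        if nodes_param:
            node_ids = set(map(int, nodes_param.split(',')))
            return [n for n in cluster if n in node_ids]
        return self.get_default_nodes(cluster)


class DummyProvisionHandler(FakeNodesFilterMixin):
    def get_default_nodes(self, cluster):
        # stands in for TaskHelper.nodes_to_provision(cluster)
        return list(cluster)


handler = DummyProvisionHandler()
assert handler.get_nodes([1, 2, 3], nodes_param='1,3') == [1, 3]
assert handler.get_nodes([1, 2, 3]) == [1, 2, 3]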
@@ -40,7 +70,8 @@ class DefaultOrchestratorInfo(JSONHandler):
                * 404 (cluster not found in db)
         """
         cluster = self.get_object_or_404(Cluster, cluster_id)
-        return self._serializer.serialize(cluster)
+        nodes = self.get_nodes(cluster)
+        return self._serializer.serialize(cluster, nodes)


 class OrchestratorInfo(JSONHandler):
@@ -98,11 +129,17 @@ class DefaultProvisioningInfo(DefaultOrchestratorInfo):

     _serializer = provisioning_serializers

+    def get_default_nodes(self, cluster):
+        return TaskHelper.nodes_to_provision(cluster)
+

 class DefaultDeploymentInfo(DefaultOrchestratorInfo):

     _serializer = deployment_serializers

+    def get_default_nodes(self, cluster):
+        return TaskHelper.nodes_to_deploy(cluster)
+

 class ProvisioningInfo(OrchestratorInfo):

@@ -124,3 +161,45 @@ class DeploymentInfo(OrchestratorInfo):
         cluster.replace_deployment_info(data)
         db().commit()
         return cluster.replaced_deployment_info
+
+
+class SelectedNodesBase(NodesFilterMixin, JSONHandler):
+    """Base class for running task manager on selected nodes."""
+
+    @content_json
+    def PUT(self, cluster_id):
+        """:returns: JSONized Task object.
+        :http: * 200 (task successfully executed)
+               * 404 (cluster or nodes not found in db)
+               * 400 (failed to execute task)
+        """
+        cluster = self.get_object_or_404(Cluster, cluster_id)
+        nodes = self.get_nodes(cluster)
+
+        try:
+            task_manager = self.task_manager(cluster_id=cluster.id)
+            task = task_manager.execute(nodes)
+        except Exception as exc:
+            logger.warn(u'Cannot execute {0} task nodes: {1}'.format(
+                task_manager.__class__.__name__, traceback.format_exc()))
+            raise web.badrequest(str(exc))
+
+        return TaskHandler.render(task)
+
+
+class ProvisionSelectedNodes(SelectedNodesBase):
+    """Handler for provisioning selected nodes."""
+
+    task_manager = ProvisioningTaskManager
+
+    def get_default_nodes(self, cluster):
+        TaskHelper.nodes_to_provision(cluster)
+
+
+class DeploySelectedNodes(SelectedNodesBase):
+    """Handler for deployment selected nodes."""
+
+    task_manager = DeploymentTaskManager
+
+    def get_default_nodes(self, cluster):
+        TaskHelper.nodes_to_deploy(cluster)
@@ -206,38 +206,6 @@ class Cluster(Base):
         map(db().delete, chs.all())
         db().commit()

-    def prepare_for_deployment(self):
-        from nailgun.network.manager import NetworkManager
-        from nailgun.task.helpers import TaskHelper
-
-        nodes = sorted(set(
-            TaskHelper.nodes_to_deploy(self) +
-            TaskHelper.nodes_in_provisioning(self)), key=lambda node: node.id)
-
-        TaskHelper.update_slave_nodes_fqdn(nodes)
-
-        nodes_ids = [n.id for n in nodes]
-        netmanager = NetworkManager()
-        if nodes_ids:
-            netmanager.assign_ips(nodes_ids, 'management')
-            netmanager.assign_ips(nodes_ids, 'public')
-            netmanager.assign_ips(nodes_ids, 'storage')
-
-            for node in nodes:
-                netmanager.assign_admin_ips(
-                    node.id, len(node.meta.get('interfaces', [])))
-
-    def prepare_for_provisioning(self):
-        from nailgun.network.manager import NetworkManager
-        from nailgun.task.helpers import TaskHelper
-
-        netmanager = NetworkManager()
-        nodes = TaskHelper.nodes_to_provision(self)
-        TaskHelper.update_slave_nodes_fqdn(nodes)
-        for node in nodes:
-            netmanager.assign_admin_ips(
-                node.id, len(node.meta.get('interfaces', [])))

     @property
     def network_manager(self):
         if self.net_provider == 'neutron':
@@ -103,6 +103,10 @@ class Node(Base):
         cascade="delete",
         order_by="NodeNICInterface.id")

+    @property
+    def uid(self):
+        return str(self.id)
+
     @property
     def offline(self):
         return not self.online
@@ -25,6 +25,7 @@ from nailgun.api.handlers.cluster import ClusterChangesHandler
 from nailgun.api.handlers.cluster import ClusterCollectionHandler
 from nailgun.api.handlers.cluster import ClusterGeneratedData
 from nailgun.api.handlers.cluster import ClusterHandler
+
 from nailgun.api.handlers.disks import NodeDefaultsDisksHandler
 from nailgun.api.handlers.disks import NodeDisksHandler
 from nailgun.api.handlers.disks import NodeVolumesInformationHandler
@@ -59,7 +60,9 @@ from nailgun.api.handlers.notifications import NotificationHandler
 from nailgun.api.handlers.orchestrator import DefaultDeploymentInfo
 from nailgun.api.handlers.orchestrator import DefaultProvisioningInfo
 from nailgun.api.handlers.orchestrator import DeploymentInfo
+from nailgun.api.handlers.orchestrator import DeploySelectedNodes
 from nailgun.api.handlers.orchestrator import ProvisioningInfo
+from nailgun.api.handlers.orchestrator import ProvisionSelectedNodes

 from nailgun.api.handlers.plugin import PluginCollectionHandler
 from nailgun.api.handlers.plugin import PluginHandler
@@ -116,6 +119,11 @@ urls = (
     r'/clusters/(?P<cluster_id>\d+)/generated/?$',
     ClusterGeneratedData,

+    r'/clusters/(?P<cluster_id>\d+)/provision/?$',
+    ProvisionSelectedNodes,
+    r'/clusters/(?P<cluster_id>\d+)/deploy/?$',
+    DeploySelectedNodes,
+
     r'/nodes/?$',
     NodeCollectionHandler,
     r'/nodes/(?P<node_id>\d+)/?$',
@@ -236,3 +236,21 @@ class NodeDisksValidator(BasicValidator):
             if volumes_size > disk['size']:
                 raise errors.InvalidData(
                     u'Not enough free space on disk: %s' % disk)
+
+
+class NodesFilterValidator(BasicValidator):
+
+    @classmethod
+    def validate(cls, nodes):
+        """Used for filtering nodes
+        :param nodes: list of ids in string representation.
+                      Example: "1,99,3,4"
+
+        :returns: list of integers
+        """
+        try:
+            node_ids = set(map(int, nodes.split(',')))
+        except ValueError:
+            raise errors.InvalidData('Provided id is not integer')
+
+        return node_ids
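A quick illustration of the validator's behavior; note that the code returns a set of ints (duplicates and ordering collapse), even though the docstring says "list of integers". This sketch is self-contained, so a plain ValueError stands in for nailgun's errors.InvalidData (an assumption for the example):

# Sketch of NodesFilterValidator.validate semantics; ValueError is used
# in place of errors.InvalidData to keep the example self-contained.
def validate(nodes):
    try:
        return set(map(int, nodes.split(',')))
    except ValueError:
        raise ValueError('Provided id is not integer')

assert validate('1,99,3,4') == {1, 3, 4, 99}  # order/duplicates collapse
# validate('1,x') raises ValueError: Provided id is not integer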
@@ -18,7 +18,6 @@

 from netaddr import IPNetwork
 from sqlalchemy import and_
-from sqlalchemy import or_

 from nailgun.api.models import NetworkGroup
 from nailgun.api.models import Node
@@ -56,11 +55,11 @@ class Priority(object):
 class DeploymentMultiSerializer(object):

     @classmethod
-    def serialize(cls, cluster):
+    def serialize(cls, cluster, nodes):
         """Method generates facts which
         through an orchestrator passes to puppet
         """
-        nodes = cls.serialize_nodes(cls.get_nodes_to_deployment(cluster))
+        nodes = cls.serialize_nodes(nodes)
         common_attrs = cls.get_common_attrs(cluster)

         cls.set_deployment_priorities(nodes)
@@ -83,12 +82,6 @@ class DeploymentMultiSerializer(object):
             and_(Node.cluster == cluster,
                  False == Node.pending_deletion)).order_by(Node.id)

-    @classmethod
-    def get_nodes_to_deployment(cls, cluster):
-        """Nodes which need to deploy."""
-        return sorted(TaskHelper.nodes_to_deploy(cluster),
-                      key=lambda node: node.id)
-
     @classmethod
     def get_common_attrs(cls, cluster):
         """Cluster attributes."""
@@ -115,8 +108,7 @@ class DeploymentMultiSerializer(object):
         for node in nodes:
             for role in node.all_roles:
                 node_list.append({
-                    # Yes, uid is really should be a string
-                    'uid': str(node.id),
+                    'uid': node.uid,
                     'fqdn': node.fqdn,
                     'name': TaskHelper.make_slave_name(node.id),
                     'role': role})
@@ -163,7 +155,7 @@ class DeploymentMultiSerializer(object):
         """
         node_attrs = {
             # Yes, uid is really should be a string
-            'uid': str(node.id),
+            'uid': node.uid,
             'fqdn': node.fqdn,
             'status': node.status,
             'role': role,
@@ -185,54 +177,15 @@ class DeploymentHASerializer(DeploymentMultiSerializer):
     """Serializer for ha mode."""

     @classmethod
-    def serialize(cls, cluster):
+    def serialize(cls, cluster, nodes):
         serialized_nodes = super(
             DeploymentHASerializer,
             cls
-        ).serialize(cluster)
+        ).serialize(cluster, nodes)
         cls.set_primary_controller(serialized_nodes)

         return serialized_nodes

-    @classmethod
-    def has_controller_nodes(cls, nodes):
-        for node in nodes:
-            if 'controller' in node.all_roles:
-                return True
-        return False
-
-    @classmethod
-    def get_nodes_to_deployment(cls, cluster):
-        """Get nodes for deployment
-        * in case of failed controller should be redeployed
-          all controllers
-        * in case of failed non-controller should be
-          redeployed only node which was failed
-        """
-        nodes = super(
-            DeploymentHASerializer,
-            cls
-        ).get_nodes_to_deployment(cluster)
-
-        controller_nodes = []
-
-        # if list contain at least one controller
-        if cls.has_controller_nodes(nodes):
-            # retrive all controllers from cluster
-            controller_nodes = db().query(Node). \
-                filter(or_(
-                    Node.role_list.any(name='controller'),
-                    Node.pending_role_list.any(name='controller'),
-                    Node.role_list.any(name='primary-controller'),
-                    Node.pending_role_list.any(name='primary-controller')
-                )). \
-                filter(Node.cluster == cluster). \
-                filter(False == Node.pending_deletion). \
-                order_by(Node.id).all()
-
-        return sorted(set(nodes + controller_nodes),
-                      key=lambda node: node.id)
-
     @classmethod
     def set_primary_controller(cls, nodes):
         """Set primary controller for the first controller
@@ -374,7 +327,7 @@ class NetworkDeploymentSerializer(object):
                 'public_netmask': cls.get_addr(netw_data,
                                                'public')['netmask']}
             [n.update(addresses) for n in attrs['nodes']
-             if n['uid'] == str(node.id)]
+             if n['uid'] == node.uid]

     @classmethod
     def node_attrs(cls, node):
@@ -801,15 +754,14 @@ class NeutronNetworkDeploymentSerializer(object):
         return attrs


-def serialize(cluster):
+def serialize(cluster, nodes):
     """Serialization depends on deployment mode
     """
-    cluster.prepare_for_deployment()
+    TaskHelper.prepare_for_deployment(cluster.nodes)

     if cluster.mode == 'multinode':
         serializer = DeploymentMultiSerializer
     elif cluster.is_ha_mode:
-        # Same serializer for all ha
         serializer = DeploymentHASerializer

-    return serializer.serialize(cluster)
+    return serializer.serialize(cluster, nodes)
|
@ -26,10 +26,11 @@ class ProvisioningSerializer(object):
|
|||||||
"""Provisioning serializer"""
|
"""Provisioning serializer"""
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def serialize(cls, cluster):
|
def serialize(cls, cluster, nodes):
|
||||||
"""Serialize cluster for provisioning."""
|
"""Serialize cluster for provisioning."""
|
||||||
|
|
||||||
serialized_nodes = cls.serialize_nodes(cluster)
|
cluster_attrs = cluster.attributes.merged_attrs_values()
|
||||||
|
serialized_nodes = cls.serialize_nodes(cluster_attrs, nodes)
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'engine': {
|
'engine': {
|
||||||
@@ -39,13 +40,10 @@ class ProvisioningSerializer(object):
             'nodes': serialized_nodes}

     @classmethod
-    def serialize_nodes(cls, cluster):
+    def serialize_nodes(cls, cluster_attrs, nodes):
         """Serialize nodes."""
-        nodes_to_provision = TaskHelper.nodes_to_provision(cluster)
-        cluster_attrs = cluster.attributes.merged_attrs_values()

         serialized_nodes = []
-        for node in nodes_to_provision:
+        for node in nodes:
             serialized_node = cls.serialize_node(cluster_attrs, node)
             serialized_nodes.append(serialized_node)

@@ -56,6 +54,7 @@ class ProvisioningSerializer(object):
         """Serialize a single node."""

         serialized_node = {
+            'uid': node.uid,
             'power_address': node.ip,
             'name': TaskHelper.make_slave_name(node.id),
             'hostname': node.fqdn,
@@ -155,8 +154,8 @@ class ProvisioningSerializer(object):
         return settings.PATH_TO_SSH_KEY


-def serialize(cluster):
+def serialize(cluster, nodes):
     """Serialize cluster for provisioning."""
-    cluster.prepare_for_provisioning()
+    TaskHelper.prepare_for_provisioning(nodes)

-    return ProvisioningSerializer.serialize(cluster)
+    return ProvisioningSerializer.serialize(cluster, nodes)
|
@ -283,22 +283,34 @@ class NailgunReceiver(object):
|
|||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def provision_resp(cls, **kwargs):
|
def provision_resp(cls, **kwargs):
|
||||||
# For now provision task is nothing more than just adding
|
|
||||||
# system into cobbler and rebooting node. Then we think task
|
|
||||||
# is ready. We don't wait for end of node provisioning.
|
|
||||||
logger.info(
|
logger.info(
|
||||||
"RPC method provision_resp received: %s" %
|
"RPC method provision_resp received: %s" %
|
||||||
json.dumps(kwargs)
|
json.dumps(kwargs))
|
||||||
)
|
|
||||||
task_uuid = kwargs.get('task_uuid')
|
task_uuid = kwargs.get('task_uuid')
|
||||||
message = kwargs.get('error')
|
message = kwargs.get('error')
|
||||||
status = kwargs.get('status')
|
status = kwargs.get('status')
|
||||||
progress = kwargs.get('progress')
|
progress = kwargs.get('progress')
|
||||||
|
nodes = kwargs.get('nodes', [])
|
||||||
|
|
||||||
task = get_task_by_uuid(task_uuid)
|
task = get_task_by_uuid(task_uuid)
|
||||||
if not task:
|
|
||||||
logger.warning(u"No task with uuid %s found", task_uuid)
|
for node in nodes:
|
||||||
return
|
uid = node.get('uid')
|
||||||
|
node_db = db().query(Node).get(uid)
|
||||||
|
|
||||||
|
if not node_db:
|
||||||
|
logger.warn('Task with uid "{0}" not found'.format(uid))
|
||||||
|
continue
|
||||||
|
|
||||||
|
if node.get('status') == 'error':
|
||||||
|
node_db.status = 'error'
|
||||||
|
node_db.progress = 100
|
||||||
|
node_db.error_type = 'provision'
|
||||||
|
node_db.error_msg = node.get('error_msg', 'Unknown error')
|
||||||
|
else:
|
||||||
|
node_db.status = node.get('status')
|
||||||
|
node_db.progress = node.get('progress')
|
||||||
|
|
||||||
TaskHelper.update_task_status(task.uuid, status, progress, message)
|
TaskHelper.update_task_status(task.uuid, status, progress, message)
|
||||||
|
|
||||||
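The shape of the RPC payload that provision_resp now consumes can be read directly off the kwargs.get calls above. A hypothetical example message, with all field values invented purely for illustration:

# Hypothetical provision_resp payload; values are illustrative only.
example_kwargs = {
    'task_uuid': 'aaaa-bbbb-cccc',
    'status': 'running',
    'progress': 40,
    'error': None,
    'nodes': [
        {'uid': '1', 'status': 'provisioning', 'progress': 40},
        {'uid': '2', 'status': 'error', 'error_msg': 'PXE boot failed'},
    ],
}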
@@ -317,16 +329,9 @@ class NailgunReceiver(object):
         ).all()
         for n in error_nodes:
             if names_only:
-                nodes_info.append(
-                    "'{0}'".format(n.name)
-                )
+                nodes_info.append(u"'{0}'".format(n.name))
             else:
-                nodes_info.append(
-                    u"'{0}': {1}".format(
-                        n.name,
-                        n.error_msg
-                    )
-                )
+                nodes_info.append(u"'{0}': {1}".format(n.name, n.error_msg))
         if nodes_info:
             if names_only:
                 message = u", ".join(nodes_info)
@@ -17,6 +17,8 @@
 import os
 import shutil

+from sqlalchemy import or_
+
 from nailgun.api.models import IPAddr
 from nailgun.api.models import Node
 from nailgun.api.models import Task
@@ -300,7 +302,7 @@ class TaskHelper(object):

     @classmethod
     def nodes_to_deploy(cls, cluster):
-        return sorted(filter(
+        nodes_to_deploy = sorted(filter(
             lambda n: any([
                 n.pending_addition,
                 n.needs_reprovision,
@@ -309,6 +311,11 @@ class TaskHelper(object):
             cluster.nodes
         ), key=lambda n: n.id)

+        if cluster.is_ha_mode:
+            return cls.__nodes_to_deploy_ha(cluster, nodes_to_deploy)
+
+        return nodes_to_deploy
+
     @classmethod
     def nodes_to_provision(cls, cluster):
         return sorted(filter(
@@ -326,6 +333,48 @@ class TaskHelper(object):
             cluster.nodes
         ), key=lambda n: n.id)

+    @classmethod
+    def __nodes_to_deploy_ha(cls, cluster, nodes):
+        """Get nodes for deployment for ha mode
+        * in case of failed controller should be redeployed
+          all controllers
+        * in case of failed non-controller should be
+          redeployed only node which was failed
+
+        If node list has at least one controller we
+        filter all controllers from the cluster and
+        return them.
+        """
+        controller_nodes = []
+
+        # if list contain at least one controller
+        if cls.__has_controller_nodes(nodes):
+            # retrive all controllers from cluster
+            controller_nodes = db().query(Node). \
+                filter(or_(
+                    Node.role_list.any(name='controller'),
+                    Node.pending_role_list.any(name='controller'),
+                    Node.role_list.any(name='primary-controller'),
+                    Node.pending_role_list.any(name='primary-controller')
+                )). \
+                filter(Node.cluster == cluster). \
+                filter(False == Node.pending_deletion). \
+                order_by(Node.id).all()
+
+        return sorted(set(nodes + controller_nodes),
+                      key=lambda node: node.id)
+
+    @classmethod
+    def __has_controller_nodes(cls, nodes):
+        """Returns True if list of nodes has
+        at least one controller.
+        """
+        for node in nodes:
+            if 'controller' in node.all_roles or \
+               'primary-controller' in node.all_roles:
+                return True
+        return False
+
     @classmethod
     def set_error(cls, task_uuid, message):
         cls.update_task_status(
@@ -342,3 +391,42 @@ class TaskHelper(object):
         db().commit()
         full_err_msg = u"\n".join(err_messages)
         raise errors.NetworkCheckError(full_err_msg, add_client=False)
+
+    @classmethod
+    def prepare_for_provisioning(cls, nodes):
+        """Prepare environment for provisioning,
+        update fqdns, assign admin ips
+        """
+        netmanager = NetworkManager()
+        cls.update_slave_nodes_fqdn(nodes)
+        for node in nodes:
+            netmanager.assign_admin_ips(
+                node.id, len(node.meta.get('interfaces', [])))
+
+    @classmethod
+    def prepare_for_deployment(cls, nodes):
+        """Prepare environment for deployment,
+        assign management, public, storage ips
+        """
+        cls.update_slave_nodes_fqdn(nodes)
+
+        nodes_ids = [n.id for n in nodes]
+        netmanager = NetworkManager()
+        if nodes_ids:
+            netmanager.assign_ips(nodes_ids, 'management')
+            netmanager.assign_ips(nodes_ids, 'public')
+            netmanager.assign_ips(nodes_ids, 'storage')
+
+            for node in nodes:
+                netmanager.assign_admin_ips(
+                    node.id, len(node.meta.get('interfaces', [])))
+
+    @classmethod
+    def raise_if_node_offline(cls, nodes):
+        offline_nodes = filter(lambda n: n.offline, nodes)
+
+        if offline_nodes:
+            node_names = ','.join(map(lambda n: n.full_name, offline_nodes))
+            raise errors.NodeOffline(
+                u'Nodes "%s" are offline.'
+                ' Remove them from environment and try again.' % node_names)
@@ -69,19 +69,17 @@ class TaskManager(object):
         db().commit()


-class DeploymentTaskManager(TaskManager):
+class ApplyChangesTaskManager(TaskManager):

     def execute(self):
         logger.info(
             u"Trying to start deployment at cluster '{0}'".format(
-                self.cluster.name or self.cluster.id,
-            )
-        )
+                self.cluster.name or self.cluster.id))

         current_tasks = db().query(Task).filter_by(
             cluster_id=self.cluster.id,
-            name="deploy"
-        )
+            name='deploy')
         for task in current_tasks:
             if task.status == "running":
                 raise errors.DeploymentAlreadyStarted()
@@ -104,18 +102,19 @@ class DeploymentTaskManager(TaskManager):
         db().add(self.cluster)
         db().commit()

-        supertask = Task(
-            name="deploy",
-            cluster=self.cluster
-        )
+        supertask = Task(name='deploy', cluster=self.cluster)
         db().add(supertask)
         db().commit()

+        # Run validation if user didn't redefine
+        # provisioning and deployment information
         if not self.cluster.replaced_provisioning_info \
                 and not self.cluster.replaced_deployment_info:
             try:
                 self.check_before_deployment(supertask)
             except errors.CheckBeforeDeploymentError:
                 return supertask

         # in case of Red Hat
         if self.cluster.release.operating_system == "RHEL":
             try:
@@ -143,22 +142,17 @@ class DeploymentTaskManager(TaskManager):
         if nodes_to_delete:
             task_deletion = supertask.create_subtask("node_deletion")
             logger.debug("Launching deletion task: %s", task_deletion.uuid)
-            self._call_silently(
-                task_deletion,
-                tasks.DeletionTask
-            )
+            self._call_silently(task_deletion, tasks.DeletionTask)

         if nodes_to_provision:
             TaskHelper.update_slave_nodes_fqdn(nodes_to_provision)
             logger.debug("There are nodes to provision: %s",
                          " ".join([n.fqdn for n in nodes_to_provision]))
             task_provision = supertask.create_subtask("provision")
-            # we assume here that task_provision just adds system to
-            # cobbler and reboots it, so it has extremely small weight
-            task_provision.weight = 0.05
             provision_message = self._call_silently(
                 task_provision,
                 tasks.ProvisionTask,
+                nodes_to_provision,
                 method_name='message'
             )
             db().refresh(task_provision)
@@ -181,6 +175,7 @@ class DeploymentTaskManager(TaskManager):
             deployment_message = self._call_silently(
                 task_deployment,
                 tasks.DeploymentTask,
+                nodes_to_deploy,
                 method_name='message'
             )

@@ -268,9 +263,7 @@ class DeploymentTaskManager(TaskManager):

         for task in subtasks:
             if task.status == 'error':
-                raise errors.RedHatSetupError(
-                    task.message
-                )
+                raise errors.RedHatSetupError(task.message)

         return subtask_messages

@@ -324,6 +317,79 @@ class DeploymentTaskManager(TaskManager):
         db().commit()


+class ProvisioningTaskManager(TaskManager):
+
+    def execute(self, nodes_to_provision):
+        """Run provisioning task on specified nodes
+
+        Constraints: currently this task cannot deploy RedHat.
+        For redhat here should be added additional
+        tasks e.i. check credentials, check licenses,
+        redhat downloading.
+        Status of this task you can track here:
+        https://blueprints.launchpad.net/fuel/+spec
+        /nailgun-separate-provisioning-for-redhat
+        """
+        TaskHelper.update_slave_nodes_fqdn(nodes_to_provision)
+        logger.debug('Nodes to provision: {0}'.format(
+            ' '.join([n.fqdn for n in nodes_to_provision])))
+
+        task_provision = Task(name='provision', cluster=self.cluster)
+        db().add(task_provision)
+        db().commit()
+
+        provision_message = self._call_silently(
+            task_provision,
+            tasks.ProvisionTask,
+            nodes_to_provision,
+            method_name='message'
+        )
+        db().refresh(task_provision)
+
+        task_provision.cache = provision_message
+
+        for node in nodes_to_provision:
+            node.pending_addition = False
+            node.status = 'provisioning'
+            node.progress = 0
+
+        db().commit()
+
+        rpc.cast('naily', provision_message)
+
+        return task_provision
+
+
+class DeploymentTaskManager(TaskManager):
+
+    def execute(self, nodes_to_deployment):
+        TaskHelper.update_slave_nodes_fqdn(nodes_to_deployment)
+        logger.debug('Nodes to deploy: {0}'.format(
+            ' '.join([n.fqdn for n in nodes_to_deployment])))
+        task_deployment = Task(name='deployment', cluster=self.cluster)
+        db().add(task_deployment)
+        db().commit()
+
+        deployment_message = self._call_silently(
+            task_deployment,
+            tasks.DeploymentTask,
+            nodes_to_deployment,
+            method_name='message')
+
+        db().refresh(task_deployment)
+
+        task_deployment.cache = deployment_message
+
+        for node in nodes_to_deployment:
+            node.status = 'deploying'
+            node.progress = 0
+
+        db().commit()
+        rpc.cast('naily', deployment_message)
+
+        return task_deployment
+
+
 class CheckNetworksTaskManager(TaskManager):

     def execute(self, data, check_admin_untagged=False):
@@ -101,17 +101,13 @@ class DeploymentTask(object):
     # those which are prepared for removal.

     @classmethod
-    def message(cls, task):
+    def message(cls, task, nodes):
         logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
+        TaskHelper.raise_if_node_offline(nodes)

-        task.cluster.prepare_for_deployment()
-        nodes = TaskHelper.nodes_to_deploy(task.cluster)
         nodes_ids = [n.id for n in nodes]
         for n in db().query(Node).filter_by(
                 cluster=task.cluster).order_by(Node.id):
-            # However, we must not pass nodes which are set to be deleted.
-            if n.pending_deletion:
-                continue

             if n.id in nodes_ids:
                 if n.pending_roles:
@@ -129,10 +125,10 @@ class DeploymentTask(object):

         # here we replace provisioning data if user redefined them
         serialized_cluster = task.cluster.replaced_deployment_info or \
-            deployment_serializers.serialize(task.cluster)
+            deployment_serializers.serialize(task.cluster, nodes)

         # After searilization set pending_addition to False
-        for node in db().query(Node).filter(Node.id.in_(nodes_ids)):
+        for node in nodes:
             node.pending_addition = False
         db().commit()

@@ -143,44 +139,23 @@ class DeploymentTask(object):
             'task_uuid': task.uuid,
             'deployment_info': serialized_cluster}}

-    @classmethod
-    def execute(cls, task):
-        logger.debug("DeploymentTask.execute(task=%s)" % task.uuid)
-        message = cls.message(task)
-        task.cache = message
-        db().add(task)
-        db().commit()
-        rpc.cast('naily', message)
-

 class ProvisionTask(object):

     @classmethod
-    def message(cls, task):
+    def message(cls, task, nodes_to_provisioning):
         logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
-        nodes = TaskHelper.nodes_to_provision(task.cluster)
-        USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP
+        TaskHelper.raise_if_node_offline(nodes_to_provisioning)
+        serialized_cluster = task.cluster.replaced_provisioning_info or \
+            provisioning_serializers.serialize(
+                task.cluster, nodes_to_provisioning)

-        # We need to assign admin ips
-        # and only after that prepare syslog
-        # directories
-        task.cluster.prepare_for_provisioning()
-
-        for node in nodes:
-            if USE_FAKE:
+        for node in nodes_to_provisioning:
+            if settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP:
                 continue

-            if node.offline:
-                raise errors.NodeOffline(
-                    u'Node "%s" is offline.'
-                    ' Remove it from environment and try again.' %
-                    node.full_name)
-
             TaskHelper.prepare_syslog_dir(node)

-        serialized_cluster = task.cluster.replaced_provisioning_info or \
-            provisioning_serializers.serialize(task.cluster)
-
         message = {
             'method': 'provision',
             'respond_to': 'provision_resp',
@@ -190,15 +165,6 @@ class ProvisionTask(object):

         return message

-    @classmethod
-    def execute(cls, task):
-        logger.debug("ProvisionTask.execute(task=%s)" % task.uuid)
-        message = cls.message(task)
-        task.cache = message
-        db().add(task)
-        db().commit()
-        rpc.cast('naily', message)
-

 class DeletionTask(object):

@@ -728,6 +728,10 @@ class BaseIntegrationTest(BaseTestCase):
         )


+class BaseUnitTest(BaseTestCase):
+    pass
+
+
 def fake_tasks(fake_rpc=True,
                mock_rpc=True,
                **kwargs):
@@ -15,16 +15,23 @@
 # under the License.

 import json
+import nailgun
+
+from mock import patch
 from nailgun.api.models import Cluster
 from nailgun.test.base import BaseIntegrationTest
+from nailgun.test.base import fake_tasks
 from nailgun.test.base import reverse


-class TestHandlers(BaseIntegrationTest):
+def nodes_filter_param(node_ids):
+    return '?nodes={0}'.format(','.join(node_ids))
+
+
+class TestOrchestratorInfoHandlers(BaseIntegrationTest):

     def setUp(self):
-        super(TestHandlers, self).setUp()
+        super(TestOrchestratorInfoHandlers, self).setUp()
         self.cluster = self.env.create_cluster(api=False)

     def check_info_handler(self, handler_name, get_info):
@@ -68,10 +75,10 @@ class TestHandlers(BaseIntegrationTest):
             lambda: self.cluster.replaced_deployment_info)


-class TestDefaultOrchestratorHandlers(BaseIntegrationTest):
+class TestDefaultOrchestratorInfoHandlers(BaseIntegrationTest):

     def setUp(self):
-        super(TestDefaultOrchestratorHandlers, self).setUp()
+        super(TestDefaultOrchestratorInfoHandlers, self).setUp()

         cluster = self.env.create(
             cluster_kwargs={
@@ -112,6 +119,34 @@ class TestDefaultOrchestratorHandlers(BaseIntegrationTest):
         self.assertEqual(resp.status, 200)
         self.assertEqual(3, len(json.loads(resp.body)['nodes']))

+    def test_default_provisioning_handler_for_selected_nodes(self):
+        node_ids = [node.uid for node in self.cluster.nodes][:2]
+        url = reverse(
+            'DefaultProvisioningInfo',
+            kwargs={'cluster_id': self.cluster.id}) + \
+            nodes_filter_param(node_ids)
+        resp = self.app.get(url, headers=self.default_headers)
+
+        self.assertEqual(resp.status, 200)
+        data = json.loads(resp.body)['nodes']
+        self.assertEqual(2, len(data))
+        actual_uids = [node['uid'] for node in data]
+        self.assertItemsEqual(actual_uids, node_ids)
+
+    def test_default_deployment_handler_for_selected_nodes(self):
+        node_ids = [node.uid for node in self.cluster.nodes][:2]
+        url = reverse(
+            'DefaultDeploymentInfo',
+            kwargs={'cluster_id': self.cluster.id}) + \
+            nodes_filter_param(node_ids)
+        resp = self.app.get(url, headers=self.default_headers)
+
+        self.assertEqual(resp.status, 200)
+        data = json.loads(resp.body)
+        self.assertEqual(2, len(data))
+        actual_uids = [node['uid'] for node in data]
+        self.assertItemsEqual(actual_uids, node_ids)
+
     def test_cluster_provisioning_customization(self):
         self.customization_handler_helper(
             'ProvisioningInfo',
@@ -123,3 +158,58 @@ class TestDefaultOrchestratorHandlers(BaseIntegrationTest):
             'DeploymentInfo',
             lambda: self.cluster.replaced_deployment_info
         )
+
+
+class TestSelectedNodesAction(BaseIntegrationTest):
+
+    def setUp(self):
+        super(TestSelectedNodesAction, self).setUp()
+        self.env.create(
+            cluster_kwargs={
+                'mode': 'ha_compact'},
+            nodes_kwargs=[
+                {'roles': ['controller'], 'pending_addition': True},
+                {'roles': ['controller'], 'pending_addition': True},
+                {'roles': ['controller'], 'pending_addition': True},
+                {'roles': ['cinder'], 'pending_addition': True},
+                {'roles': ['compute'], 'pending_addition': True},
+                {'roles': ['cinder'], 'pending_addition': True}])
+
+        self.cluster = self.env.clusters[0]
+        self.node_uids = [n.uid for n in self.cluster.nodes][:3]
+
+    def send_empty_put(self, url):
+        return self.app.put(
+            url, '', headers=self.default_headers, expect_errors=True)
+
+    @fake_tasks(fake_rpc=False, mock_rpc=False)
+    @patch('nailgun.rpc.cast')
+    def test_start_provisioning_on_selected_nodes(self, mock_rpc):
+        action_url = reverse(
+            'ProvisionSelectedNodes',
+            kwargs={'cluster_id': self.cluster.id}) + \
+            nodes_filter_param(self.node_uids)
+
+        self.send_empty_put(action_url)
+
+        args, kwargs = nailgun.task.manager.rpc.cast.call_args
+        provisioned_uids = [
+            n['uid'] for n in args[1]['args']['provisioning_info']['nodes']]
+
+        self.assertEqual(3, len(provisioned_uids))
+        self.assertItemsEqual(self.node_uids, provisioned_uids)
+
+    @fake_tasks(fake_rpc=False, mock_rpc=False)
+    @patch('nailgun.rpc.cast')
+    def test_start_deployment_on_selected_nodes(self, mock_rpc):
+        action_url = reverse(
+            'DeploySelectedNodes',
+            kwargs={'cluster_id': self.cluster.id}) + \
+            nodes_filter_param(self.node_uids)
+
+        self.send_empty_put(action_url)
+
+        args, kwargs = nailgun.task.manager.rpc.cast.call_args
+        deployed_uids = [n['uid'] for n in args[1]['args']['deployment_info']]
+        self.assertEqual(3, len(deployed_uids))
+        self.assertItemsEqual(self.node_uids, deployed_uids)
@@ -27,6 +27,7 @@ from nailgun.orchestrator.deployment_serializers \
 from nailgun.orchestrator.deployment_serializers \
     import DeploymentMultiSerializer
 from nailgun.settings import settings
+from nailgun.task.helpers import TaskHelper
 from nailgun.test.base import BaseIntegrationTest
 from nailgun.test.base import reverse
 from nailgun.volumes import manager
@@ -72,7 +73,7 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
             nodes_kwargs=node_args)

         cluster_db = self.db.query(Cluster).get(cluster['id'])
-        cluster_db.prepare_for_deployment()
+        TaskHelper.prepare_for_deployment(cluster_db.nodes)
         return cluster_db

     @property
@@ -101,7 +102,7 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
     def test_serialize_node(self):
         node = self.env.create_node(
             api=True, cluster_id=self.cluster.id, pending_addition=True)
-        self.cluster.prepare_for_deployment()
+        TaskHelper.prepare_for_deployment(self.cluster.nodes)

         node_db = self.db.query(Node).get(node['id'])
         serialized_data = self.serializer.serialize_node(node_db, 'controller')
@@ -188,7 +189,7 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
         self.app.put(url, json.dumps(data),
                      headers=self.default_headers,
                      expect_errors=False)
-        facts = self.serializer.serialize(cluster)
+        facts = self.serializer.serialize(cluster, cluster.nodes)

         for fact in facts:
             self.assertEquals(fact['vlan_interface'], 'eth0')
@@ -227,7 +228,7 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):

         self.db.add(new_ip_range)
         self.db.commit()
-        facts = self.serializer.serialize(self.cluster)
+        facts = self.serializer.serialize(self.cluster, self.cluster.nodes)

         for fact in facts:
             self.assertEquals(
@@ -285,7 +286,7 @@ class TestNovaOrchestratorHASerializer(OrchestratorSerializerTestBase):
             {'roles': ['cinder'], 'pending_addition': True}])

         cluster_db = self.db.query(Cluster).get(cluster['id'])
-        cluster_db.prepare_for_deployment()
+        TaskHelper.prepare_for_deployment(cluster_db.nodes)
         return cluster_db

     @property
@@ -345,81 +346,6 @@ class TestNovaOrchestratorHASerializer(OrchestratorSerializerTestBase):
             {'point': '2', 'weight': '2'}])
-
-
-class TestNovaOrchestratorHASerializerRedeploymentErrorNodes(
-        OrchestratorSerializerTestBase):
-
-    def create_env(self, nodes):
-        cluster = self.env.create(
-            cluster_kwargs={
-                'mode': 'ha_compact'},
-            nodes_kwargs=nodes)
-
-        cluster_db = self.db.query(Cluster).get(cluster['id'])
-        cluster_db.prepare_for_deployment()
-        return cluster_db
-
-    @property
-    def serializer(self):
-        return DeploymentHASerializer
-
-    def filter_by_role(self, nodes, role):
-        return filter(lambda node: role in node.all_roles, nodes)
-
-    def test_redeploy_all_controller_if_single_controller_failed(self):
-        cluster = self.create_env([
-            {'roles': ['controller'], 'status': 'error'},
-            {'roles': ['controller']},
-            {'roles': ['controller', 'cinder']},
-            {'roles': ['compute', 'cinder']},
-            {'roles': ['compute']},
-            {'roles': ['cinder']}])
-
-        nodes = self.serializer.get_nodes_to_deployment(cluster)
-        self.assertEquals(len(nodes), 3)
-
-        controllers = self.filter_by_role(nodes, 'controller')
-        self.assertEquals(len(controllers), 3)
-
-    def test_redeploy_only_compute_cinder(self):
-        cluster = self.create_env([
-            {'roles': ['controller']},
-            {'roles': ['controller']},
-            {'roles': ['controller', 'cinder']},
-            {'roles': ['compute', 'cinder']},
-            {'roles': ['compute'], 'status': 'error'},
-            {'roles': ['cinder'], 'status': 'error'}])
-
-        nodes = self.serializer.get_nodes_to_deployment(cluster)
-        self.assertEquals(len(nodes), 2)
-
-        cinders = self.filter_by_role(nodes, 'cinder')
-        self.assertEquals(len(cinders), 1)
-
-        computes = self.filter_by_role(nodes, 'compute')
-        self.assertEquals(len(computes), 1)
-
-    def test_redeploy_all_controller_and_compute_cinder(self):
-        cluster = self.create_env([
-            {'roles': ['controller'], 'status': 'error'},
-            {'roles': ['controller']},
-            {'roles': ['controller', 'cinder']},
-            {'roles': ['compute', 'cinder']},
-            {'roles': ['compute'], 'status': 'error'},
-            {'roles': ['cinder'], 'status': 'error'}])
-
-        nodes = self.serializer.get_nodes_to_deployment(cluster)
-        self.assertEquals(len(nodes), 5)
-
-        controllers = self.filter_by_role(nodes, 'controller')
-        self.assertEquals(len(controllers), 3)
-
-        cinders = self.filter_by_role(nodes, 'cinder')
-        self.assertEquals(len(cinders), 2)
-
-        computes = self.filter_by_role(nodes, 'compute')
-        self.assertEquals(len(computes), 1)
-
-
 class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
 
     def setUp(self):
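The RedeploymentErrorNodes test class deleted above (and its Neutron twin further down) is not lost coverage: node selection on failure moved out of the serializers, and the same three scenarios reappear in the new unit-test file nailgun/nailgun/test/unit/test_task_helpers.py at the end of this commit, rewritten against TaskHelper.nodes_to_deploy.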
@@ -441,7 +367,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
                 'pending_addition': True}])
 
         cluster_db = self.db.query(Cluster).get(cluster['id'])
-        cluster_db.prepare_for_deployment()
+        TaskHelper.prepare_for_deployment(cluster_db.nodes)
         return cluster_db
 
     @property
@@ -470,7 +396,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
     def test_serialize_node(self):
         node = self.env.create_node(
             api=True, cluster_id=self.cluster.id, pending_addition=True)
-        self.cluster.prepare_for_deployment()
+        TaskHelper.prepare_for_deployment(self.cluster.nodes)
 
         node_db = self.db.query(Node).get(node['id'])
         serialized_data = self.serializer.serialize_node(node_db, 'controller')
@@ -547,7 +473,7 @@ class TestNeutronOrchestratorSerializer(OrchestratorSerializerTestBase):
 
     def test_gre_segmentation(self):
         cluster = self.create_env('multinode', 'gre')
-        facts = self.serializer.serialize(cluster)
+        facts = self.serializer.serialize(cluster, cluster.nodes)
 
         for fact in facts:
             self.assertEquals(
@@ -582,7 +508,7 @@ class TestNeutronOrchestratorHASerializer(OrchestratorSerializerTestBase):
         )
 
         cluster_db = self.db.query(Cluster).get(cluster['id'])
-        cluster_db.prepare_for_deployment()
+        TaskHelper.prepare_for_deployment(cluster_db.nodes)
         return cluster_db
 
     @property
@@ -616,80 +542,3 @@ class TestNeutronOrchestratorHASerializer(OrchestratorSerializerTestBase):
             attrs['mp'],
             [{'point': '1', 'weight': '1'},
              {'point': '2', 'weight': '2'}])
-
-
-class TestNeutronOrchestratorHASerializerRedeploymentErrorNodes(
-        OrchestratorSerializerTestBase):
-
-    def create_env(self, nodes):
-        cluster = self.env.create(
-            cluster_kwargs={
-                'mode': 'ha_compact',
-                'net_provider': 'neutron',
-                'net_segment_type': 'vlan'},
-            nodes_kwargs=nodes)
-
-        cluster_db = self.db.query(Cluster).get(cluster['id'])
-        cluster_db.prepare_for_deployment()
-        return cluster_db
-
-    @property
-    def serializer(self):
-        return DeploymentHASerializer
-
-    def filter_by_role(self, nodes, role):
-        return filter(lambda node: role in node.all_roles, nodes)
-
-    def test_redeploy_all_controller_if_single_controller_failed(self):
-        cluster = self.create_env([
-            {'roles': ['controller'], 'status': 'error'},
-            {'roles': ['controller']},
-            {'roles': ['controller', 'cinder']},
-            {'roles': ['compute', 'cinder']},
-            {'roles': ['compute']},
-            {'roles': ['cinder']}])
-
-        nodes = self.serializer.get_nodes_to_deployment(cluster)
-        self.assertEquals(len(nodes), 3)
-
-        controllers = self.filter_by_role(nodes, 'controller')
-        self.assertEquals(len(controllers), 3)
-
-    def test_redeploy_only_compute_cinder(self):
-        cluster = self.create_env([
-            {'roles': ['controller']},
-            {'roles': ['controller']},
-            {'roles': ['controller', 'cinder']},
-            {'roles': ['compute', 'cinder']},
-            {'roles': ['compute'], 'status': 'error'},
-            {'roles': ['cinder'], 'status': 'error'}])
-
-        nodes = self.serializer.get_nodes_to_deployment(cluster)
-        self.assertEquals(len(nodes), 2)
-
-        cinders = self.filter_by_role(nodes, 'cinder')
-        self.assertEquals(len(cinders), 1)
-
-        computes = self.filter_by_role(nodes, 'compute')
-        self.assertEquals(len(computes), 1)
-
-    def test_redeploy_all_controller_and_compute_cinder(self):
-        cluster = self.create_env([
-            {'roles': ['controller'], 'status': 'error'},
-            {'roles': ['controller']},
-            {'roles': ['controller', 'cinder']},
-            {'roles': ['compute', 'cinder']},
-            {'roles': ['compute'], 'status': 'error'},
-            {'roles': ['cinder'], 'status': 'error'}])
-
-        nodes = self.serializer.get_nodes_to_deployment(cluster)
-        self.assertEquals(len(nodes), 5)
-
-        controllers = self.filter_by_role(nodes, 'controller')
-        self.assertEquals(len(controllers), 3)
-
-        cinders = self.filter_by_role(nodes, 'cinder')
-        self.assertEquals(len(cinders), 2)
-
-        computes = self.filter_by_role(nodes, 'compute')
-        self.assertEquals(len(computes), 1)
@@ -37,7 +37,7 @@ class TestProvisioningSerializer(BaseIntegrationTest):
             {'roles': ['compute'], 'pending_addition': True}])
 
         cluster_db = self.db.query(Cluster).get(cluster['id'])
-        serialized_cluster = serialize(cluster_db)
+        serialized_cluster = serialize(cluster_db, cluster_db.nodes)
 
         for node in serialized_cluster['nodes']:
            node_db = db().query(Node).filter_by(fqdn=node['hostname']).first()
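Provisioning serialization follows the same pattern: the module-level serialize() now takes the nodes to provision as a second argument, so a subset can be provisioned independently of deployment. A hedged usage sketch; the pending_addition filter is an assumption for illustration, not part of this hunk:

    # Provision only the nodes that are pending addition (a sketch).
    pending = [n for n in cluster_db.nodes if n.pending_addition]
    serialized_subset = serialize(cluster_db, pending)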
@@ -14,21 +14,21 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
 import json
+import nailgun
+import nailgun.rpc as rpc
 import time
 
 from mock import patch
 
-from nailgun.settings import settings
-
-import nailgun
 from nailgun.api.models import Cluster
 from nailgun.api.models import Node
 from nailgun.api.models import Notification
 from nailgun.api.models import Task
 from nailgun.errors import errors
-import nailgun.rpc as rpc
-from nailgun.task.manager import DeploymentTaskManager
+from nailgun.settings import settings
+from nailgun.task.helpers import TaskHelper
+from nailgun.task.manager import ApplyChangesTaskManager
 from nailgun.test.base import BaseIntegrationTest
 from nailgun.test.base import fake_tasks
 from nailgun.test.base import reverse
@@ -102,7 +102,7 @@ class TestTaskManagers(BaseIntegrationTest):
             {"pending_addition": True, 'roles': ['compute']}])
         cluster_db = self.env.clusters[0]
         # Generate ips, fqdns
-        cluster_db.prepare_for_deployment()
+        TaskHelper.prepare_for_deployment(cluster_db.nodes)
         # First node with status ready
         # should not be readeployed
         self.env.nodes[0].status = 'ready'
@@ -127,23 +127,26 @@ class TestTaskManagers(BaseIntegrationTest):
     @fake_tasks()
     def test_deployment_fails_if_node_offline(self):
         cluster = self.env.create_cluster(api=True)
-        self.env.create_node(cluster_id=cluster['id'],
+        self.env.create_node(
+            cluster_id=cluster['id'],
             roles=["controller"],
             pending_addition=True)
-        self.env.create_node(cluster_id=cluster['id'],
+        offline_node = self.env.create_node(
+            cluster_id=cluster['id'],
             roles=["compute"],
             online=False,
             name="Offline node",
             pending_addition=True)
-        self.env.create_node(cluster_id=cluster['id'],
+        self.env.create_node(
+            cluster_id=cluster['id'],
             roles=["compute"],
             pending_addition=True)
         supertask = self.env.launch_deployment()
         self.env.wait_error(
             supertask,
             60,
-            u"Deployment has failed. Check these nodes:\n"
-            "'Offline node'"
+            'Nodes "{0}" are offline. Remove them from environment '
+            'and try again.'.format(offline_node.full_name)
         )
 
     @fake_tasks()
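The expected error text changes because the offline-node check now fails fast and names every offline node, rather than reporting a generic deployment failure afterwards. A minimal sketch of how such a message could be built from a node list; the check's real location is not in this hunk, and only full_name and the message template come from the test:

    # Sketch under stated assumptions: collect offline nodes and format
    # the message the test asserts on.
    offline = [n for n in nodes if not n.online]
    if offline:
        names = '", "'.join(n.full_name for n in offline)
        message = ('Nodes "{0}" are offline. Remove them from environment '
                   'and try again.'.format(names))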
@@ -405,7 +408,7 @@ class TestTaskManagers(BaseIntegrationTest):
     def test_no_node_no_cry(self):
         cluster = self.env.create_cluster(api=True)
         cluster_id = cluster['id']
-        manager = DeploymentTaskManager(cluster_id)
+        manager = ApplyChangesTaskManager(cluster_id)
         task = Task(name='provision', cluster_id=cluster_id)
         self.db.add(task)
         self.db.commit()
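DeploymentTaskManager is renamed to ApplyChangesTaskManager throughout: "apply changes" stays the umbrella operation, while provisioning and deployment get their own code paths. Usage remains constructor-plus-execute, as the next hunk's test shows; the error-handling shape below is a sketch built only from what the tests assert:

    manager = ApplyChangesTaskManager(cluster_db.id)
    try:
        manager.execute()
    except errors.WrongNodeStatus:
        pass  # nothing to apply: pending changes were cleared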
@@ -424,7 +427,7 @@ class TestTaskManagers(BaseIntegrationTest):
         )
         cluster_db = self.env.clusters[0]
         cluster_db.clear_pending_changes()
-        manager = DeploymentTaskManager(cluster_db.id)
+        manager = ApplyChangesTaskManager(cluster_db.id)
         self.assertRaises(errors.WrongNodeStatus, manager.execute)
 
     @fake_tasks()
@@ -14,15 +14,11 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-import gzip
 import json
 import os
 import shutil
-from StringIO import StringIO
-import tarfile
 import tempfile
 import time
-import unittest
 
 from mock import Mock
 from mock import patch
@@ -357,69 +353,6 @@ class TestLogs(BaseIntegrationTest):
         tm_patcher.stop()
         self.assertEquals(resp.status, 400)
 
-    @unittest.skip("will be partly moved to shotgun")
-    def test_log_package_handler_old(self):
-        f = tempfile.NamedTemporaryFile(mode='r+b')
-        f.write('testcontent')
-        f.flush()
-        settings.LOGS_TO_PACK_FOR_SUPPORT = {'test': f.name}
-        resp = self.app.get(reverse('LogPackageHandler'))
-        self.assertEquals(200, resp.status)
-        tf = tarfile.open(fileobj=StringIO(resp.body), mode='r:gz')
-        m = tf.extractfile('test')
-        self.assertEquals(m.read(), 'testcontent')
-        f.close()
-        m.close()
-
-    @unittest.skip("will be partly moved to shotgun")
-    def test_log_package_handler_sensitive(self):
-        account = RedHatAccount()
-        account.username = "REDHATUSERNAME"
-        account.password = "REDHATPASSWORD"
-        account.license_type = "rhsm"
-        self.db.add(account)
-        self.db.commit()
-
-        f = tempfile.NamedTemporaryFile(mode='r+b')
-        f.write('begin\nREDHATUSERNAME\nREDHATPASSWORD\nend')
-        f.flush()
-        settings.LOGS_TO_PACK_FOR_SUPPORT = {'test': f.name}
-        resp = self.app.get(reverse('LogPackageHandler'))
-        self.assertEquals(200, resp.status)
-        tf = tarfile.open(fileobj=StringIO(resp.body), mode='r:gz')
-        m = tf.extractfile('test')
-        self.assertEquals(m.read(), 'begin\nusername\npassword\nend')
-        f.close()
-        m.close()
-
-    @unittest.skip("will be partly moved to shotgun")
-    def test_log_package_handler_sensitive_gz(self):
-        account = RedHatAccount()
-        account.username = "REDHATUSERNAME"
-        account.password = "REDHATPASSWORD"
-        account.license_type = "rhsm"
-        self.db.add(account)
-        self.db.commit()
-
-        f = tempfile.NamedTemporaryFile(mode='r+b', suffix='.gz')
-        fgz = gzip.GzipFile(mode='w+b', fileobj=f)
-        fgz.write('begin\nREDHATUSERNAME\nREDHATPASSWORD\nend')
-        fgz.flush()
-        fgz.close()
-
-        settings.LOGS_TO_PACK_FOR_SUPPORT = {'test.gz': f.name}
-        resp = self.app.get(reverse('LogPackageHandler'))
-        self.assertEquals(200, resp.status)
-        tf = tarfile.open(fileobj=StringIO(resp.body), mode='r:gz')
-
-        m = tf.extractfile('test.gz')
-        mgz = gzip.GzipFile(mode='r+b', fileobj=m)
-        self.assertEquals(mgz.read(), 'begin\nusername\npassword\nend')
-        mgz.close()
-
-        f.close()
-        m.close()
-
     def test_log_entry_collection_handler_sensitive(self):
         account = RedHatAccount()
         account.username = "REDHATUSERNAME"
nailgun/nailgun/test/unit/test_task_helpers.py (new file, 96 lines)
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2013 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nailgun.api.models import Cluster
+from nailgun.orchestrator.deployment_serializers \
+    import DeploymentHASerializer
+from nailgun.task.helpers import TaskHelper
+from nailgun.test.base import BaseUnitTest
+
+
+class TestTaskHelpersNodesSelectionInCaseOfFailedNodes(BaseUnitTest):
+
+    def create_env(self, nodes):
+        cluster = self.env.create(
+            cluster_kwargs={
+                'mode': 'ha_compact'},
+            nodes_kwargs=nodes)
+
+        cluster_db = self.db.query(Cluster).get(cluster['id'])
+        TaskHelper.prepare_for_deployment(cluster_db.nodes)
+        return cluster_db
+
+    @property
+    def serializer(self):
+        return DeploymentHASerializer
+
+    def filter_by_role(self, nodes, role):
+        return filter(lambda node: role in node.all_roles, nodes)
+
+    def test_redeploy_all_controller_if_single_controller_failed(self):
+        cluster = self.create_env([
+            {'roles': ['controller'], 'status': 'error'},
+            {'roles': ['controller']},
+            {'roles': ['controller', 'cinder']},
+            {'roles': ['compute', 'cinder']},
+            {'roles': ['compute']},
+            {'roles': ['cinder']}])
+
+        nodes = TaskHelper.nodes_to_deploy(cluster)
+        self.assertEquals(len(nodes), 3)
+
+        controllers = self.filter_by_role(nodes, 'controller')
+        self.assertEquals(len(controllers), 3)
+
+    def test_redeploy_only_compute_cinder(self):
+        cluster = self.create_env([
+            {'roles': ['controller']},
+            {'roles': ['controller']},
+            {'roles': ['controller', 'cinder']},
+            {'roles': ['compute', 'cinder']},
+            {'roles': ['compute'], 'status': 'error'},
+            {'roles': ['cinder'], 'status': 'error'}])
+
+        nodes = TaskHelper.nodes_to_deploy(cluster)
+        self.assertEquals(len(nodes), 2)
+
+        cinders = self.filter_by_role(nodes, 'cinder')
+        self.assertEquals(len(cinders), 1)
+
+        computes = self.filter_by_role(nodes, 'compute')
+        self.assertEquals(len(computes), 1)
+
+    def test_redeploy_all_controller_and_compute_cinder(self):
+        cluster = self.create_env([
+            {'roles': ['controller'], 'status': 'error'},
+            {'roles': ['controller']},
+            {'roles': ['controller', 'cinder']},
+            {'roles': ['compute', 'cinder']},
+            {'roles': ['compute'], 'status': 'error'},
+            {'roles': ['cinder'], 'status': 'error'}])
+
+        nodes = TaskHelper.nodes_to_deploy(cluster)
+        self.assertEquals(len(nodes), 5)
+
+        controllers = self.filter_by_role(nodes, 'controller')
+        self.assertEquals(len(controllers), 3)
+
+        cinders = self.filter_by_role(nodes, 'cinder')
+        self.assertEquals(len(cinders), 2)
+
+        computes = self.filter_by_role(nodes, 'compute')
+        self.assertEquals(len(computes), 1)
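These unit tests pin down the selection rule that replaced the serializer-level logic: TaskHelper.nodes_to_deploy picks the failed and pending nodes, and widens the set to every controller when any controller has failed, since HA controllers are redeployed together. A rough sketch of that rule as the tests describe it, not the actual implementation:

    # Sketch of the behaviour asserted above; the status/pending flags
    # and all_roles are assumed to exist as the tests use them.
    def nodes_to_deploy_sketch(cluster):
        selected = [n for n in cluster.nodes
                    if n.status == 'error' or n.pending_addition]
        if any('controller' in n.all_roles for n in selected):
            # One failed controller drags in all controllers.
            controllers = [n for n in cluster.nodes
                           if 'controller' in n.all_roles]
            selected = list(set(selected) | set(controllers))
        return selected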
@@ -84,14 +84,15 @@ module Naily
         Naily.logger.error "Error running provisioning: #{e.message}, trace: #{e.backtrace.inspect}"
         raise StopIteration
       end
 
+      @orchestrator.watch_provision_progress(
+        reporter, data['args']['task_uuid'], data['args']['provisioning_info']['nodes'])
     end
 
     def deploy(data)
       Naily.logger.info("'deploy' method called with data: #{data.inspect}")
 
       reporter = Naily::Reporter.new(@producer, data['respond_to'], data['args']['task_uuid'])
-      @orchestrator.watch_provision_progress(reporter, data['args']['task_uuid'], data['args']['deployment_info'])
 
       begin
         @orchestrator.deploy(reporter, data['args']['task_uuid'], data['args']['deployment_info'])
         reporter.report('status' => 'ready', 'progress' => 100)
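On the Naily side, watch_provision_progress moves out of deploy() and into the provisioning path: deploy() previously polled provisioning progress with deployment_info, which was both the wrong payload and the wrong phase. With provisioning now a first-class operation, progress watching happens where provisioning actually runs, against provisioning_info['nodes'].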