Merge "Graph based task serializer for deployment"
commit fa84b696a4
@@ -225,3 +225,15 @@ OPENSTACK_IMAGES_SETTINGS = Enum(
        "size_unit"
    )
)

DEPLOY_STRATEGY = Enum(
    'parallel',
    'one_by_one'
)

ORCHESTRATOR_TASK_TYPES = Enum(
    'puppet',
    'shell',
    'sync',
    'upload_file'
)
nailgun/nailgun/orchestrator/deployment_graph.py (new file, 232 lines)
@@ -0,0 +1,232 @@
# -*- coding: utf-8 -*-

# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from collections import defaultdict

import networkx as nx
import yaml

from nailgun import consts
from nailgun.errors import errors
from nailgun.orchestrator import graph_configuration
from nailgun.orchestrator import priority_serializers as ps
from nailgun.orchestrator import tasks_templates as templates
from nailgun.utils import extract_env_version


class DeploymentGraph(nx.DiGraph):
    """Directed graph that is used to generate configuration for specific
    orchestrators.

    In case of Astute we are working with priorities.
    In case of Mistral we will serialize a workbook from this graph.

    General task format:

    id: string
    type: string - one of - role, stage, puppet, shell, upload_file, sync
    required_for: direct dependencies
    requires: reverse dependencies
    role: direct dependencies
    stage: direct dependency
    parameters: parameters specific for each task type
    """

    def add_tasks(self, tasks):
        for task in tasks:
            self.add_task(task)

    def add_task(self, task):
        self.add_node(task['id'], **task)
        for req in task.get('required_for', ()):
            self.add_edge(task['id'], req)
        for req in task.get('requires', ()):
            self.add_edge(req, task['id'])
        for req in task.get('role', ()):
            self.add_edge(task['id'], req)
        if 'stage' in task:
            self.add_edge(task['id'], task['stage'])

    def group_nodes_by_roles(self, nodes, roles):
        """Group nodes by roles

        :param nodes: list of node db objects
        :param roles: list of role names
        :returns: dict of {role_name: nodes_list} pairs
        """
        res = defaultdict(list)
        for node in nodes:
            if node['role'] in roles:
                res[node['role']].append(node)
        return res

    def assign_parallel_nodes(self, priority, nodes):
        """It is possible that the same node has 2 or more roles that can be
        deployed in parallel. We cannot allow that, so priorities are
        assigned in chunks.

        :param priority: PriorityStrategy instance
        :param nodes: list of serialized nodes (dicts)
        """
        current_nodes = nodes
        while current_nodes:
            next_nodes = []
            group = []
            added_uids = []
            for node in current_nodes:
                if 'uid' not in node or 'role' not in node:
                    raise errors.InvalidSerializedNode(
                        'uid and role are mandatory fields. Node: {0}'.format(
                            node))
                if node['uid'] not in added_uids:
                    group.append(node)
                    added_uids.append(node['uid'])
                else:
                    next_nodes.append(node)
            priority.in_parallel(group)
            current_nodes = next_nodes

    def process_parallel_nodes(self, priority, parallel_roles, grouped_nodes):
        """Process both types of parallel deployment nodes

        :param priority: PriorityStrategy instance
        :param parallel_roles: list of dict objects
        :param grouped_nodes: dict with {role: nodes} mapping
        """
        parallel_nodes = []
        for role in parallel_roles:
            if 'amount' in role['parameters']['strategy']:
                priority.in_parallel_by(
                    grouped_nodes[role['id']],
                    role['parameters']['strategy']['amount'])
            else:
                parallel_nodes.extend(grouped_nodes[role['id']])
        if parallel_nodes:
            # check the assign_parallel_nodes docstring for explanation
            self.assign_parallel_nodes(priority, parallel_nodes)

    def add_priorities(self, nodes):
        """Add priorities and tasks for all nodes

        :param nodes: list of node db objects
        """
        priority = ps.PriorityStrategy()
        roles_subgraph = self.get_roles_subgraph()
        current_roles = roles_subgraph.get_root_roles()
        # get list with names ['controller', 'compute', 'cinder']
        all_roles = roles_subgraph.nodes()
        grouped_nodes = self.group_nodes_by_roles(nodes, all_roles)
        # if there are no nodes with some roles - mark those roles as processed
        processed_roles = set(all_roles) - set(grouped_nodes.keys())

        while current_roles:
            one_by_one = []
            parallel = []

            for r in current_roles:
                role = self.node[r]
                if (role['parameters']['strategy']['type']
                        == consts.DEPLOY_STRATEGY.one_by_one):
                    one_by_one.append(role)
                elif (role['parameters']['strategy']['type']
                        == consts.DEPLOY_STRATEGY.parallel):
                    parallel.append(role)

            for role in one_by_one:
                priority.one_by_one(grouped_nodes[role['id']])

            self.process_parallel_nodes(priority, parallel, grouped_nodes)

            processed_roles.update(current_roles)
            current_roles = roles_subgraph.get_next_roles(processed_roles)

    def get_root_roles(self):
        """Return roles that don't have predecessors

        :returns: list of role names
        """
        result = []
        for node in self.nodes():
            if not self.predecessors(node):
                result.append(node)
        return result

    def get_next_roles(self, success_roles):
        """Get roles whose predecessors are all in the success_roles list

        :param success_roles: list of role names
        :returns: list of role names
        """
        result = []
        for role in self.nodes():
            if (set(self.predecessors(role)) <= success_roles
                    and role not in success_roles):
                result.append(role)
        return result

    def get_roles_subgraph(self):
        roles = [t['id'] for t in self.node.values() if t['type'] == 'role']
        return self.subgraph(roles)

    def get_tasks_for_role(self, role_name):
        tasks = []
        for task in self.predecessors(role_name):
            if self.node[task]['type'] not in ('role', 'stage'):
                tasks.append(task)
        return self.subgraph(tasks)

    def serialize_tasks(self, node):
        """Serialize tasks with the attributes required by the orchestrator

        :param node: dict with a serialized node
        """
        tasks = self.get_tasks_for_role(node['role']).topology
        serialized = []
        priority = ps.Priority()
        for task in tasks:
            if task['type'] == consts.ORCHESTRATOR_TASK_TYPES.puppet:
                item = templates.make_puppet_task(
                    [node['uid']],
                    task)
            elif task['type'] == consts.ORCHESTRATOR_TASK_TYPES.shell:
                item = templates.make_shell_task(
                    [node['uid']],
                    task)
            item['priority'] = priority.next()
            serialized.append(item)
        return serialized

    @property
    def topology(self):
        return map(lambda t: self.node[t], nx.topological_sort(self))


def create_graph(cluster):
    """Creates a graph with dependencies between roles and tasks.

    :param cluster: DB Cluster object
    :returns: DeploymentGraph instance
    """
    env_version = extract_env_version(cluster.release.version)
    if env_version.startswith('6.0') or env_version.startswith('5.1'):
        tasks = graph_configuration.DEPLOYMENT_CURRENT
    elif env_version.startswith('5.0'):
        tasks = graph_configuration.DEPLOYMENT_50
    if cluster.pending_release_id:
        tasks = graph_configuration.PATCHING
    graph = DeploymentGraph()
    graph.add_tasks(yaml.load(tasks))
    return graph
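
Editor's note: a minimal usage sketch of the graph above (illustrative, not part of the commit). It assumes the module is importable as nailgun.orchestrator.deployment_graph and that networkx 1.x is installed, since the class relies on the 1.x `.node` attribute and list-returning `predecessors()`; `yaml.safe_load` is used here while the commit itself calls `yaml.load`.

import yaml
from nailgun.orchestrator.deployment_graph import DeploymentGraph

TASKS = """
- id: deploy
  type: stage
- id: primary-controller
  type: role
  required_for: [deploy]
  parameters:
    strategy:
      type: one_by_one
- id: controller
  type: role
  requires: [primary-controller]
  required_for: [deploy]
  parameters:
    strategy:
      type: parallel
      amount: 6
"""

graph = DeploymentGraph()
graph.add_tasks(yaml.safe_load(TASKS))

roles = graph.get_roles_subgraph()
# roles without predecessors are deployed first
print(roles.get_root_roles())                         # e.g. ['primary-controller']
# once primary-controller has succeeded, controller becomes available
print(roles.get_next_roles({'primary-controller'}))   # e.g. ['controller']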
@@ -36,7 +36,7 @@ from nailgun.db.sqlalchemy.models import Node
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.objects import Cluster
from nailgun.orchestrator import priority_serializers as ps
from nailgun.orchestrator.deployment_graph import create_graph
from nailgun.settings import settings
from nailgun.utils import dict_merge
from nailgun.utils import extract_env_version

@@ -797,16 +797,26 @@ class NeutronNetworkDeploymentSerializer60(
        return attrs


class DeploymentMultinodeSerializer(object):
class GraphBasedSerializer(object):

    def __init__(self, graph):
        self.graph = graph

    def set_deployment_priorities(self, nodes):
        self.graph.add_priorities(nodes)

    def set_tasks(self, nodes):
        for node in nodes:
            node['tasks'] = self.graph.serialize_tasks(node)


class DeploymentMultinodeSerializer(GraphBasedSerializer):

    nova_network_serializer = NovaNetworkDeploymentSerializer
    neutron_network_serializer = NeutronNetworkDeploymentSerializer

    critical_roles = ['controller', 'ceph-osd', 'primary-mongo']

    def __init__(self, priority_serializer):
        self.priority = priority_serializer

    def serialize(self, cluster, nodes, ignore_customized=False):
        """Method generates facts which
        through an orchestrator passes to puppet

@@ -828,7 +838,7 @@ class DeploymentMultinodeSerializer(object):

        self.set_deployment_priorities(nodes)
        self.set_critical_nodes(nodes)

        self.set_tasks(nodes)
        return [dict_merge(node, common_attrs) for node in nodes]

    def serialize_customized(self, cluster, nodes):

@@ -934,10 +944,6 @@ class DeploymentMultinodeSerializer(object):
    def not_roles(self, nodes, roles):
        return filter(lambda node: node['role'] not in roles, nodes)

    def set_deployment_priorities(self, nodes):
        """Set priorities of deployment."""
        self.priority.set_deployment_priorities(nodes)

    def set_critical_nodes(self, nodes):
        """Set behavior on nodes deployment error
        during deployment process.

@@ -1156,53 +1162,26 @@ def create_serializer(cluster):
    :param cluster: a cluster to process
    :returns: a serializer for a given cluster
    """
    # env-version serializer map
    serializers_map = {
        '5.0': {
            'multinode': (
                DeploymentMultinodeSerializer,
                ps.PriorityMultinodeSerializer50,
            ),
            'ha': (
                DeploymentHASerializer,
                ps.PriorityHASerializer50,
            ),
            'multinode': DeploymentMultinodeSerializer,
            'ha': DeploymentHASerializer,
        },
        '5.1': {
            'multinode': (
                DeploymentMultinodeSerializer51,
                ps.PriorityMultinodeSerializer51,
            ),
            'ha': (
                DeploymentHASerializer51,
                ps.PriorityHASerializer51,
            ),
            'multinode': DeploymentMultinodeSerializer51,
            'ha': DeploymentHASerializer51,
        },
        '6.0': {
            'multinode': (
                DeploymentMultinodeSerializer60,
                ps.PriorityMultinodeSerializer60,
            ),
            'ha': (
                DeploymentHASerializer60,
                ps.PriorityHASerializer60,
            ),
            'multinode': DeploymentMultinodeSerializer60,
            'ha': DeploymentHASerializer60,
        },
    }

    env_version = extract_env_version(cluster.release.version)
    env_mode = 'ha' if cluster.is_ha_mode else 'multinode'

    # choose serializer
    for version, serializers in six.iteritems(serializers_map):
        if env_version.startswith(version):
            serializer, priority = serializers[env_mode]
            if cluster.pending_release_id:
                priority = {
                    'ha': ps.PriorityHASerializerPatching,
                    'multinode': ps.PriorityMultinodeSerializerPatching,
                }.get(env_mode)
            return serializer(priority())
            return serializers[env_mode](create_graph(cluster))

    raise errors.UnsupportedSerializer()
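
Editor's note: a sketch of how the new serializers are driven after this change (illustrative, not part of the commit; it mirrors the pattern used in the updated tests, where the cluster is mocked and the environment version is patched).

import mock
from nailgun.orchestrator.deployment_graph import create_graph
from nailgun.orchestrator.deployment_serializers import DeploymentHASerializer

cluster = mock.MagicMock(pending_release_id=None)
with mock.patch(
        'nailgun.orchestrator.deployment_graph.extract_env_version',
        return_value='6.0'):
    graph = create_graph(cluster)          # YAML config picked by env version

serializer = DeploymentHASerializer(graph)  # serializers now take the graph
nodes = [{'uid': '1', 'role': 'primary-controller'},
         {'uid': '2', 'role': 'controller'}]
serializer.set_deployment_priorities(nodes)  # each node dict gains 'priority'
serializer.set_tasks(nodes)                  # each node dict gains 'tasks'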
nailgun/nailgun/orchestrator/graph_configuration.py (new file, 225 lines)
@@ -0,0 +1,225 @@
# -*- coding: utf-8 -*-

# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# (dshulyak) temporary, this config will be moved to fuel-library
# until we stabilize our API
DEPLOYMENT_CURRENT = """
- id: deploy
  type: stage
- id: primary-controller
  type: role
  required_for: [deploy]
  parameters:
    strategy:
      type: one_by_one
- id: controller
  type: role
  requires: [primary-controller]
  required_for: [deploy]
  parameters:
    strategy:
      type: parallel
      amount: 6
- id: cinder
  type: role
  requires: [controller]
  required_for: [deploy]
  parameters:
    strategy:
      type: parallel
- id: compute
  type: role
  requires: [controller]
  required_for: [deploy]
  parameters:
    strategy:
      type: parallel
- id: zabbix-server
  type: role
  required_for: [deploy]
  parameters:
    strategy:
      type: one_by_one
- id: mongo
  type: role
  requires: [zabbix-server]
  required_for: [deploy, primary-controller, controller]
  parameters:
    strategy:
      type: parallel
- id: primary-mongo
  type: role
  requires: [mongo]
  required_for: [deploy, primary-controller, controller]
  parameters:
    strategy:
      type: one_by_one
- id: ceph-osd
  type: role
  requires: [controller]
  required_for: [deploy]
  parameters:
    strategy:
      type: parallel
- id: deploy_legacy
  type: puppet
  role: [primary-controller, controller,
         cinder, compute, ceph-osd,
         zabbix-server, primary-mongo, mongo]
  required_for: [deploy]
  parameters:
    puppet_manifest: /etc/puppet/manifests/site.pp
    puppet_modules: /etc/puppet/modules
    timeout: 3600
"""

DEPLOYMENT_50 = """
- id: deploy
  type: stage
- id: primary-controller
  type: role
  required_for: [deploy]
  parameters:
    strategy:
      type: one_by_one
- id: controller
  type: role
  requires: [primary-controller]
  required_for: [deploy]
  parameters:
    strategy:
      type: one_by_one
- id: cinder
  type: role
  requires: [controller]
  required_for: [deploy]
  parameters:
    strategy:
      type: parallel
- id: compute
  type: role
  requires: [controller]
  required_for: [deploy]
  parameters:
    strategy:
      type: parallel
- id: zabbix-server
  type: role
  required_for: [deploy]
  parameters:
    strategy:
      type: one_by_one
- id: mongo
  type: role
  requires: [zabbix-server]
  required_for: [deploy, primary-controller, controller]
  parameters:
    strategy:
      type: one_by_one
- id: primary-mongo
  type: role
  requires: [mongo]
  required_for: [deploy, primary-controller, controller]
  parameters:
    strategy:
      type: one_by_one
- id: ceph-osd
  type: role
  requires: [controller]
  required_for: [deploy]
  parameters:
    strategy:
      type: parallel
- id: deploy_legacy
  type: puppet
  role: [primary-controller, controller,
         cinder, compute, ceph-osd,
         zabbix-server, primary-mongo, mongo]
  required_for: [deploy]
  parameters:
    puppet_manifest: /etc/puppet/manifests/site.pp
    puppet_modules: /etc/puppet/modules
    timeout: 3600
"""

PATCHING = """
- id: deploy
  type: stage
- id: primary-controller
  type: role
  required_for: [deploy]
  parameters:
    strategy:
      type: one_by_one
- id: controller
  type: role
  requires: [primary-controller]
  required_for: [deploy]
  parameters:
    strategy:
      type: one_by_one
- id: cinder
  type: role
  requires: [controller]
  required_for: [deploy]
  parameters:
    strategy:
      type: one_by_one
- id: compute
  type: role
  requires: [controller]
  required_for: [deploy]
  parameters:
    strategy:
      type: one_by_one
- id: zabbix-server
  type: role
  required_for: [deploy]
  parameters:
    strategy:
      type: one_by_one
- id: mongo
  type: role
  requires: [zabbix-server]
  required_for: [deploy, primary-controller, controller]
  parameters:
    strategy:
      type: one_by_one
- id: primary-mongo
  type: role
  requires: [mongo]
  required_for: [deploy, primary-controller, controller]
  parameters:
    strategy:
      type: one_by_one
- id: ceph-osd
  type: role
  requires: [controller]
  required_for: [deploy]
  parameters:
    strategy:
      type: one_by_one
- id: deploy_legacy
  type: puppet
  role: [primary-controller, controller,
         cinder, compute, ceph-osd,
         zabbix-server, primary-mongo, mongo]
  required_for: [deploy]
  parameters:
    puppet_manifest: /etc/puppet/manifests/site.pp
    puppet_modules: /etc/puppet/modules
    timeout: 3600
"""
@@ -21,74 +21,10 @@ from nailgun import consts
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.orchestrator.priority_serializers import PriorityStrategy
import nailgun.orchestrator.tasks_templates as templates
from nailgun.plugins.manager import PluginManager


def make_repo_task(uids, repo_data, repo_path):
    return {
        'type': 'upload_file',
        'uids': uids,
        'parameters': {
            'path': repo_path,
            'data': repo_data}}


def make_ubuntu_repo_task(plugin_name, repo_url, uids):
    repo_data = 'deb {0} /'.format(repo_url)
    repo_path = '/etc/apt/sources.list.d/{0}.list'.format(plugin_name)

    return make_repo_task(uids, repo_data, repo_path)


def make_centos_repo_task(plugin_name, repo_url, uids):
    repo_data = '\n'.join([
        '[{0}]',
        'name=Plugin {0} repository',
        'baseurl={1}',
        'gpgcheck=0']).format(plugin_name, repo_url)
    repo_path = '/etc/yum.repos.d/{0}.repo'.format(plugin_name)

    return make_repo_task(uids, repo_data, repo_path)


def make_sync_scripts_task(uids, src, dst):
    return {
        'type': 'sync',
        'uids': uids,
        'parameters': {
            'src': src,
            'dst': dst}}


def make_shell_task(uids, task, cwd):
    return {
        'type': 'shell',
        'uids': uids,
        'parameters': {
            'cmd': task['parameters']['cmd'],
            'timeout': task['parameters']['timeout'],
            'cwd': cwd}}


def make_apt_update_task(uids):
    task = {
        'parameters': {
            'cmd': 'apt-get update',
            'timeout': 180}}
    return make_shell_task(uids, task, '/')


def make_puppet_task(uids, task, cwd):
    return {
        'type': 'puppet',
        'uids': uids,
        'parameters': {
            'puppet_manifest': task['parameters']['puppet_manifest'],
            'puppet_modules': task['parameters']['puppet_modules'],
            'timeout': task['parameters']['timeout'],
            'cwd': cwd}}


class BasePluginDeploymentHooksSerializer(object):

    def __init__(self, cluster, nodes):

@@ -115,7 +51,8 @@ class BasePluginDeploymentHooksSerializer(object):
                continue
            tasks.append(self.serialize_task(
                plugin, task,
                make_shell_task(uids, task, plugin.slaves_scripts_path)))
                templates.make_shell_task(
                    uids, task, plugin.slaves_scripts_path)))

        for task in puppet_tasks:
            uids = self.get_uids_for_task(task)

@@ -123,7 +60,8 @@ class BasePluginDeploymentHooksSerializer(object):
                continue
            tasks.append(self.serialize_task(
                plugin, task,
                make_puppet_task(uids, task, plugin.slaves_scripts_path)))
                templates.make_puppet_task(
                    uids, task, plugin.slaves_scripts_path)))

        return tasks

@@ -184,14 +122,14 @@ class PluginsPreDeploymentHooksSerializer(BasePluginDeploymentHooksSerializer):
                repo_tasks.append(
                    self.serialize_task(
                        plugin, {},
                        make_centos_repo_task(
                        templates.make_centos_repo_task(
                            plugin.full_name,
                            plugin.repo_url(self.cluster), uids)))
            elif operating_system == consts.RELEASE_OS.ubuntu:
                repo_tasks.append(
                    self.serialize_task(
                        plugin, {},
                        make_ubuntu_repo_task(
                        templates.make_ubuntu_repo_task(
                            plugin.full_name,
                            plugin.repo_url(self.cluster), uids)))
                # apt-get update executed after every additional source.list

@@ -199,7 +137,7 @@ class PluginsPreDeploymentHooksSerializer(BasePluginDeploymentHooksSerializer):
                repo_tasks.append(
                    self.serialize_task(
                        plugin, {},
                        make_apt_update_task(uids)))
                        templates.make_apt_update_task(uids)))
        else:
            raise errors.InvalidOperatingSystem(
                'Operating system {0} is invalid'.format(operating_system))

@@ -215,7 +153,7 @@ class PluginsPreDeploymentHooksSerializer(BasePluginDeploymentHooksSerializer):
        tasks.append(
            self.serialize_task(
                plugin, {},
                make_sync_scripts_task(
                templates.make_sync_scripts_task(
                    uids,
                    plugin.master_scripts_path(self.cluster),
                    plugin.slaves_scripts_path)))

@@ -14,9 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.

import abc
import six


class Priority(object):
    """Returns a priority sequence from highest to lowest.

@@ -69,158 +66,3 @@ class PriorityStrategy(object):
            if index % amount == 0:
                self._priority.next()
            task['priority'] = self._priority.current


@six.add_metaclass(abc.ABCMeta)
class PrioritySerializer(object):
    """A base interface for implementing priority serializer."""

    def __init__(self):
        self.priority = PriorityStrategy()

    def by_role(self, nodes, role):
        return filter(lambda node: node['role'] == role, nodes)

    def not_roles(self, nodes, roles):
        return filter(lambda node: node['role'] not in roles, nodes)

    @abc.abstractmethod
    def set_deployment_priorities(self, nodes):
        """Set deployment priorities for a given nodes.

        :param nodes: a list of nodes to be prioritized
        """


class PriorityMultinodeSerializer50(PrioritySerializer):

    def set_deployment_priorities(self, nodes):

        self.priority.one_by_one(self.by_role(nodes, 'zabbix-server'))
        self.priority.one_by_one(self.by_role(nodes, 'mongo'))
        self.priority.one_by_one(self.by_role(nodes, 'primary-mongo'))
        self.priority.one_by_one(self.by_role(nodes, 'controller'))

        self.priority.in_parallel(
            self.not_roles(nodes, [
                'controller',
                'mongo',
                'primary-mongo',
                'zabbix-server']))


# Yep, for MultiNode we have no changes between 5.0, 5.1 and 6.0
PriorityMultinodeSerializer51 = PriorityMultinodeSerializer50
PriorityMultinodeSerializer60 = PriorityMultinodeSerializer50


class PriorityHASerializer50(PrioritySerializer):

    def set_deployment_priorities(self, nodes):

        self.priority.in_parallel(self.by_role(nodes, 'zabbix-server'))
        self.priority.in_parallel(self.by_role(nodes, 'primary-swift-proxy'))
        self.priority.in_parallel(self.by_role(nodes, 'swift-proxy'))
        self.priority.in_parallel(self.by_role(nodes, 'storage'))

        self.priority.one_by_one(self.by_role(nodes, 'mongo'))
        self.priority.one_by_one(self.by_role(nodes, 'primary-mongo'))
        self.priority.one_by_one(self.by_role(nodes, 'primary-controller'))

        # We are deploying in parallel, so do not let us deploy more than
        # 6 controllers simultaneously or galera master may be exhausted
        self.priority.one_by_one(self.by_role(nodes, 'controller'))

        self.priority.in_parallel(
            self.not_roles(nodes, [
                'primary-swift-proxy',
                'swift-proxy',
                'storage',
                'primary-controller',
                'controller',
                'quantum',
                'mongo',
                'primary-mongo',
                'zabbix-server']))


class PriorityHASerializer51(PrioritySerializer):

    def set_deployment_priorities(self, nodes):

        self.priority.in_parallel(self.by_role(nodes, 'zabbix-server'))
        self.priority.in_parallel(self.by_role(nodes, 'primary-swift-proxy'))
        self.priority.in_parallel(self.by_role(nodes, 'swift-proxy'))
        self.priority.in_parallel(self.by_role(nodes, 'storage'))

        self.priority.one_by_one(self.by_role(nodes, 'mongo'))
        self.priority.one_by_one(self.by_role(nodes, 'primary-mongo'))
        self.priority.one_by_one(self.by_role(nodes, 'primary-controller'))

        # We are deploying in parallel, so do not let us deploy more than
        # 6 controllers simultaneously or galera master may be exhausted
        self.priority.in_parallel_by(self.by_role(nodes, 'controller'), 6)

        self.priority.in_parallel(
            self.not_roles(nodes, [
                'primary-swift-proxy',
                'swift-proxy',
                'storage',
                'primary-controller',
                'controller',
                'quantum',
                'mongo',
                'primary-mongo',
                'zabbix-server']))


# Since no difference between 6.0 and 5.1 serializers so far
PriorityHASerializer60 = PriorityHASerializer51


class PriorityMultinodeSerializerPatching(PrioritySerializer):

    def set_deployment_priorities(self, nodes):

        self.priority.one_by_one(self.by_role(nodes, 'zabbix-server'))
        self.priority.one_by_one(self.by_role(nodes, 'mongo'))
        self.priority.one_by_one(self.by_role(nodes, 'primary-mongo'))
        self.priority.one_by_one(self.by_role(nodes, 'controller'))

        # this is a difference from the regular multinode mode
        self.priority.one_by_one(
            self.not_roles(nodes, [
                'controller',
                'mongo',
                'primary-mongo',
                'zabbix-server']))


class PriorityHASerializerPatching(PrioritySerializer):

    def set_deployment_priorities(self, nodes):

        self.priority.one_by_one(self.by_role(nodes, 'zabbix-server'))
        self.priority.one_by_one(self.by_role(nodes, 'primary-swift-proxy'))
        self.priority.one_by_one(self.by_role(nodes, 'swift-proxy'))
        self.priority.one_by_one(self.by_role(nodes, 'storage'))

        self.priority.one_by_one(self.by_role(nodes, 'mongo'))
        self.priority.one_by_one(self.by_role(nodes, 'primary-mongo'))
        self.priority.one_by_one(self.by_role(nodes, 'primary-controller'))

        # We are deploying in parallel, so do not let us deploy more than
        # 6 controllers simultaneously or galera master may be exhausted
        self.priority.one_by_one(self.by_role(nodes, 'controller'))

        self.priority.one_by_one(
            self.not_roles(nodes, [
                'primary-swift-proxy',
                'swift-proxy',
                'storage',
                'primary-controller',
                'controller',
                'quantum',
                'mongo',
                'primary-mongo',
                'zabbix-server']))
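
Editor's note: a sketch of the PriorityStrategy contract that the graph code above relies on (illustrative, not part of the commit; method names come from the calls used in deployment_graph.py, and the 100-step values match the expectations in the updated tests).

from nailgun.orchestrator.priority_serializers import PriorityStrategy

nodes = [{'uid': str(i), 'role': 'controller'} for i in range(1, 4)]
strategy = PriorityStrategy()

strategy.one_by_one(nodes[:2])         # first two nodes get priorities 100, 200
strategy.in_parallel_by(nodes[2:], 6)  # next chunk of up to 6 nodes shares 300
print([n['priority'] for n in nodes])  # [100, 200, 300]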
nailgun/nailgun/orchestrator/tasks_templates.py (new file, 80 lines)
@@ -0,0 +1,80 @@
# -*- coding: utf-8 -*-

# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


def make_repo_task(uids, repo_data, repo_path):
    return {
        'type': 'upload_file',
        'uids': uids,
        'parameters': {
            'path': repo_path,
            'data': repo_data}}


def make_ubuntu_repo_task(plugin_name, repo_url, uids):
    repo_data = 'deb {0} /'.format(repo_url)
    repo_path = '/etc/apt/sources.list.d/{0}.list'.format(plugin_name)

    return make_repo_task(uids, repo_data, repo_path)


def make_centos_repo_task(plugin_name, repo_url, uids):
    repo_data = '\n'.join([
        '[{0}]',
        'name=Plugin {0} repository',
        'baseurl={1}',
        'gpgcheck=0']).format(plugin_name, repo_url)
    repo_path = '/etc/yum.repos.d/{0}.repo'.format(plugin_name)

    return make_repo_task(uids, repo_data, repo_path)


def make_sync_scripts_task(uids, src, dst):
    return {
        'type': 'sync',
        'uids': uids,
        'parameters': {
            'src': src,
            'dst': dst}}


def make_shell_task(uids, task, cwd='/'):
    return {
        'type': 'shell',
        'uids': uids,
        'parameters': {
            'cmd': task['parameters']['cmd'],
            'timeout': task['parameters']['timeout'],
            'cwd': cwd}}


def make_apt_update_task(uids):
    task = {
        'parameters': {
            'cmd': 'apt-get update',
            'timeout': 180}}
    return make_shell_task(uids, task, '/')


def make_puppet_task(uids, task, cwd='/'):
    return {
        'type': 'puppet',
        'uids': uids,
        'parameters': {
            'puppet_manifest': task['parameters']['puppet_manifest'],
            'puppet_modules': task['parameters']['puppet_modules'],
            'timeout': task['parameters']['timeout'],
            'cwd': cwd}}
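
Editor's note: for reference, the shape of the dicts these helpers produce (illustrative, not part of the commit; the sample task parameters mirror the deploy_legacy entry from graph_configuration.py).

from nailgun.orchestrator import tasks_templates as templates

task = {'parameters': {'puppet_manifest': '/etc/puppet/manifests/site.pp',
                       'puppet_modules': '/etc/puppet/modules',
                       'timeout': 3600}}
print(templates.make_puppet_task(['1'], task))
# {'type': 'puppet', 'uids': ['1'], 'parameters': {
#     'puppet_manifest': '/etc/puppet/manifests/site.pp',
#     'puppet_modules': '/etc/puppet/modules',
#     'timeout': 3600, 'cwd': '/'}}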
@@ -662,7 +662,7 @@ class FakeCapacityLog(FakeAmpqThread):

FAKE_THREADS = {
    'provision': FakeProvisionThread,
    'deploy': FakeDeploymentThread,
    'granular_deploy': FakeDeploymentThread,
    'remove_nodes': FakeDeletionThread,
    'stop_deploy_task': FakeStopDeploymentThread,
    'reset_environment': FakeResetEnvironmentThread,

@@ -150,9 +150,11 @@ class DeploymentTask(object):
        for node in nodes:
            node.pending_addition = False

        # NOTE(dshulyak) discussed with warpc, separation is required
        # to leave legacy deployment model as it is
        rpc_message = make_astute_message(
            task,
            'deploy',
            'granular_deploy',
            'deploy_resp',
            {
                'deployment_info': serialized_cluster,

@@ -187,7 +189,7 @@ class UpdateTask(object):

        rpc_message = make_astute_message(
            task,
            'deploy',
            'granular_deploy',
            'deploy_resp',
            {
                'deployment_info': serialized_cluster
@@ -85,6 +85,7 @@ class TestHandlers(BaseIntegrationTest):
            'storage_interface': 'eth0.102',
            'public_interface': 'eth0',
            'floating_interface': 'eth0',
            'tasks': [],

            'master_ip': '127.0.0.1',
            'use_cinder': True,

@@ -166,14 +167,6 @@ class TestHandlers(BaseIntegrationTest):
            ),
        }

        # Individual attrs calculation and
        # merging with common attrs
        priority_mapping = {
            'controller': [600, 600, 500],
            'cinder': 700,
            'compute': 700
        }

        critical_mapping = {
            'primary-controller': True,
            'controller': False,

@@ -185,10 +178,7 @@ class TestHandlers(BaseIntegrationTest):
        for node in nodes_db:
            ips = assigned_ips[node.id]
            for role in sorted(node.roles):
                priority = priority_mapping[role]
                is_critical = critical_mapping[role]
                if isinstance(priority, list):
                    priority = priority.pop()

                individual_atts = {
                    'uid': str(node.id),

@@ -197,7 +187,7 @@ class TestHandlers(BaseIntegrationTest):
                    'online': node.online,
                    'fail_if_error': is_critical,
                    'fqdn': 'node-%d.%s' % (node.id, settings.DNS_DOMAIN),
                    'priority': priority,
                    'priority': 100,

                    'network_data': {
                        'eth0': {

@@ -240,7 +230,7 @@ class TestHandlers(BaseIntegrationTest):

        deployment_msg = {
            'api_version': '1',
            'method': 'deploy',
            'method': 'granular_deploy',
            'respond_to': 'deploy_resp',
            'args': {}
        }

@@ -374,7 +364,8 @@ class TestHandlers(BaseIntegrationTest):
                'public_address',
                'storage_address',
                'ipaddr',
                'IP'])
                'IP',
                'tasks'])
        self.datadiff(
            args[1][1],
            deployment_msg,

@@ -382,7 +373,9 @@ class TestHandlers(BaseIntegrationTest):
                'public_address',
                'storage_address',
                'ipaddr',
                'IP'])
                'IP',
                'tasks',
                'priority'])

    @fake_tasks(fake_rpc=False, mock_rpc=False)
    @patch('nailgun.rpc.cast')

@@ -440,7 +433,8 @@ class TestHandlers(BaseIntegrationTest):
            'deployment_id': cluster_db.id,
            'openstack_version_prev': None,
            'openstack_version': cluster_db.release.version,
            'fuel_version': cluster_db.fuel_version
            'fuel_version': cluster_db.fuel_version,
            'tasks': []
        }
        common_attrs.update(
            objects.Release.get_orchestrator_data_dict(cluster_db.release)

@@ -563,14 +557,6 @@ class TestHandlers(BaseIntegrationTest):
            ),
        }

        # Individual attrs calculation and
        # merging with common attrs
        priority_mapping = {
            'controller': [600, 600, 500],
            'cinder': 700,
            'compute': 700
        }

        critical_mapping = {
            'primary-controller': True,
            'controller': False,

@@ -582,10 +568,7 @@ class TestHandlers(BaseIntegrationTest):
        for node in nodes_db:
            ips = assigned_ips[node.id]
            for role in sorted(node.roles):
                priority = priority_mapping[role]
                is_critical = critical_mapping[role]
                if isinstance(priority, list):
                    priority = priority.pop()

                individual_atts = {
                    'uid': str(node.id),

@@ -594,8 +577,7 @@ class TestHandlers(BaseIntegrationTest):
                    'online': node.online,
                    'fail_if_error': is_critical,
                    'fqdn': 'node-%d.%s' % (node.id, settings.DNS_DOMAIN),
                    'priority': priority,

                    'priority': 100,
                    'network_scheme': {
                        "version": "1.0",
                        "provider": "ovs",

@@ -688,7 +670,7 @@ class TestHandlers(BaseIntegrationTest):

        deployment_msg = {
            'api_version': '1',
            'method': 'deploy',
            'method': 'granular_deploy',
            'respond_to': 'deploy_resp',
            'args': {}
        }

@@ -823,7 +805,8 @@ class TestHandlers(BaseIntegrationTest):
                'public_address',
                'storage_address',
                'ipaddr',
                'IP'])
                'IP',
                'tasks'])
        self.datadiff(
            args[1][1],
            deployment_msg,

@@ -831,7 +814,9 @@ class TestHandlers(BaseIntegrationTest):
                'public_address',
                'storage_address',
                'ipaddr',
                'IP'])
                'IP',
                'tasks',
                'priority'])

    @fake_tasks(fake_rpc=False, mock_rpc=False)
    @patch('nailgun.rpc.cast')

@@ -991,7 +976,7 @@ class TestHandlers(BaseIntegrationTest):
        controller_nodes = filter(
            lambda node: node['role'] == 'controller',
            deepcopy(nodes_list))

        common_attrs['tasks'] = []
        common_attrs['nodes'] = nodes_list
        common_attrs['nodes'][0]['role'] = 'primary-controller'

@@ -1012,14 +997,6 @@ class TestHandlers(BaseIntegrationTest):
            ),
        }

        # Individual attrs calculation and
        # merging with common attrs
        priority_mapping = {
            'controller': [600, 600, 500],
            'cinder': 700,
            'compute': 700
        }

        critical_mapping = {
            'primary-controller': True,
            'controller': False,

@@ -1035,10 +1012,7 @@ class TestHandlers(BaseIntegrationTest):
            other_nets = nm.get_networks_not_on_node(node)

            for role in sorted(node.roles):
                priority = priority_mapping[role]
                is_critical = critical_mapping[role]
                if isinstance(priority, list):
                    priority = priority.pop()

                individual_atts = {
                    'uid': str(node.id),

@@ -1047,7 +1021,7 @@ class TestHandlers(BaseIntegrationTest):
                    'online': node.online,
                    'fail_if_error': is_critical,
                    'fqdn': 'node-%d.%s' % (node.id, settings.DNS_DOMAIN),
                    'priority': priority,
                    'priority': 100,

                    'network_scheme': {
                        "version": "1.0",

@@ -1155,7 +1129,7 @@ class TestHandlers(BaseIntegrationTest):

        deployment_msg = {
            'api_version': '1',
            'method': 'deploy',
            'method': 'granular_deploy',
            'respond_to': 'deploy_resp',
            'args': {}
        }

@@ -1290,7 +1264,8 @@ class TestHandlers(BaseIntegrationTest):
                'public_address',
                'storage_address',
                'ipaddr',
                'IP'])
                'IP',
                'tasks'])
        self.datadiff(
            args[1][1],
            deployment_msg,

@@ -1298,7 +1273,9 @@ class TestHandlers(BaseIntegrationTest):
                'public_address',
                'storage_address',
                'ipaddr',
                'IP'])
                'IP',
                'tasks',
                'priority'])

    @fake_tasks(fake_rpc=False, mock_rpc=False)
    @patch('nailgun.rpc.cast')
@ -19,6 +19,7 @@ from operator import attrgetter
|
||||
from operator import itemgetter
|
||||
import re
|
||||
|
||||
import mock
|
||||
from netaddr import IPNetwork
|
||||
from netaddr import IPRange
|
||||
|
||||
@ -37,14 +38,7 @@ from nailgun.orchestrator.deployment_serializers import\
|
||||
from nailgun.orchestrator.deployment_serializers import\
|
||||
DeploymentMultinodeSerializer
|
||||
|
||||
from nailgun.orchestrator.priority_serializers import\
|
||||
PriorityHASerializer50
|
||||
from nailgun.orchestrator.priority_serializers import\
|
||||
PriorityHASerializer51
|
||||
from nailgun.orchestrator.priority_serializers import\
|
||||
PriorityHASerializerPatching
|
||||
from nailgun.orchestrator.priority_serializers import\
|
||||
PriorityMultinodeSerializer50
|
||||
from nailgun.orchestrator.deployment_graph import create_graph
|
||||
|
||||
from nailgun.db.sqlalchemy import models
|
||||
from nailgun import objects
|
||||
@ -74,9 +68,21 @@ class OrchestratorSerializerTestBase(BaseIntegrationTest):
|
||||
filter(Node.role_list.any(name='controller')).\
|
||||
order_by(Node.id)
|
||||
|
||||
def add_default_params(self, nodes):
|
||||
"""Adds neceserry default parameters to nodes
|
||||
|
||||
:param nodes: list of dicts
|
||||
"""
|
||||
for pos, node in enumerate(nodes, start=1):
|
||||
node['uid'] = str(pos)
|
||||
|
||||
@property
|
||||
def serializer(self):
|
||||
return DeploymentHASerializer(PriorityHASerializer50())
|
||||
cluster = mock.MagicMock(pending_release_id=None)
|
||||
with mock.patch(
|
||||
'nailgun.orchestrator.deployment_graph.extract_env_version',
|
||||
return_value='5.0'):
|
||||
return DeploymentHASerializer(create_graph(cluster))
|
||||
|
||||
def serialize(self, cluster):
|
||||
objects.NodeCollection.prepare_for_deployment(cluster.nodes)
|
||||
@ -307,20 +313,24 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
{'role': 'mongo'},
|
||||
{'role': 'primary-mongo'},
|
||||
{'role': 'controller'},
|
||||
{'role': 'ceph-osd'},
|
||||
{'role': 'other'}
|
||||
{'role': 'ceph-osd'}
|
||||
]
|
||||
serializer = DeploymentMultinodeSerializer(
|
||||
PriorityMultinodeSerializer50())
|
||||
self.add_default_params(nodes)
|
||||
cluster = mock.MagicMock(pending_release_id=None)
|
||||
with mock.patch(
|
||||
'nailgun.orchestrator.deployment_graph.extract_env_version',
|
||||
return_value='5.0'):
|
||||
serializer = DeploymentMultinodeSerializer(
|
||||
create_graph(cluster))
|
||||
serializer.set_deployment_priorities(nodes)
|
||||
expected_priorities = [
|
||||
{'role': 'mongo', 'priority': 100},
|
||||
{'role': 'mongo', 'priority': 200},
|
||||
{'role': 'primary-mongo', 'priority': 300},
|
||||
{'role': 'controller', 'priority': 400},
|
||||
{'role': 'ceph-osd', 'priority': 500},
|
||||
{'role': 'other', 'priority': 500}
|
||||
{'role': 'ceph-osd', 'priority': 500}
|
||||
]
|
||||
self.add_default_params(expected_priorities)
|
||||
self.assertEqual(expected_priorities, nodes)
|
||||
|
||||
def test_set_critital_node(self):
|
||||
@ -329,19 +339,21 @@ class TestNovaOrchestratorSerializer(OrchestratorSerializerTestBase):
|
||||
{'role': 'mongo'},
|
||||
{'role': 'primary-mongo'},
|
||||
{'role': 'controller'},
|
||||
{'role': 'ceph-osd'},
|
||||
{'role': 'other'}
|
||||
{'role': 'ceph-osd'}
|
||||
]
|
||||
serializer = DeploymentMultinodeSerializer(
|
||||
PriorityMultinodeSerializer50())
|
||||
cluster = mock.MagicMock(pending_release_id=None)
|
||||
with mock.patch(
|
||||
'nailgun.orchestrator.deployment_graph.extract_env_version',
|
||||
return_value='5.0'):
|
||||
serializer = DeploymentMultinodeSerializer(
|
||||
create_graph(cluster))
|
||||
serializer.set_critical_nodes(nodes)
|
||||
expected_ciritial_roles = [
|
||||
{'role': 'mongo', 'fail_if_error': False},
|
||||
{'role': 'mongo', 'fail_if_error': False},
|
||||
{'role': 'primary-mongo', 'fail_if_error': True},
|
||||
{'role': 'controller', 'fail_if_error': True},
|
||||
{'role': 'ceph-osd', 'fail_if_error': True},
|
||||
{'role': 'other', 'fail_if_error': False}
|
||||
{'role': 'ceph-osd', 'fail_if_error': True}
|
||||
]
|
||||
self.assertEqual(expected_ciritial_roles, nodes)
|
||||
|
||||
@ -373,44 +385,39 @@ class TestNovaOrchestratorHASerializer(OrchestratorSerializerTestBase):
|
||||
|
||||
@property
|
||||
def serializer(self):
|
||||
return DeploymentHASerializer(PriorityHASerializer50())
|
||||
cluster = mock.MagicMock(pending_release_id=None)
|
||||
with mock.patch(
|
||||
'nailgun.orchestrator.deployment_graph.extract_env_version',
|
||||
return_value='5.0'):
|
||||
return DeploymentHASerializer(create_graph(cluster))
|
||||
|
||||
def test_set_deployment_priorities(self):
|
||||
nodes = [
|
||||
{'role': 'zabbix-server'},
|
||||
{'role': 'primary-swift-proxy'},
|
||||
{'role': 'swift-proxy'},
|
||||
{'role': 'storage'},
|
||||
{'role': 'mongo'},
|
||||
{'role': 'primary-mongo'},
|
||||
{'role': 'primary-controller'},
|
||||
{'role': 'controller'},
|
||||
{'role': 'controller'},
|
||||
{'role': 'ceph-osd'},
|
||||
{'role': 'other'}
|
||||
{'role': 'ceph-osd'}
|
||||
]
|
||||
self.add_default_params(nodes)
|
||||
self.serializer.set_deployment_priorities(nodes)
|
||||
expected_priorities = [
|
||||
{'role': 'zabbix-server', 'priority': 100},
|
||||
{'role': 'primary-swift-proxy', 'priority': 200},
|
||||
{'role': 'swift-proxy', 'priority': 300},
|
||||
{'role': 'storage', 'priority': 400},
|
||||
{'role': 'mongo', 'priority': 500},
|
||||
{'role': 'primary-mongo', 'priority': 600},
|
||||
{'role': 'primary-controller', 'priority': 700},
|
||||
{'role': 'controller', 'priority': 800},
|
||||
{'role': 'controller', 'priority': 900},
|
||||
{'role': 'ceph-osd', 'priority': 1000},
|
||||
{'role': 'other', 'priority': 1000}
|
||||
{'role': 'mongo', 'priority': 200},
|
||||
{'role': 'primary-mongo', 'priority': 300},
|
||||
{'role': 'primary-controller', 'priority': 400},
|
||||
{'role': 'controller', 'priority': 500},
|
||||
{'role': 'controller', 'priority': 600},
|
||||
{'role': 'ceph-osd', 'priority': 700},
|
||||
]
|
||||
self.add_default_params(expected_priorities)
|
||||
self.assertEqual(expected_priorities, nodes)
|
||||
|
||||
def test_set_deployment_priorities_many_cntrls(self):
|
||||
nodes = [
|
||||
{'role': 'zabbix-server'},
|
||||
{'role': 'primary-swift-proxy'},
|
||||
{'role': 'swift-proxy'},
|
||||
{'role': 'storage'},
|
||||
{'role': 'mongo'},
|
||||
{'role': 'primary-mongo'},
|
||||
{'role': 'primary-controller'},
|
||||
@ -422,58 +429,47 @@ class TestNovaOrchestratorHASerializer(OrchestratorSerializerTestBase):
|
||||
{'role': 'controller'},
|
||||
{'role': 'controller'},
|
||||
{'role': 'controller'},
|
||||
{'role': 'ceph-osd'},
|
||||
{'role': 'other'}
|
||||
{'role': 'ceph-osd'}
|
||||
]
|
||||
self.add_default_params(nodes)
|
||||
self.serializer.set_deployment_priorities(nodes)
|
||||
expected_priorities = [
|
||||
{'role': 'zabbix-server', 'priority': 100},
|
||||
{'role': 'primary-swift-proxy', 'priority': 200},
|
||||
{'role': 'swift-proxy', 'priority': 300},
|
||||
{'role': 'storage', 'priority': 400},
|
||||
{'role': 'mongo', 'priority': 500},
|
||||
{'role': 'primary-mongo', 'priority': 600},
|
||||
{'role': 'primary-controller', 'priority': 700},
|
||||
{'role': 'mongo', 'priority': 200},
|
||||
{'role': 'primary-mongo', 'priority': 300},
|
||||
{'role': 'primary-controller', 'priority': 400},
|
||||
{'role': 'controller', 'priority': 500},
|
||||
{'role': 'controller', 'priority': 600},
|
||||
{'role': 'controller', 'priority': 700},
|
||||
{'role': 'controller', 'priority': 800},
|
||||
{'role': 'controller', 'priority': 900},
|
||||
{'role': 'controller', 'priority': 1000},
|
||||
{'role': 'controller', 'priority': 1100},
|
||||
{'role': 'controller', 'priority': 1200},
|
||||
{'role': 'controller', 'priority': 1300},
|
||||
{'role': 'controller', 'priority': 1400},
|
||||
{'role': 'controller', 'priority': 1500},
|
||||
{'role': 'ceph-osd', 'priority': 1600},
|
||||
{'role': 'other', 'priority': 1600}
|
||||
{'role': 'ceph-osd', 'priority': 1300}
|
||||
]
|
||||
self.add_default_params(expected_priorities)
|
||||
self.assertEqual(expected_priorities, nodes)
|
||||
|
||||
def test_set_critital_node(self):
|
||||
nodes = [
|
||||
{'role': 'zabbix-server'},
|
||||
{'role': 'primary-swift-proxy'},
|
||||
{'role': 'swift-proxy'},
|
||||
{'role': 'storage'},
|
||||
{'role': 'mongo'},
|
||||
{'role': 'primary-mongo'},
|
||||
{'role': 'primary-controller'},
|
||||
{'role': 'controller'},
|
||||
{'role': 'controller'},
|
||||
{'role': 'ceph-osd'},
|
||||
{'role': 'other'}
|
||||
{'role': 'ceph-osd'}
|
||||
]
|
||||
self.serializer.set_critical_nodes(nodes)
|
||||
expected_ciritial_roles = [
|
||||
{'role': 'zabbix-server', 'fail_if_error': False},
|
||||
{'role': 'primary-swift-proxy', 'fail_if_error': True},
|
||||
{'role': 'swift-proxy', 'fail_if_error': False},
|
||||
{'role': 'storage', 'fail_if_error': False},
|
||||
{'role': 'mongo', 'fail_if_error': False},
|
||||
{'role': 'primary-mongo', 'fail_if_error': True},
|
||||
{'role': 'primary-controller', 'fail_if_error': True},
|
||||
{'role': 'controller', 'fail_if_error': False},
|
||||
{'role': 'controller', 'fail_if_error': False},
|
||||
{'role': 'ceph-osd', 'fail_if_error': True},
|
||||
{'role': 'other', 'fail_if_error': False}
|
||||
{'role': 'ceph-osd', 'fail_if_error': True}
|
||||
]
|
||||
self.assertEqual(expected_ciritial_roles, nodes)
|
||||
|
||||
@ -530,44 +526,39 @@ class TestNovaOrchestratorHASerializer51(TestNovaOrchestratorHASerializer):
|
||||
|
||||
@property
|
||||
def serializer(self):
|
||||
return DeploymentHASerializer51(PriorityHASerializer51())
|
||||
cluster = mock.MagicMock(pending_release_id=None)
|
||||
with mock.patch(
|
||||
'nailgun.orchestrator.deployment_graph.extract_env_version',
|
||||
return_value='5.1'):
|
||||
return DeploymentHASerializer51(create_graph(cluster))
|
||||
|
||||
def test_set_deployment_priorities(self):
|
||||
nodes = [
|
||||
{'role': 'zabbix-server'},
|
||||
{'role': 'primary-swift-proxy'},
|
||||
{'role': 'swift-proxy'},
|
||||
{'role': 'storage'},
|
||||
{'role': 'mongo'},
|
||||
{'role': 'primary-mongo'},
|
||||
{'role': 'primary-controller'},
|
||||
{'role': 'controller'},
|
||||
{'role': 'controller'},
|
||||
{'role': 'ceph-osd'},
|
||||
{'role': 'other'}
|
||||
{'role': 'ceph-osd'}
|
||||
]
|
||||
self.add_default_params(nodes)
|
||||
self.serializer.set_deployment_priorities(nodes)
|
||||
expected_priorities = [
|
||||
{'role': 'zabbix-server', 'priority': 100},
|
||||
{'role': 'primary-swift-proxy', 'priority': 200},
|
||||
{'role': 'swift-proxy', 'priority': 300},
|
||||
{'role': 'storage', 'priority': 400},
|
||||
{'role': 'mongo', 'priority': 500},
|
||||
{'role': 'primary-mongo', 'priority': 600},
|
||||
{'role': 'primary-controller', 'priority': 700},
|
||||
{'role': 'controller', 'priority': 800},
|
||||
{'role': 'controller', 'priority': 800},
|
||||
{'role': 'ceph-osd', 'priority': 900},
|
||||
{'role': 'other', 'priority': 900}
|
||||
{'role': 'mongo', 'priority': 200},
|
||||
{'role': 'primary-mongo', 'priority': 300},
|
||||
{'role': 'primary-controller', 'priority': 400},
|
||||
{'role': 'controller', 'priority': 500},
|
||||
{'role': 'controller', 'priority': 500},
|
||||
{'role': 'ceph-osd', 'priority': 600},
|
||||
]
|
||||
self.add_default_params(expected_priorities)
|
||||
self.assertEqual(expected_priorities, nodes)
|
||||
|
||||
def test_set_deployment_priorities_many_cntrls(self):
|
||||
nodes = [
|
||||
{'role': 'zabbix-server'},
|
||||
{'role': 'primary-swift-proxy'},
|
||||
{'role': 'swift-proxy'},
|
||||
{'role': 'storage'},
|
||||
{'role': 'mongo'},
|
||||
{'role': 'primary-mongo'},
|
||||
{'role': 'primary-controller'},
|
||||
@ -579,29 +570,26 @@ class TestNovaOrchestratorHASerializer51(TestNovaOrchestratorHASerializer):
|
||||
{'role': 'controller'},
|
||||
{'role': 'controller'},
|
||||
{'role': 'controller'},
|
||||
{'role': 'ceph-osd'},
|
||||
{'role': 'other'}
|
||||
{'role': 'ceph-osd'}
|
||||
]
|
||||
self.add_default_params(nodes)
|
||||
self.serializer.set_deployment_priorities(nodes)
|
||||
expected_priorities = [
|
||||
{'role': 'zabbix-server', 'priority': 100},
|
||||
{'role': 'primary-swift-proxy', 'priority': 200},
|
||||
{'role': 'swift-proxy', 'priority': 300},
|
||||
{'role': 'storage', 'priority': 400},
|
||||
{'role': 'mongo', 'priority': 500},
|
||||
{'role': 'primary-mongo', 'priority': 600},
|
||||
{'role': 'primary-controller', 'priority': 700},
|
||||
{'role': 'controller', 'priority': 800},
|
||||
{'role': 'controller', 'priority': 800},
|
||||
{'role': 'controller', 'priority': 800},
|
||||
{'role': 'controller', 'priority': 800},
|
||||
{'role': 'controller', 'priority': 800},
|
||||
{'role': 'controller', 'priority': 800},
|
||||
{'role': 'controller', 'priority': 900},
|
||||
{'role': 'controller', 'priority': 900},
|
||||
{'role': 'ceph-osd', 'priority': 1000},
|
||||
{'role': 'other', 'priority': 1000}
|
||||
{'role': 'mongo', 'priority': 200},
|
||||
{'role': 'primary-mongo', 'priority': 300},
|
||||
{'role': 'primary-controller', 'priority': 400},
|
||||
{'role': 'controller', 'priority': 500},
|
||||
{'role': 'controller', 'priority': 500},
|
||||
{'role': 'controller', 'priority': 500},
|
||||
{'role': 'controller', 'priority': 500},
|
||||
{'role': 'controller', 'priority': 500},
|
||||
{'role': 'controller', 'priority': 500},
|
||||
{'role': 'controller', 'priority': 600},
|
||||
{'role': 'controller', 'priority': 600},
|
||||
{'role': 'ceph-osd', 'priority': 700}
|
||||
]
|
||||
self.add_default_params(expected_priorities)
|
||||
self.assertEqual(expected_priorities, nodes)
|
||||
|
||||
|
||||
@ -609,48 +597,39 @@ class TestHASerializerPatching(TestNovaOrchestratorHASerializer):
|
||||
|
||||
@property
|
||||
def serializer(self):
|
||||
return DeploymentHASerializer(PriorityHASerializerPatching())
|
||||
cluster = mock.MagicMock(pending_release_id='111')
|
||||
with mock.patch(
|
||||
'nailgun.orchestrator.deployment_graph.extract_env_version',
|
||||
return_value='5.0'):
|
||||
return DeploymentHASerializer(create_graph(cluster))
|
||||
|
||||
def test_set_deployment_priorities(self):
|
||||
nodes = [
|
||||
{'role': 'zabbix-server'},
|
||||
{'role': 'primary-swift-proxy'},
|
||||
{'role': 'swift-proxy'},
|
||||
{'role': 'storage'},
|
||||
{'role': 'mongo'},
|
||||
{'role': 'primary-mongo'},
|
||||
{'role': 'primary-controller'},
|
||||
{'role': 'controller'},
|
||||
{'role': 'controller'},
|
||||
{'role': 'ceph-osd'},
|
||||
{'role': 'other'},
|
||||
{'role': 'other'},
|
||||
{'role': 'other'},
|
||||
{'role': 'ceph-osd'}
|
||||
]
|
||||
self.add_default_params(nodes)
|
||||
self.serializer.set_deployment_priorities(nodes)
|
||||
expected_priorities = [
|
||||
{'role': 'zabbix-server', 'priority': 100},
|
||||
{'role': 'primary-swift-proxy', 'priority': 200},
|
||||
{'role': 'swift-proxy', 'priority': 300},
|
||||
{'role': 'storage', 'priority': 400},
|
||||
{'role': 'mongo', 'priority': 500},
|
||||
{'role': 'primary-mongo', 'priority': 600},
|
||||
{'role': 'primary-controller', 'priority': 700},
|
||||
{'role': 'controller', 'priority': 800},
|
||||
{'role': 'controller', 'priority': 900},
|
||||
{'role': 'ceph-osd', 'priority': 1000},
|
||||
{'role': 'other', 'priority': 1100},
|
||||
{'role': 'other', 'priority': 1200},
|
||||
{'role': 'other', 'priority': 1300},
|
||||
{'role': 'mongo', 'priority': 200},
|
||||
{'role': 'primary-mongo', 'priority': 300},
|
||||
{'role': 'primary-controller', 'priority': 400},
|
||||
{'role': 'controller', 'priority': 500},
|
||||
{'role': 'controller', 'priority': 600},
|
||||
{'role': 'ceph-osd', 'priority': 700}
|
||||
]
|
||||
self.add_default_params(expected_priorities)
|
||||
self.assertEqual(expected_priorities, nodes)
|
||||
|
||||
def test_set_deployment_priorities_many_cntrls(self):
|
||||
nodes = [
|
||||
{'role': 'zabbix-server'},
|
||||
{'role': 'primary-swift-proxy'},
|
||||
{'role': 'swift-proxy'},
|
||||
{'role': 'storage'},
|
||||
{'role': 'mongo'},
|
||||
{'role': 'primary-mongo'},
|
||||
{'role': 'primary-controller'},
|
||||
@ -662,29 +641,26 @@ class TestHASerializerPatching(TestNovaOrchestratorHASerializer):
|
            {'role': 'controller'},
            {'role': 'controller'},
            {'role': 'controller'},
            {'role': 'ceph-osd'},
            {'role': 'other'}
            {'role': 'ceph-osd'}
        ]
        self.add_default_params(nodes)
        self.serializer.set_deployment_priorities(nodes)
        expected_priorities = [
            {'role': 'zabbix-server', 'priority': 100},
            {'role': 'primary-swift-proxy', 'priority': 200},
            {'role': 'swift-proxy', 'priority': 300},
            {'role': 'storage', 'priority': 400},
            {'role': 'mongo', 'priority': 500},
            {'role': 'primary-mongo', 'priority': 600},
            {'role': 'primary-controller', 'priority': 700},
            {'role': 'mongo', 'priority': 200},
            {'role': 'primary-mongo', 'priority': 300},
            {'role': 'primary-controller', 'priority': 400},
            {'role': 'controller', 'priority': 500},
            {'role': 'controller', 'priority': 600},
            {'role': 'controller', 'priority': 700},
            {'role': 'controller', 'priority': 800},
            {'role': 'controller', 'priority': 900},
            {'role': 'controller', 'priority': 1000},
            {'role': 'controller', 'priority': 1100},
            {'role': 'controller', 'priority': 1200},
            {'role': 'controller', 'priority': 1300},
            {'role': 'controller', 'priority': 1400},
            {'role': 'controller', 'priority': 1500},
            {'role': 'ceph-osd', 'priority': 1600},
            {'role': 'other', 'priority': 1700}
            {'role': 'ceph-osd', 'priority': 1300}
        ]
        self.add_default_params(expected_priorities)
        self.assertEqual(expected_priorities, nodes)


@ -1267,7 +1243,11 @@ class TestNeutronOrchestratorHASerializer(OrchestratorSerializerTestBase):

    @property
    def serializer(self):
        return DeploymentHASerializer(PriorityHASerializer50())
        cluster = mock.MagicMock(pending_release_id=None)
        with mock.patch(
                'nailgun.orchestrator.deployment_graph.extract_env_version',
                return_value='5.0'):
            return DeploymentHASerializer(create_graph(cluster))

    def test_node_list(self):
        serialized_nodes = self.serializer.node_list(self.cluster.nodes)
@ -1458,11 +1438,19 @@ class TestMongoNodesSerialization(OrchestratorSerializerTestBase):

    @property
    def serializer_ha(self):
        return DeploymentHASerializer(PriorityHASerializer50())
        cluster = mock.MagicMock(pending_release_id=None)
        with mock.patch(
                'nailgun.orchestrator.deployment_graph.extract_env_version',
                return_value='5.0'):
            return DeploymentHASerializer(create_graph(cluster))

    @property
    def serializer_mn(self):
        return DeploymentMultinodeSerializer(PriorityMultinodeSerializer50())
        cluster = mock.MagicMock(pending_release_id=None)
        with mock.patch(
                'nailgun.orchestrator.deployment_graph.extract_env_version',
                return_value='5.0'):
            return DeploymentMultinodeSerializer(create_graph(cluster))

    def test_mongo_roles_equals_in_defferent_modes(self):
        cluster = self.create_env()

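The serializer fixtures in the hunks above all repeat the same pattern: mock a cluster, patch extract_env_version so the 5.0 task set is selected, and hand the graph built by create_graph to the serializer. Below is a minimal sketch of that pattern as a reusable helper; the helper name make_serializer is hypothetical, and the serializer class and create_graph callable are passed in rather than imported, since their exact import paths are not shown in this excerpt.

# Hedged sketch of the fixture pattern above; `make_serializer` is not part
# of this commit. `serializer_cls` and `create_graph` are the same callables
# the tests use (e.g. DeploymentHASerializer and create_graph).
import mock


def make_serializer(serializer_cls, create_graph,
                    pending_release_id=None, env_version='5.0'):
    """Build a serializer from the graph created for a mocked cluster."""
    cluster = mock.MagicMock(pending_release_id=pending_release_id)
    with mock.patch(
            'nailgun.orchestrator.deployment_graph.extract_env_version',
            return_value=env_version):
        return serializer_cls(create_graph(cluster))

With such a helper, the serializer_ha property above could reduce to make_serializer(DeploymentHASerializer, create_graph), and the patching variant to make_serializer(DeploymentHASerializer, create_graph, pending_release_id='111').
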
@ -18,7 +18,6 @@ from nailgun.errors import errors
from nailgun.test.base import BaseUnitTest

from nailgun.orchestrator import deployment_serializers as ds
from nailgun.orchestrator import priority_serializers as ps


class TestCreateSerializer(BaseUnitTest):
@ -71,40 +70,3 @@ class TestCreateSerializer(BaseUnitTest):
        cluster = mock.MagicMock(is_ha_mode=True)
        self.assertRaises(
            errors.UnsupportedSerializer, ds.create_serializer, cluster)

    @mock.patch(
        'nailgun.orchestrator.deployment_serializers.extract_env_version',
        return_value='5.0')
    def test_regular_priority_serializer_ha(self, _):
        cluster = mock.MagicMock(is_ha_mode=True, pending_release_id=None)
        prio = ds.create_serializer(cluster).priority

        self.assertTrue(isinstance(prio, ps.PriorityHASerializer50))

    @mock.patch(
        'nailgun.orchestrator.deployment_serializers.extract_env_version',
        return_value='5.0')
    def test_regular_priority_serializer_mn(self, _):
        cluster = mock.MagicMock(is_ha_mode=False, pending_release_id=None)
        prio = ds.create_serializer(cluster).priority

        self.assertTrue(isinstance(prio, ps.PriorityMultinodeSerializer50))

    @mock.patch(
        'nailgun.orchestrator.deployment_serializers.extract_env_version',
        return_value='5.0')
    def test_patching_priority_serializer_ha(self, _):
        cluster = mock.MagicMock(is_ha_mode=True, pending_release_id=42)
        prio = ds.create_serializer(cluster).priority

        self.assertTrue(isinstance(prio, ps.PriorityHASerializerPatching))

    @mock.patch(
        'nailgun.orchestrator.deployment_serializers.extract_env_version',
        return_value='5.0')
    def test_patching_priority_serializer_mn(self, _):
        cluster = mock.MagicMock(is_ha_mode=False, pending_release_id=42)
        prio = ds.create_serializer(cluster).priority

        self.assertTrue(
            isinstance(prio, ps.PriorityMultinodeSerializerPatching))

214
nailgun/nailgun/test/unit/test_graph_serializer.py
Normal file
@ -0,0 +1,214 @@
# -*- coding: utf-8 -*-

# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from collections import defaultdict
from itertools import groupby

import yaml

from nailgun.orchestrator import deployment_graph
from nailgun.orchestrator import graph_configuration
from nailgun.test import base


TASKS = """
- id: deploy
  type: stage
- id: primary-controller
  type: role
  required_for: [deploy]
  parameters:
    strategy:
      type: one_by_one
- id: controller
  type: role
  requires: [primary-controller]
  required_for: [deploy]
  parameters:
    strategy:
      type: parallel
      amount: 2
- id: cinder
  type: role
  requires: [controller]
  required_for: [deploy]
  parameters:
    strategy:
      type: parallel
- id: compute
  type: role
  requires: [controller]
  required_for: [deploy]
  parameters:
    strategy:
      type: parallel
- id: network
  type: role
  requires: [controller]
  required_for: [compute, deploy]
  parameters:
    strategy:
      type: parallel
"""

SUBTASKS = """
- id: install_controller
  type: puppet
  requires: [setup_network]
  role: [controller, primary-controller]
  required_for: [deploy]
  parameters:
    puppet_manifests: /etc/puppet/manifests/controller.pp
    puppet_modules: /etc/puppet/modules
    timeout: 360
- id: setup_network
  type: shell
  role: [controller, primary-controller]
  required_for: [deploy]
  parameters:
    cmd: run_setup_network.sh
    timeout: 120
"""


class TestGraphDependencies(base.BaseTestCase):

    def setUp(self):
        super(TestGraphDependencies, self).setUp()
        self.tasks = yaml.load(TASKS)
        self.subtasks = yaml.load(SUBTASKS)
        self.graph = deployment_graph.DeploymentGraph()

    def test_build_deployment_graph(self):
        self.graph.add_tasks(self.tasks)
        roles = self.graph.get_roles_subgraph()
        topology_by_id = [item['id'] for item in roles.topology]
        self.assertEqual(
            topology_by_id,
            ['primary-controller', 'controller',
             'network', 'compute', 'cinder'])

    def test_subtasks_in_correct_order(self):
        self.graph.add_tasks(self.tasks + self.subtasks)
        subtask_graph = self.graph.get_tasks_for_role('controller')
        topology_by_id = [item['id'] for item in subtask_graph.topology]
        self.assertEqual(
            topology_by_id,
            ['setup_network', 'install_controller'])


class TestAddDependenciesToNodes(base.BaseTestCase):

    def setUp(self):
        super(TestAddDependenciesToNodes, self).setUp()
        tasks = yaml.load(TASKS + SUBTASKS)
        self.graph = deployment_graph.DeploymentGraph()
        self.graph.add_tasks(tasks)

    def test_priority_serilized_correctly_for_all_roles(self):
        nodes = [{'uid': '3', 'role': 'primary-controller'},
                 {'uid': '1', 'role': 'controller'},
                 {'uid': '2', 'role': 'controller'},
                 {'uid': '4', 'role': 'controller'},
                 {'uid': '6', 'role': 'controller'},
                 {'uid': '7', 'role': 'cinder'},
                 {'uid': '8', 'role': 'cinder'},
                 {'uid': '9', 'role': 'network'},
                 {'uid': '10', 'role': 'compute'}]

        self.graph.add_priorities(nodes)
        by_priority = defaultdict(list)
        for role, group in groupby(nodes, lambda node: node['priority']):
            by_priority[role].extend(list(group))
        self.assertEqual(
            by_priority[100],
            [{'uid': '3', 'role': 'primary-controller', 'priority': 100}])
        self.assertEqual(
            by_priority[200],
            [{'uid': '1', 'role': 'controller', 'priority': 200},
             {'uid': '2', 'role': 'controller', 'priority': 200}])
        self.assertEqual(
            by_priority[300],
            [{'uid': '4', 'role': 'controller', 'priority': 300},
             {'uid': '6', 'role': 'controller', 'priority': 300}])
        self.assertEqual(
            by_priority[400],
            [{'uid': '7', 'role': 'cinder', 'priority': 400},
             {'uid': '8', 'role': 'cinder', 'priority': 400},
             {'uid': '9', 'role': 'network', 'priority': 400}])
        self.assertEqual(
            by_priority[500],
            [{'uid': '10', 'role': 'compute', 'priority': 500}])

    def test_serialize_priority_for_same_node_diff_roles(self):
        nodes = [{'uid': '3', 'role': 'primary-controller'},
                 {'uid': '1', 'role': 'controller'},
                 {'uid': '2', 'role': 'controller'},
                 {'uid': '1', 'role': 'cinder'},
                 {'uid': '4', 'role': 'cinder'},
                 {'uid': '4', 'role': 'network'}]
        self.graph.add_priorities(nodes)
        by_priority = defaultdict(list)
        for role, group in groupby(nodes, lambda node: node['priority']):
            by_priority[role].extend(list(group))
        self.assertEqual(
            by_priority[100],
            [{'uid': '3', 'role': 'primary-controller', 'priority': 100}])
        self.assertEqual(
            by_priority[200],
            [{'uid': '1', 'role': 'controller', 'priority': 200},
             {'uid': '2', 'role': 'controller', 'priority': 200}])
        self.assertEqual(
            by_priority[300],
            [{'uid': '1', 'role': 'cinder', 'priority': 300},
             {'uid': '4', 'role': 'cinder', 'priority': 300}])
        self.assertEqual(
            by_priority[400],
            [{'uid': '4', 'role': 'network', 'priority': 400}])


class TestLegacyGraphSerialized(base.BaseTestCase):

    def setUp(self):
        super(TestLegacyGraphSerialized, self).setUp()
        self.graph = deployment_graph.DeploymentGraph()
        self.tasks = yaml.load(graph_configuration.DEPLOYMENT_CURRENT)
        self.graph.add_tasks(self.tasks)

    def test_serialized_with_tasks_and_priorities(self):
        """Verify that tasks and priorities are serialized correctly."""
        nodes = [{'uid': '3', 'role': 'primary-controller'},
                 {'uid': '1', 'role': 'controller'},
                 {'uid': '2', 'role': 'controller'},
                 {'uid': '7', 'role': 'cinder'},
                 {'uid': '8', 'role': 'compute'},
                 {'uid': '9', 'role': 'mongo'},
                 {'uid': '10', 'role': 'primary-mongo'},
                 {'uid': '11', 'role': 'ceph-osd'},
                 {'uid': '12', 'role': 'zabbix-server'}]
        self.graph.add_priorities(nodes)
        by_priority = defaultdict(list)
        for role, group in groupby(nodes, lambda node: node['priority']):
            by_priority[role].extend(list(group))
        self.assertEqual(by_priority[100][0]['role'], 'zabbix-server')
        self.assertEqual(by_priority[200][0]['role'], 'mongo')
        self.assertEqual(by_priority[300][0]['role'], 'primary-mongo')
        self.assertEqual(by_priority[400][0]['role'], 'primary-controller')
        self.assertEqual(by_priority[500][0]['role'], 'controller')
        self.assertEqual(by_priority[500][1]['role'], 'controller')
        self.assertEqual(
            set([i['role'] for i in by_priority[600]]),
            set(['compute', 'cinder', 'ceph-osd']))
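The parallel strategy with amount: 2 in the TASKS fixture is what produces the chunked priorities asserted in TestAddDependenciesToNodes above: four controllers end up in two groups, 200 and 300. The sketch below reproduces only that chunking arithmetic as an illustration of the expected behaviour; it is not the implementation added by this commit.

# Hedged illustration: give every `chunk`-sized group of nodes one priority,
# which models `parallel` with amount=N (and `one_by_one` when chunk=1).
def assign_priorities_in_chunks(nodes, start=200, step=100, chunk=2):
    priority = start
    for index, node in enumerate(nodes):
        if index and index % chunk == 0:
            priority += step
        node['priority'] = priority
    return nodes


controllers = [{'uid': str(i), 'role': 'controller'} for i in range(1, 5)]
assign_priorities_in_chunks(controllers)
# -> priorities 200, 200, 300, 300, matching the groups asserted above.
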
@ -16,13 +16,13 @@

from nailgun.test import base

from nailgun.orchestrator import plugins_serializers
from nailgun.orchestrator import tasks_templates


class TestMakeTask(base.BaseTestCase):

    def test_make_ubuntu_repo_task(self):
        result = plugins_serializers.make_ubuntu_repo_task(
        result = tasks_templates.make_ubuntu_repo_task(
            'plugin_name',
            'http://url',
            [1, 2, 3])

@ -29,3 +29,4 @@ keystonemiddleware>=1.2.0
# we might still need keystone command
python-keystoneclient>=0.11
python-novaclient>=2.17.0
networkx>=1.8
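The new networkx>=1.8 requirement backs the graph-based serializer: task and role dependencies form a directed graph, and a topological sort of that graph yields a valid execution order. A stand-alone illustration (not the project's code) is shown below.

# Minimal illustration of the networkx primitives the deployment graph
# relies on: a DiGraph of dependencies plus a topological sort.
import networkx as nx

graph = nx.DiGraph()
# An edge u -> v is read here as "u must run before v".
graph.add_edge('setup_network', 'install_controller')
graph.add_edge('install_controller', 'deploy')

print(list(nx.topological_sort(graph)))
# ['setup_network', 'install_controller', 'deploy']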