# fuel-web/nailgun/nailgun/test/unit/test_task.py
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
import six

from oslo_serialization import jsonutils

from nailgun import consts
from nailgun.db.sqlalchemy.models import Task
from nailgun import errors
from nailgun import objects
from nailgun.task import task
from nailgun.test.base import BaseTestCase
from nailgun.utils import reverse


class TestClusterDeletionTask(BaseTestCase):
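    """Checks that cluster deletion triggers removal of IBP target images
    only for Ubuntu releases provisioned with the image method.
    """
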
def create_cluster_and_execute_deletion_task(
self, attributes=None, os=consts.RELEASE_OS.centos):
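        """Create a cluster with the given attributes and OS, then run
        ClusterDeletionTask.execute on a fresh cluster_deletion task
        (stored as self.fake_task).
        """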
cluster = self.env.create(
cluster_kwargs={
'editable_attributes': attributes,
},
release_kwargs={
'operating_system': os,
'version': '2025-7.0',
},
)
self.fake_task = Task(name=consts.TASK_NAMES.cluster_deletion,
cluster=cluster)
task.ClusterDeletionTask.execute(self.fake_task)
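    # Note: mock.patch decorators are applied bottom-up, so in the tests
    # below the innermost patch (DeleteIBPImagesTask.execute) is injected
    # first as mock_img_task, and the DeletionTask patch second as mock_del.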
@mock.patch('nailgun.task.task.DeletionTask', autospec=True)
@mock.patch.object(task.DeleteIBPImagesTask, 'execute')
def test_target_images_deletion_skipped_empty_attributes(
self, mock_img_task, mock_del):
self.create_cluster_and_execute_deletion_task({})
self.assertTrue(mock_del.execute.called)
self.assertFalse(mock_img_task.called)
@mock.patch('nailgun.task.task.DeletionTask', autospec=True)
@mock.patch.object(task.DeleteIBPImagesTask, 'execute')
def test_target_images_deletion_skipped_os_centos(
self, mock_img_task, mock_del):
attributes = {'provision': {
'method': consts.PROVISION_METHODS.image,
}}
self.create_cluster_and_execute_deletion_task(attributes)
self.assertTrue(mock_del.execute.called)
self.assertFalse(mock_img_task.called)
@mock.patch('nailgun.task.task.DeletionTask', autospec=True)
@mock.patch.object(task.DeleteIBPImagesTask, 'execute')
def test_target_images_deletion_skipped_os_ubuntu_cobbler(
self, mock_img_task, mock_del):
os = consts.RELEASE_OS.ubuntu
attributes = {'provision': {
'method': consts.PROVISION_METHODS.cobbler,
}}
self.create_cluster_and_execute_deletion_task(attributes, os)
self.assertTrue(mock_del.execute.called)
self.assertFalse(mock_img_task.called)
@mock.patch('nailgun.task.task.DeletionTask', autospec=True)
@mock.patch.object(task.DeleteIBPImagesTask, 'execute')
def test_target_images_deletion_executed(self, mock_img_task, mock_del):
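        """Image deletion runs for Ubuntu clusters provisioned via image,
        and the image_data from the merged cluster attributes is passed
        to DeleteIBPImagesTask.
        """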
os = consts.RELEASE_OS.ubuntu
attributes = {'provision': {
'method': consts.PROVISION_METHODS.image,
}}
self.create_cluster_and_execute_deletion_task(attributes, os)
self.assertTrue(mock_del.execute.called)
self.assertTrue(mock_img_task.called)
fake_attrs = objects.Attributes.merged_attrs_values(
self.fake_task.cluster.attributes)
mock_img_task.assert_called_once_with(
mock.ANY, fake_attrs['provision']['image_data'])


class TestDeleteIBPImagesTask(BaseTestCase):
@mock.patch('nailgun.task.task.settings')
@mock.patch('nailgun.task.task.make_astute_message')
def test_message(self, mock_astute, mock_settings):
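        """DeleteIBPImagesTask.message builds an 'execute_tasks' astute
        message whose shell command removes the image files and the
        image metadata YAML under PROVISIONING_IMAGES_PATH.
        """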
mock_settings.PROVISIONING_IMAGES_PATH = '/fake/path'
mock_settings.REMOVE_IMAGES_TIMEOUT = 'fake_timeout'
task_mock = mock.Mock()
task_mock.cluster.id = '123'
task_mock.uuid = 'fake_uuid'
fake_image_data = {'/': {'uri': 'http://a.b/fake.img'},
'/boot': {'uri': 'http://c.d/fake-boot.img'}}
task.DeleteIBPImagesTask.message(task_mock, fake_image_data)
rpc_message = mock_astute.call_args[0][3]
rm_cmd = rpc_message['tasks'][0]['parameters'].pop('cmd')
mock_astute.assert_called_once_with(
mock.ANY, 'execute_tasks', 'remove_images_resp', mock.ANY)
self.assertEqual(rpc_message, {
'tasks': [{
'id': None,
'type': 'shell',
'uids': [consts.MASTER_NODE_UID],
'parameters': {
'retries': 3,
'cwd': '/',
'timeout': 'fake_timeout',
'interval': 1}}]})
self.assertTrue(rm_cmd.startswith('rm -f'))
self.assertIn('/fake/path/fake-boot.img', rm_cmd)
self.assertIn('/fake/path/fake.img', rm_cmd)
self.assertIn('/fake/path/fake.yaml', rm_cmd)


class TestHelperUpdateClusterStatus(BaseTestCase):
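    """Checks how Task._update_cluster_data propagates task status changes
    to the cluster and its nodes.
    """
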
def setUp(self):
super(TestHelperUpdateClusterStatus, self).setUp()
self.cluster = self.env.create(
nodes_kwargs=[
{'roles': ['controller']},
{'roles': ['compute', 'virt']},
{'roles': ['cinder']}])
def node_should_be_error_with_type(self, node, error_type):
self.assertEqual(node.status, 'error')
self.assertEqual(node.error_type, error_type)
self.assertEqual(node.progress, 0)
def nodes_should_not_be_error(self, nodes):
for node in nodes:
self.assertEqual(node.status, 'discover')
def test_update_nodes_to_error_if_deployment_task_failed(self):
self.cluster.nodes[0].status = 'deploying'
self.cluster.nodes[0].progress = 12
deployment_task = Task(name='deployment', cluster=self.cluster,
status='error')
self.db.add(deployment_task)
self.db.commit()
objects.Task._update_cluster_data(deployment_task)
self.db.flush()
self.assertEqual(self.cluster.status, 'error')
self.assertFalse(self.cluster.is_locked)
self.node_should_be_error_with_type(self.cluster.nodes[0], 'deploy')
self.nodes_should_not_be_error(self.cluster.nodes[1:])
def test_update_cluster_to_error_if_deploy_task_failed(self):
deploy_task = Task(name='deploy', cluster=self.cluster, status='error')
self.db.add(deploy_task)
self.db.commit()
objects.Task._update_cluster_data(deploy_task)
self.db.flush()
self.assertEqual(self.cluster.status, 'error')
self.assertFalse(self.cluster.is_locked)
def test_update_nodes_to_error_if_provision_task_failed(self):
self.cluster.nodes[0].status = 'provisioning'
self.cluster.nodes[0].progress = 12
provision_task = Task(name='provision', cluster=self.cluster,
status='error')
self.db.add(provision_task)
self.db.commit()
objects.Task._update_cluster_data(provision_task)
self.db.flush()
self.assertEqual(self.cluster.status, 'error')
self.assertFalse(self.cluster.is_locked)
self.node_should_be_error_with_type(self.cluster.nodes[0], 'provision')
self.nodes_should_not_be_error(self.cluster.nodes[1:])
def test_update_cluster_to_operational(self):
deploy_task = Task(
name=consts.TASK_NAMES.deployment,
cluster=self.cluster, status=consts.TASK_STATUSES.ready
)
for node in self.env.nodes:
node.status = consts.NODE_STATUSES.ready
self.db.add(deploy_task)
self.db.commit()
objects.Task._update_cluster_data(deploy_task)
self.db.flush()
self.assertEqual(
self.cluster.status, consts.CLUSTER_STATUSES.operational)
self.assertFalse(self.cluster.is_locked)
def test_update_if_parent_task_is_ready_all_nodes_should_be_ready(self):
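        """When the deployment task finishes as ready, nodes still marked
        as deploying are switched to ready with 100% progress.
        """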
for node in self.cluster.nodes:
node.status = consts.NODE_STATUSES.ready
node.progress = 100
self.cluster.nodes[0].status = consts.NODE_STATUSES.deploying
self.cluster.nodes[0].progress = 24
deploy_task = Task(
name=consts.TASK_NAMES.deployment,
cluster=self.cluster, status=consts.TASK_STATUSES.ready
)
self.db.add(deploy_task)
self.db.commit()
objects.Task._update_cluster_data(deploy_task)
self.db.flush()
self.assertEqual(
self.cluster.status, consts.CLUSTER_STATUSES.operational)
self.assertFalse(self.cluster.is_locked)
for node in self.cluster.nodes:
self.assertEqual(node.status, consts.NODE_STATUSES.ready)
self.assertEqual(node.progress, 100)
def test_update_cluster_status_if_task_was_already_in_error_status(self):
for node in self.cluster.nodes:
node.status = 'provisioning'
node.progress = 12
provision_task = Task(name='provision', cluster=self.cluster,
status='error')
self.db.add(provision_task)
self.db.commit()
data = {'status': 'error', 'progress': 100}
objects.Task.update(provision_task, data)
self.db.flush()
self.assertEqual(self.cluster.status, 'error')
self.assertEqual(provision_task.status, 'error')
self.assertFalse(self.cluster.is_locked)
for node in self.cluster.nodes:
self.assertEqual(node.status, 'error')
self.assertEqual(node.progress, 0)
def test_do_not_set_cluster_to_error_if_validation_failed(self):
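        """Failed validation subtasks (check_before_deployment,
        check_networks) must not move the cluster out of the 'new' status.
        """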
for task_name in ['check_before_deployment', 'check_networks']:
supertask = Task(
name='deploy',
cluster=self.cluster,
status='error')
check_task = Task(
name=task_name,
cluster=self.cluster,
status='error')
supertask.subtasks.append(check_task)
self.db.add(check_task)
self.db.commit()
objects.Task._update_cluster_data(supertask)
self.db.flush()
self.assertEqual(self.cluster.status, 'new')
self.assertFalse(self.cluster.is_locked)


class TestCheckBeforeDeploymentTask(BaseTestCase):
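    """Covers the pre-deployment checks: node roles and availability,
    network template validation, SRIOV/DPDK constraints and public
    network capacity.
    """
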
def setUp(self):
super(TestCheckBeforeDeploymentTask, self).setUp()
self.cluster = self.env.create(
release_kwargs={'version': '1111-8.0'},
cluster_kwargs={
'net_provider': 'neutron',
'net_segment_type': 'vlan',
'editable_attributes': {
'common': {
'libvirt_type': {
'value': consts.HYPERVISORS.qemu
}
}
}
},
nodes_kwargs=[{'roles': ['controller']}])
self.env.create_node()
self.node = self.env.nodes[0]
self.task = Task(cluster_id=self.cluster.id)
self.env.db.add(self.task)
self.env.db.commit()
def set_node_status(self, status):
self.node.status = status
self.env.db.commit()
self.assertEqual(self.node.status, status)
def set_node_error_type(self, error_type):
self.node.error_type = error_type
self.env.db.commit()
self.assertEqual(self.node.error_type, error_type)
@mock.patch('nailgun.task.task.assignment.NodeAssignmentValidator')
def test_not_yet_provisioned_nodes_roles_are_validated(self, validator):
self.set_node_status(consts.NODE_STATUSES.discover)
task.CheckBeforeDeploymentTask._check_nodes_roles(self.task)
validator.check_roles_for_conflicts.assert_called_once()
validator.check_roles_requirement.assert_called_once()
def test_check_nodes_online_raises_exception(self):
self.node.online = False
self.env.db.commit()
self.assertRaises(
errors.NodeOffline,
task.CheckBeforeDeploymentTask._check_nodes_are_online,
self.task)
def test_check_nodes_online_do_not_raise_exception_node_to_deletion(self):
self.node.online = False
self.node.pending_deletion = True
self.env.db.commit()
task.CheckBeforeDeploymentTask._check_nodes_are_online(self.task)
def find_net_by_name(self, nets, name):
for net in nets['networks']:
if net['name'] == name:
return net
def test_missing_network_group_with_template(self):
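        """Template validation fails when a network group referenced by
        the network template (public) has been deleted from the cluster.
        """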
net_template = self.env.read_fixtures(['network_template_80'])[0]
objects.Cluster.set_network_template(
self.cluster,
net_template
)
public = [n for n in self.cluster.network_groups
if n.name == consts.NETWORKS.public][0]
self.env._delete_network_group(public.id)
self.assertRaisesRegexp(
errors.NetworkTemplateMissingNetworkGroup,
"The following network groups are missing: public",
task.CheckBeforeDeploymentTask._validate_network_template,
self.task)
def test_missing_node_role_from_template(self):
net_template = self.env.read_fixtures(['network_template_80'])[0]
objects.Cluster.set_network_template(
self.cluster,
net_template
)
cluster_assigned_roles = \
objects.Cluster.get_assigned_roles(self.cluster)
conf_template = self.cluster.network_config.configuration_template
for net_group in six.itervalues(conf_template['adv_net_template']):
template_node_roles = net_group['templates_for_node_role']
for assigned_role in cluster_assigned_roles:
if assigned_role in template_node_roles:
del template_node_roles[assigned_role]
self.assertRaises(
errors.NetworkTemplateMissingRoles,
task.CheckBeforeDeploymentTask._validate_network_template,
self.task
)
def test_missing_network_group_with_template_multi_ng(self):
net_template = self.env.read_fixtures(['network_template_80'])[0]
resp = self.env.create_node_group(name='group-custom-1',
cluster_id=self.cluster.id)
del self.cluster.nodes[0]
ng = objects.NodeGroup.get_by_uid(resp.json_body['id'])
self.env.create_nodes_w_interfaces_count(
1, 5,
roles=['controller'],
cluster_id=self.cluster.id,
group_id=ng.id
)
objects.Cluster.set_network_template(
self.cluster,
net_template
)
public = [n for n in ng.networks
if n.name == consts.NETWORKS.public][0]
self.env._delete_network_group(public.id)
self.assertRaisesRegexp(
errors.NetworkTemplateMissingNetworkGroup,
("The following network groups are missing: public "
".* group-custom-1"),
task.CheckBeforeDeploymentTask._validate_network_template,
self.task)
def test_default_net_data_used_for_checking_absent_node_groups(self):
self.env.create_node_group(api=False, name='new_group',
cluster_id=self.cluster.id)
# template validation should pass without errors
# as the 'default' sub-template must be used for 'new_group'
# (same as for 'default' node group)
self.assertNotRaises(
Exception,
task.CheckBeforeDeploymentTask._validate_network_template,
self.task
)
def test_sriov_is_enabled_with_non_kvm_hypervisor(self):
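        """SRIOV works only with the KVM hypervisor; setUp configures
        libvirt_type as qemu, so enabling SRIOV on a NIC must fail.
        """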
objects.NIC.update(self.node.nic_interfaces[0], {
'attributes': {
'sriov': {
'enabled': {'value': True},
                    'numvfs': {'value': 2}
}
},
'meta': {
'sriov': {
'available': True,
'totalvfs': 4,
}
}
})
self.assertRaisesRegexp(
errors.InvalidData,
'Only KVM hypervisor works with SRIOV',
task.CheckBeforeDeploymentTask._check_sriov_properties,
self.task,
)
def test_wrong_net_role_for_dpdk(self):
objects.Cluster.set_network_template(
self.cluster,
self.env.read_fixtures(['network_template_90'])[0]
)
conf_template = self.cluster.network_config.configuration_template
template = conf_template['adv_net_template']['default']
network_scheme = template['network_scheme']['private']
network_scheme['roles'] = {'test': 'br-prv'}
self.assertRaisesRegexp(
errors.NetworkCheckError,
'Only neutron/private network role .* with DPDK',
task.CheckBeforeDeploymentTask._validate_network_template,
self.task,
)
def test_wrong_dpdk_endpoints_count(self):
objects.Cluster.set_network_template(
self.cluster,
self.env.read_fixtures(['network_template_90'])[0],
)
conf_template = self.cluster.network_config.configuration_template
template = conf_template['adv_net_template']['default']
network_scheme = template['network_scheme']['private']
network_scheme['transformations'].append({
'action': 'add-port',
'bridge': 'br-derp',
'name': '<% if3 %>.101',
'provider': 'dpdkovs',
})
self.assertRaisesRegexp(
errors.NetworkCheckError,
'dpdkovs provider can be assigned only for one endpoint',
task.CheckBeforeDeploymentTask._validate_network_template,
self.task,
)
def test_dpdk_hugepages_are_not_configured(self):
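        """Enabling DPDK on a NIC of a compute node while DPDK hugepages
        are left unconfigured (value 0) must fail the pre-deployment check.
        """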
net_template = self.env.read_fixtures(['network_template_90'])[0]
del self.cluster.nodes[0]
self.env.create_nodes_w_interfaces_count(
1, 6,
roles=['compute'],
cluster_id=self.cluster.id
)
self.node = self.cluster.nodes[0]
objects.Cluster.set_network_template(
self.cluster,
net_template
)
objects.Node.update_attributes(
self.node, {'hugepages': {'dpdk': {'value': 0}}})
objects.NIC.update(self.node.nic_interfaces[0],
{'interface_properties':
{
'dpdk': {'enabled': True,
'available': True},
}})
self.assertRaisesRegexp(
errors.InvalidData,
'Hugepages for DPDK are not configured',
task.CheckBeforeDeploymentTask._check_dpdk_properties,
self.task,
)
def test_nova_hugepages_are_not_configured_with_dpdk_enabled(self):
net_template = self.env.read_fixtures(['network_template_90'])[0]
del self.cluster.nodes[0]
self.env.create_nodes_w_interfaces_count(
1, 6,
roles=['compute'],
cluster_id=self.cluster.id
)
self.node = self.cluster.nodes[0]
objects.Cluster.set_network_template(
self.cluster,
net_template
)
objects.Node.update_attributes(
self.node, {'hugepages': {
'nova': {'value': {'2048': 0}},
'dpdk': {'value': 1},
}})
objects.NIC.update(self.node.nic_interfaces[0],
{'interface_properties':
{
'dpdk': {'enabled': True,
'available': True},
}})
self.assertRaisesRegexp(
errors.InvalidData,
'Hugepages for Nova are not configured',
task.CheckBeforeDeploymentTask._check_dpdk_properties,
self.task,
)
def test_check_public_networks(self):
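        """With assign_to_all_nodes disabled, public IPs are needed only
        for the 3 controllers plus 2 VIPs (5 addresses): a 4-address range
        must fail and a 5-address range must pass. Once public networks
        are assigned to all nodes, 5 addresses are no longer enough.
        """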
cluster = self.cluster
self.env.create_nodes(
2, api=True, roles=['controller'], cluster_id=cluster.id)
self.env.create_nodes(
2, api=True, roles=['compute'], cluster_id=cluster.id)
# we have 3 controllers now
self.assertEqual(
sum('controller' in n.all_roles for n in self.env.nodes),
3
)
attrs = cluster.attributes.editable
self.assertEqual(
attrs['public_network_assignment']['assign_to_all_nodes']['value'],
False
)
self.assertFalse(
objects.Cluster.should_assign_public_to_all_nodes(cluster))
resp = self.env.neutron_networks_get(cluster.id)
nets = resp.json_body
# not enough IPs for 3 nodes and 2 VIPs
self.find_net_by_name(nets, 'public')['ip_ranges'] = \
[["172.16.0.2", "172.16.0.5"]]
resp = self.env.neutron_networks_put(cluster.id, nets)
self.assertEqual(resp.status_code, 200)
self.assertRaises(
errors.NetworkCheckError,
task.CheckBeforeDeploymentTask._check_public_network,
self.task)
# enough IPs for 3 nodes and 2 VIPs
self.find_net_by_name(nets, 'public')['ip_ranges'] = \
[["172.16.0.2", "172.16.0.6"]]
resp = self.env.neutron_networks_put(cluster.id, nets)
self.assertEqual(resp.status_code, 200)
self.assertNotRaises(
errors.NetworkCheckError,
task.CheckBeforeDeploymentTask._check_public_network,
self.task)
attrs['public_network_assignment']['assign_to_all_nodes']['value'] = \
True
resp = self.app.patch(
reverse(
'ClusterAttributesHandler',
kwargs={'cluster_id': cluster.id}),
params=jsonutils.dumps({'editable': attrs}),
headers=self.default_headers
)
self.assertEqual(200, resp.status_code)
self.assertTrue(
objects.Cluster.should_assign_public_to_all_nodes(cluster))
self.assertRaises(
errors.NetworkCheckError,
task.CheckBeforeDeploymentTask._check_public_network,
self.task)


class TestDeployTask(BaseTestCase):
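    """Checks how subtask statuses bubble up to the parent 'deploy' task."""
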
def create_deploy_tasks(self):
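        """Create a pending 'deploy' task with pending 'provision' and
        'deployment' subtasks in a fresh cluster.
        """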
cluster = self.env.create()
deploy_task = Task(name=consts.TASK_NAMES.deploy,
cluster_id=cluster.id,
status=consts.TASK_STATUSES.pending)
self.db.add(deploy_task)
self.db.flush()
provision_task = Task(name=consts.TASK_NAMES.provision,
status=consts.TASK_STATUSES.pending,
parent_id=deploy_task.id, cluster_id=cluster.id)
self.db.add(provision_task)
deployment_task = Task(name=consts.TASK_NAMES.deployment,
status=consts.TASK_STATUSES.pending,
parent_id=deploy_task.id, cluster_id=cluster.id)
self.db.add(deployment_task)
self.db.flush()
return deploy_task, provision_task, deployment_task
def test_running_status_bubble_for_deploy_task(self):
deploy_task, provision_task, deployment_task = \
self.create_deploy_tasks()
objects.Task.update(provision_task,
{'status': consts.TASK_STATUSES.running})
# Only deploy and provision tasks are running now
self.assertEqual(consts.TASK_STATUSES.running, deploy_task.status)
self.assertEqual(consts.TASK_STATUSES.running, provision_task.status)
self.assertEqual(consts.TASK_STATUSES.pending, deployment_task.status)
def test_error_status_bubble_for_deploy_task(self):
deploy_task, provision_task, deployment_task = \
self.create_deploy_tasks()
objects.Task.update(provision_task,
{'status': consts.TASK_STATUSES.error})
# All tasks have error status
self.assertEqual(consts.TASK_STATUSES.error, deploy_task.status)
self.assertEqual(consts.TASK_STATUSES.error, provision_task.status)
self.assertEqual(consts.TASK_STATUSES.error, deployment_task.status)
def test_ready_status_bubble_for_deploy_task(self):
deploy_task, provision_task, deployment_task = \
self.create_deploy_tasks()
objects.Task.update(provision_task,
{'status': consts.TASK_STATUSES.ready})
        # Not all child tasks are in the ready state
self.assertEqual(consts.TASK_STATUSES.running, deploy_task.status)
self.assertEqual(consts.TASK_STATUSES.ready, provision_task.status)
self.assertEqual(consts.TASK_STATUSES.pending, deployment_task.status)
        # All child tasks are in the ready state
objects.Task.update(deployment_task,
{'status': consts.TASK_STATUSES.ready})
self.assertEqual(consts.TASK_STATUSES.ready, deploy_task.status)
self.assertEqual(consts.TASK_STATUSES.ready, provision_task.status)
self.assertEqual(consts.TASK_STATUSES.ready, deployment_task.status)