Add tests for task idempotency of cluster in HA mode

Related-Bug: #1582715
Closes-Bug: #1583976

Change-Id: Ifba1d7d741b061dc07c997a35e5028126c1d2e26
(cherry picked from commit 660c62afaf)
Sergey Novikov 2016-04-22 18:37:34 +03:00
parent 9d5ee61197
commit 23d49cec11
8 changed files with 528 additions and 27 deletions


@@ -2829,14 +2829,17 @@ class FuelWebClient(object):
         self.assert_task_success(task, interval=interval, timeout=timeout)
 
     def execute_task_on_node(self, task_name, node_id,
-                             cluster_id, force_exc=False):
+                             cluster_id, force_exception=False,
+                             force_execution=True):
         """Execute deployment task against the corresponding node
 
         :param task_name: str, name of a task to execute
         :param node_id: int, node ID to execute task on
         :param cluster_id: int, cluster ID
-        :param force_exc: bool, indication whether exceptions on task
+        :param force_exception: bool, indication whether exceptions on task
                execution are ignored
+        :param force_execution: bool, run particular task on nodes
+               and do not care if there were changes or not
         :return: None
         """
         try:
@@ -2845,9 +2848,10 @@ class FuelWebClient(object):
             task = self.client.put_deployment_tasks_for_cluster(
                 cluster_id=cluster_id,
                 data=[task_name],
-                node_id=node_id)
+                node_id=node_id,
+                force=force_execution)
             self.assert_task_success(task, timeout=30 * 60)
         except (AssertionError, TimeoutError):
             logger.exception("Failed to run task {!r}".format(task_name))
-            if force_exc:
+            if force_exception:
                 raise
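A usage sketch of the extended helper (illustrative task name and IDs, assuming the usual fuel-qa test context where self.fuel_web and cluster_id are available):

    # Hedged sketch: re-run one granular deployment task on an already
    # deployed node, raising on failure instead of only logging it.
    self.fuel_web.execute_task_on_node(
        task_name='globals',          # illustrative task name
        node_id=1,                    # illustrative nailgun node id
        cluster_id=cluster_id,
        force_exception=True,         # re-raise AssertionError/TimeoutError
        force_execution=True)         # force the run even without pending changes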


@@ -529,16 +529,21 @@ class NailgunClient(object):
     @logwrap
     @json_parse
-    def put_deployment_tasks_for_cluster(self, cluster_id, data, node_id):
-        """ Put task to be executed on the nodes from cluster.:
-            Params:
-            cluster_id : Cluster id,
-            node_id: Node ids where task should be run, can be node_id=1,
-            or node_id =1,2,3,
-            data: tasks ids"""
+    def put_deployment_tasks_for_cluster(self, cluster_id, data, node_id,
+                                         force=False):
+        """Put task to be executed on the nodes from cluster
+
+        :param cluster_id: int, cluster id
+        :param data: list, tasks ids
+        :param node_id: str, Node ids where task should be run,
+               can be node_id=1, or node_id =1,2,3,
+        :param force: bool, run particular task on nodes and do not care
+               if there were changes or not
+        :return:
+        """
         return self.client.put(
-            '/api/clusters/{0}/deploy_tasks?nodes={1}'.format(
-                cluster_id, node_id), data)
+            '/api/clusters/{0}/deploy_tasks?nodes={1}{2}'.format(
+                cluster_id, node_id, '&force=1' if force else ''), data)
 
     @logwrap
     @json_parse
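To make the formatting above concrete, a minimal sketch of the resulting request path (the cluster and node IDs are invented for illustration):

    cluster_id, node_id, force = 1, '1,2,3', True
    url = '/api/clusters/{0}/deploy_tasks?nodes={1}{2}'.format(
        cluster_id, node_id, '&force=1' if force else '')
    # url == '/api/clusters/1/deploy_tasks?nodes=1,2,3&force=1'
    # with force=False the path stays '/api/clusters/1/deploy_tasks?nodes=1,2,3'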


@@ -22,7 +22,6 @@ import yaml
 from fuelweb_test import logger
 from fuelweb_test.helpers.decorators import log_snapshot_after_test
 from fuelweb_test.helpers.ssh_manager import SSHManager
-from fuelweb_test.settings import NEUTRON
 from fuelweb_test.settings import DEPLOYMENT_MODE
 from fuelweb_test.settings import NEUTRON_SEGMENT
 from fuelweb_test.tests.base_test_case import SetupEnvironment
@@ -47,6 +46,7 @@ TASKS_BLACKLIST = [
     "netconfig",
     "upload_provision_data"]
 
+
 SETTINGS_SKIPLIST = (
     "dns_list",
     "ntp_list",
@@ -210,7 +210,19 @@ class LCMTestBasic(TestBasic):
         return extra_actual_tasks, extra_fixture_tasks, wrong_types
 
-    def check_extra_tasks(self, slave_nodes, deployment, idmp=True):
+    def define_pr_ctrl(self):
+        """Define primary controller
+
+        :return: dict, node info
+        """
+        devops_pr_controller = self.fuel_web.get_nailgun_primary_node(
+            self.env.d_env.nodes().slaves[0])
+        pr_ctrl = self.fuel_web.get_nailgun_node_by_devops_node(
+            devops_pr_controller)
+        return pr_ctrl
+
+    def check_extra_tasks(self, slave_nodes, deployment, idmp=True, ha=False):
         """Check existing extra tasks regarding to fixture and actual task
            or tasks with a wrong type
@@ -218,6 +230,7 @@ class LCMTestBasic(TestBasic):
         :param deployment: a string, name of the deployment kind
         :param idmp: bool, indicates whether idempotency or ensurability
                fixture is checked
+        :param ha: bool, indicates ha mode is enabled or disabled
         :return: a list with nodes for which extra tasks regarding to fixture
                  and actual task or tasks with a wrong type were found
         """
@@ -225,8 +238,12 @@ class LCMTestBasic(TestBasic):
                   'extra_fixture_tasks': {},
                   'wrong_types': {},
                   'failed_tasks': {}}
+        pr_ctrl = self.define_pr_ctrl() if ha else {}
         for node in slave_nodes:
             node_roles = self.node_roles(node)
+            if node.get('name') == pr_ctrl.get('name', None):
+                node_roles = 'primary-' + node_roles
             node_ref = "{}_{}".format(node["id"], node_roles)
             fixture = self.load_fixture(deployment, node_roles, idmp)
             node_tasks = self.get_nodes_tasks(node["id"])
@@ -248,17 +265,21 @@ class LCMTestBasic(TestBasic):
                         if failed_tasks]
         return failed_nodes
 
-    def generate_fixture(self, node_refs, cluster_id, slave_nodes):
+    def generate_fixture(self, node_refs, cluster_id, slave_nodes, ha=False):
         """Generate fixture with description of task idempotency
 
         :param node_refs: a string, refs to nailgun node
         :param cluster_id: an integer, number of cluster id
         :param slave_nodes: a list of nailgun nodes
+        :param ha: bool, indicates ha mode is enabled or disabled
         :return: None
         """
         result = {}
+        pr_ctrl = self.define_pr_ctrl() if ha else {}
         for node in slave_nodes:
             node_roles = self.node_roles(node)
+            if node.get('name') == pr_ctrl.get('name', None):
+                node_roles = 'primary-' + node_roles
             node_ref = "{}_{}".format(node["id"], node_roles)
             if node_ref not in node_refs:
                 logger.debug('Node {!r} was skipped because the current '
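To illustrate the HA-specific naming added above, a small sketch with assumed node data (in a real run the node dict comes from Nailgun and pr_ctrl from define_pr_ctrl()):

    node = {'id': 1, 'name': 'slave-01_controller_mongo',
            'roles': ['controller', 'mongo']}          # assumed nailgun node
    pr_ctrl = {'name': 'slave-01_controller_mongo'}    # assumed primary controller
    node_roles = '_'.join(sorted(node['roles']))       # 'controller_mongo'
    if node.get('name') == pr_ctrl.get('name', None):
        node_roles = 'primary-' + node_roles           # 'primary-controller_mongo'
    node_ref = '{}_{}'.format(node['id'], node_roles)  # '1_primary-controller_mongo'
    # the fixture is then looked up by the prefixed role set, so the primary
    # controller gets its own fixture in HA deployments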
@@ -544,8 +565,8 @@ class SetupLCMEnvironment(LCMTestBasic):
             5. Add 1 cinder node
             6. Deploy cluster
             7. Check extra deployment tasks
-            8. Generate fixtures
 
+        Duration 180m
         Snapshot: "lcm_deploy_1_ctrl_1_cmp_1_cinder"
         """
         deployment = '1_ctrl_1_cmp_1_cinder'
@@ -560,7 +581,6 @@ class SetupLCMEnvironment(LCMTestBasic):
             name=self.__class__.__name__,
             mode=DEPLOYMENT_MODE,
             settings={
-                "net_provider": NEUTRON,
                 "net_segment_type": segment_type
             }
         )
@@ -582,7 +602,7 @@ class SetupLCMEnvironment(LCMTestBasic):
         slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
         node_refs = self.check_extra_tasks(slave_nodes, deployment)
         if node_refs:
-            self.show_step(8)
+            logger.info('Generating a new fixture . . .')
             self.generate_fixture(node_refs, cluster_id, slave_nodes)
             msg = ('Please update idempotency fixtures in the repo '
                    'according to generated fixtures')
@@ -603,8 +623,8 @@ class SetupLCMEnvironment(LCMTestBasic):
             5. Add 1 mongo node
             6. Deploy cluster
             7. Check extra deployment tasks
-            8. Generate fixtures
 
+        Duration 180m
         Snapshot: "lcm_deploy_1_ctrl_1_cmp_1_mongo"
         """
         deployment = '1_ctrl_1_cmp_1_mongo'
@@ -620,7 +640,6 @@ class SetupLCMEnvironment(LCMTestBasic):
             mode=DEPLOYMENT_MODE,
             settings={
                 'ceilometer': True,
-                'net_provider': NEUTRON,
                 'net_segment_type': segment_type
             }
         )
@@ -642,7 +661,7 @@ class SetupLCMEnvironment(LCMTestBasic):
         slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
         node_refs = self.check_extra_tasks(slave_nodes, deployment)
         if node_refs:
-            self.show_step(8)
+            logger.info('Generating a new fixture . . .')
             self.generate_fixture(node_refs, cluster_id, slave_nodes)
             msg = ('Please update idempotency fixtures in the repo '
                    'according to generated fixtures')
@@ -663,8 +682,8 @@ class SetupLCMEnvironment(LCMTestBasic):
             5. Add 3 ceph-osd nodes
             6. Deploy cluster
             7. Check extra deployment tasks
-            8. Generate fixtures
 
+        Duration 240m
         Snapshot: "lcm_deploy_1_ctrl_1_cmp_3_ceph"
         """
         deployment = '1_ctrl_1_cmp_3_ceph'
@@ -683,7 +702,6 @@ class SetupLCMEnvironment(LCMTestBasic):
                 'volumes_ceph': True,
                 'images_ceph': True,
                 'objects_ceph': True,
-                'net_provider': NEUTRON,
                 'net_segment_type': segment_type
             }
         )
@@ -707,9 +725,74 @@ class SetupLCMEnvironment(LCMTestBasic):
         slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
         node_refs = self.check_extra_tasks(slave_nodes, deployment)
         if node_refs:
-            self.show_step(8)
+            logger.info('Generating a new fixture . . .')
             self.generate_fixture(node_refs, cluster_id, slave_nodes)
             msg = ('Please update idempotency fixtures in the repo '
                    'according to generated fixtures')
             raise DeprecatedFixture(msg)
         self.env.make_snapshot(snapshotname, is_make=True)
@test(depends_on=[SetupEnvironment.prepare_slaves_9],
groups=['lcm_deploy_3_ctrl_3_cmp_ceph_sahara'])
@log_snapshot_after_test
def lcm_deploy_3_ctrl_3_cmp_ceph_sahara(self):
"""Create cluster with Sahara, Ceilometer, Ceph in HA mode
Scenario:
1. Revert snapshot "ready_with_9_slaves"
2. Create cluster
3. Add 3 controllers with mongo role
4. Add 3 compute node with ceph-osd role
5. Deploy cluster
6. Check extra deployment tasks
Duration 240m
Snapshot: "lcm_deploy_3_ctrl_3_cmp_ceph_sahara"
"""
deployment = '3_ctrl_3_cmp_ceph_sahara'
snapshotname = 'lcm_deploy_{}'.format(deployment)
self.check_run(snapshotname)
self.show_step(1)
self.env.revert_snapshot("ready_with_9_slaves")
self.show_step(2)
segment_type = NEUTRON_SEGMENT['tun']
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE,
settings={
'ceilometer': True,
"sahara": True,
'volumes_lvm': False,
'volumes_ceph': True,
'images_ceph': True,
'objects_ceph': True,
"net_segment_type": segment_type
}
)
self.show_step(3)
self.show_step(4)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller', 'mongo'],
'slave-02': ['controller', 'mongo'],
'slave-03': ['controller', 'mongo'],
'slave-04': ['compute', 'ceph-osd'],
'slave-05': ['compute', 'ceph-osd'],
'slave-06': ['compute', 'ceph-osd']
}
)
self.show_step(5)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.show_step(6)
slave_nodes = self.fuel_web.client.list_cluster_nodes(cluster_id)
node_refs = self.check_extra_tasks(slave_nodes, deployment, ha=True)
if node_refs:
logger.info('Generating a new fixture . . .')
self.generate_fixture(node_refs, cluster_id, slave_nodes, ha=True)
msg = ('Please update idempotency fixtures in the repo '
'according to generated fixtures')
raise DeprecatedFixture(msg)
self.env.make_snapshot(snapshotname, is_make=True)


@@ -31,7 +31,9 @@ tasks:
 - plugins_setup_repositories: null
 - ceph-compute: null
 - ssl-keys-saving: null
-- sriov_iommu_check: null
+- sriov_iommu_check:
+    skip:
+      - Exec[sriov_iommu_check]
 - openstack-network-end:
     type: skipped
 - ceilometer-compute:


@@ -0,0 +1,67 @@
roles:
compute
tasks:
- update_hosts: null
- openstack-network-start:
type: skipped
- openstack-network-common-config: null
- clear_nodes_info:
type: skipped
- openstack-network-agents-sriov: null
- override_configuration: null
- globals: null
- fuel_pkgs: null
- openstack-network-agents-l3: null
- openstack-network-agents-metadata: null
- tools: null
- rsync_core_puppet:
type: sync
- enable_nova_compute_service: null
- cgroups: null
- upload_nodes_info:
type: skipped
- copy_keys:
type: copy_files
- copy_deleted_nodes:
type: copy_files
- setup_repositories: null
- dns-client: null
- openstack-network-plugins-l2: null
- allocate_hugepages: null
- plugins_setup_repositories:
no_puppet_run: true
- updatedb: null
- ceph-compute: null
- ssl-keys-saving: null
- sriov_iommu_check:
skip:
- Exec[sriov_iommu_check]
- openstack-network-end:
type: skipped
- copy_keys_ceph:
type: copy_files
- upload_configuration:
type: upload_file
- firewall: null
- top-role-ceph-osd: null
- logging: null
- top-role-compute:
skip:
- Notify[Module openstack_tasks cannot notify service nova-compute on packages
update]
- Service[nova-compute]
- sync_time:
type: shell
- openstack-network-compute-nova: null
- plugins_rsync:
no_puppet_run: true
- connectivity_tests: null
- configuration_symlink:
type: shell
- ceilometer-compute: null
- hosts: null
- copy_haproxy_keys:
type: copy_files
- ntp-client: null
- ssl-add-trust-chain: null
- reserved_ports: null


@@ -0,0 +1,130 @@
roles:
controller
tasks:
- openstack-haproxy-mysqld: null
- openstack-network-agents-l3: null
- dump_rabbitmq_definitions: null
- rsync_core_puppet:
type: sync
- ssl-dns-setup: null
- ceilometer-controller: null
- override_configuration: null
- ceph_create_pools: null
- virtual_ips: null
- openstack-haproxy-murano:
no_puppet_run: true
- openstack-network-end:
type: skipped
- openstack-haproxy-radosgw: null
- openstack-haproxy-swift: null
- openstack-haproxy-neutron: null
- updatedb: null
- plugins_rsync:
no_puppet_run: true
- openstack-controller: null
- rabbitmq: null
- openstack-haproxy-keystone: null
- hosts: null
- ntp-client: null
- reserved_ports: null
- controller_remaining_tasks: null
- openstack-haproxy-aodh: null
- murano-cfapi:
no_puppet_run: true
- vmware-vcenter:
no_puppet_run: true
- ceilometer-radosgw-user: null
- ironic-compute:
no_puppet_run: true
- dns-server: null
- swift-proxy_storage:
no_puppet_run: true
- copy_keys:
type: copy_files
- enable_rados: null
- ntp-check: null
- keystone: null
- disable_keystone_service_token:
no_puppet_run: true
- umm: null
- ceph-mon: null
- memcached: null
- allocate_hugepages: null
- openrc-delete:
skip:
- File[/root/openrc]
- plugins_setup_repositories:
no_puppet_run: true
- ceph-radosgw: null
- openstack-haproxy-sahara: null
- ssl-keys-saving: null
- apache: null
- upload_configuration:
type: upload_file
- logging: null
- update_hosts: null
- connectivity_tests: null
- openstack-network-agents-metadata: null
- conntrackd: null
- horizon: null
- openstack-haproxy-ceilometer: null
- openstack-network-common-config: null
- firewall: null
- cluster-haproxy: null
- globals: null
- glance: null
- tools: null
- openstack-haproxy: null
- cgroups: null
- aodh: null
- copy_deleted_nodes:
type: copy_files
- openstack-haproxy-ironic:
no_puppet_run: true
- setup_repositories: null
- swift-rebalance-cron:
no_puppet_run: true
- heat: null
- openstack-haproxy-stats: null
- ironic-api:
no_puppet_run: true
- top-role-mongo: null
- dns-client: null
- cluster-vrouter: null
- murano-rabbitmq:
no_puppet_run: true
- api-proxy: null
- cluster_health: null
- openstack-haproxy-horizon: null
- openstack-network-start:
type: skipped
- clear_nodes_info:
type: skipped
- copy_keys_ceph:
type: copy_files
- cluster: null
- sahara: null
- fuel_pkgs: null
- public_vip_ping: null
- upload_nodes_info:
type: skipped
- openstack-haproxy-glance: null
- murano:
no_puppet_run: true
- openstack-network-plugins-l2: null
- openstack-network-agents-dhcp: null
- openstack-haproxy-nova: null
- openstack-network-server-config: null
- openstack-haproxy-cinder: null
- ntp-server: null
- openstack-haproxy-heat: null
- openstack-cinder: null
- sync_time:
type: shell
- database: null
- configuration_symlink:
type: shell
- openstack-network-server-nova: null
- copy_haproxy_keys:
type: copy_files
- ssl-add-trust-chain: null


@@ -0,0 +1,183 @@
roles:
controller
tasks:
- ironic_post_swift_key:
type: shell
- openstack-haproxy-mysqld: null
- top-role-primary-mongo: null
- cinder-db: null
- dump_rabbitmq_definitions: null
- rsync_core_puppet:
type: sync
- ssl-dns-setup: null
- ceilometer-controller: null
- override_configuration: null
- ceilometer-keystone: null
- nova-db: null
- workloads_collector_add: null
- primary-openstack-network-plugins-l2: null
- radosgw-keystone: null
- virtual_ips: null
- primary-dns-server: null
- openstack-haproxy-murano:
no_puppet_run: true
- openstack-network-end:
type: skipped
- openstack-haproxy-radosgw: null
- openstack-haproxy-swift: null
- heat-db: null
- openstack-haproxy-neutron: null
- updatedb: null
- ironic-db:
no_puppet_run: true
- plugins_rsync:
no_puppet_run: true
- ceilometer-radosgw-user: null
- openstack-haproxy-keystone: null
- hosts: null
- primary-rabbitmq: null
- primary-cluster-haproxy: null
- openstack-network-routers: null
- reserved_ports: null
- controller_remaining_tasks: null
- glance-keystone: null
- openstack-haproxy-aodh: null
- murano-cfapi:
no_puppet_run: true
- vmware-vcenter:
no_puppet_run: true
- ironic-compute:
no_puppet_run: true
- primary-openstack-network-agents-metadata: null
- cinder-keystone: null
- copy_keys:
type: copy_files
- enable_rados: null
- ntp-check: null
- aodh-db: null
- disable_keystone_service_token:
no_puppet_run: true
- umm: null
- memcached: null
- allocate_hugepages: null
- openrc-delete:
skip:
- File[/root/openrc]
- plugins_setup_repositories:
no_puppet_run: true
- sahara-keystone: null
- openstack-haproxy-sahara: null
- ssl-keys-saving: null
- primary-cluster: null
- upload_cirros:
type: shell
- primary-keystone:
skip:
- File[/root/openrc]
- apache: null
- upload_configuration:
type: upload_file
- create-cinder-types: null
- neutron-keystone: null
- logging: null
- ntp-client: null
- nova-keystone: null
- update_hosts: null
- ironic-keystone:
no_puppet_run: true
- connectivity_tests: null
- primary-heat: null
- conntrackd: null
- sahara-db: null
- horizon: null
- openstack-haproxy-ceilometer: null
- openstack-network-common-config: null
- firewall: null
- primary-openstack-network-agents-l3: null
- globals: null
- aodh-keystone: null
- glance: null
- tools: null
- openstack-haproxy: null
- cgroups: null
- murano-cfapi-keystone:
no_puppet_run: true
- aodh: null
- copy_deleted_nodes:
type: copy_files
- openstack-haproxy-ironic:
no_puppet_run: true
- setup_repositories: null
- openstack-network-routers-ha:
no_puppet_run: true
- glance-db: null
- neutron-db: null
- ironic_upload_images:
type: shell
- swift-rebalance-cron:
no_puppet_run: true
- primary-ceph-mon: null
- openstack-haproxy-stats: null
- ironic-api:
no_puppet_run: true
- primary-ceph-radosgw: null
- dns-client: null
- cluster-vrouter: null
- murano-rabbitmq:
no_puppet_run: true
- api-proxy: null
- cluster_health: null
- heat-keystone: null
- openstack-haproxy-horizon: null
- openstack-network-start:
type: skipped
- clear_nodes_info:
type: skipped
- murano-db:
no_puppet_run: true
- copy_keys_ceph:
type: copy_files
- sahara: null
- fuel_pkgs: null
- swift-keystone:
no_puppet_run: true
- public_vip_ping: null
- upload_nodes_info:
type: skipped
- openstack-haproxy-glance: null
- murano:
no_puppet_run: true
- ceph_ready_check:
type: shell
- enable_quorum:
type: shell
- openstack-haproxy-nova: null
- openstack-network-server-config: null
- primary-database:
skip:
- File[/root/.my.cnf]
- vcenter_compute_zones_create:
type: shell
- openstack-haproxy-cinder: null
- ntp-server: null
- murano-keystone:
no_puppet_run: true
- swift-proxy_storage:
no_puppet_run: true
- primary-openstack-network-agents-dhcp: null
- openstack-haproxy-heat: null
- primary-openstack-controller: null
- openstack-cinder: null
- ceph_create_pools: null
- keystone-db:
skip:
- File[/root/.my.cnf]
- sync_time:
type: shell
- configuration_symlink:
type: shell
- openstack-network-server-nova: null
- copy_haproxy_keys:
type: copy_files
- openstack-network-networks: null
- ssl-add-trust-chain: null


@@ -39,9 +39,13 @@ class TaskIdempotency(LCMTestBasic):
         result = {'tasks_idempotency': {},
                   'timeouterror_tasks': {}}
+        pr_ctrl = (self.define_pr_ctrl()
+                   if deployment == '3_ctrl_3_cmp_ceph_sahara'
+                   else {})
         for node in slave_nodes:
             node_roles = "_".join(sorted(node["roles"]))
+            if node.get('name') == pr_ctrl.get('name', None):
+                node_roles = 'primary-' + node_roles
             node_ref = "{}_{}".format(node["id"], node_roles)
             fixture = self.load_fixture(deployment, node_roles)
@@ -161,3 +165,26 @@ class TaskIdempotency(LCMTestBasic):
                             'There are non-idempotent tasks. '
                             'Please take a look at the output above!')
         self.env.make_snapshot('idempotency_{}'.format(deployment))
@test(depends_on=[SetupLCMEnvironment.lcm_deploy_3_ctrl_3_cmp_ceph_sahara],
groups=['idempotency',
'idempotency_3_ctrl_3_cmp_ceph_sahara'])
@log_snapshot_after_test
def idempotency_3_ctrl_3_cmp_ceph_sahara(self):
"""Test idempotency for cluster with Sahara, Ceilometer,
Ceph in HA mode
Scenario:
1. Revert snapshot "lcm_deploy_3_ctrl_3_cmp_ceph_sahara"
2. Check task idempotency
Snapshot: "idempotency_3_ctrl_3_cmp_ceph_sahara"
"""
self.show_step(1)
deployment = "3_ctrl_3_cmp_ceph_sahara"
self.env.revert_snapshot('lcm_deploy_{}'.format(deployment))
self.show_step(2)
asserts.assert_true(self.check_idempotency(deployment),
'There are non-idempotent tasks. '
'Please take a look at the output above!')
self.env.make_snapshot('idempotency_{}'.format(deployment))