Retire the Congress Tempest Plugin project

Recently the TC worked out the criteria for when an OpenStack
project should be retired.  The absence of a PTL nominee for the
Congress project triggered a TC review of the project's health per
[1], and the TC determined [2] that development work on the project
has ceased.  This decision was announced on the openstack-discuss
mailing list in April 2020 [3].

This commit retires the repository per the process for governance
removal in the Victoria cycle as specified in the Mandatory Repository
Retirement resolution [4] and detailed in the infra manual [5].

Should interest in developing Congress as part of OpenStack revive,
please revert this commit to have the project rejoin the list of active
projects.
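
For reference, such a revival would start from a revert of this
change, e.g. (using the short hash recorded below)::

    $ git revert 3d3098c893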

The community wishes to express its thanks and appreciation to all of
those who have contributed to the Congress project over the years.

[1] https://governance.openstack.org/tc/reference/dropping-projects.html
[2] http://eavesdrop.openstack.org/irclogs/%23openstack-tc/latest.log.html#t2020-04-20T15:36:59
[3] http://lists.openstack.org/pipermail/openstack-discuss/2020-April/014292.html
[4] https://governance.openstack.org/tc/resolutions/20190711-mandatory-repository-retirement.html
[5] https://docs.opendev.org/opendev/infra-manual/latest/drivers.html#retiring-a-project

Change-Id: I74824174861c394e75b8c733ae9bbc3c9b410268
Author: Nate Johnston
Date: 2020-04-21 17:48:51 -04:00
parent f924c6cd98
commit 3d3098c893
89 changed files with 11 additions and 7615 deletions

.gitignore

@@ -1,59 +0,0 @@
*.py[cod]
# C extensions
*.so
# Packages
*.egg*
*.egg-info
dist
build
eggs
parts
bin
var
sdist
develop-eggs
.installed.cfg
lib
lib64
# Installer logs
pip-log.txt
# Unit test / coverage reports
cover/
.coverage*
!.coveragerc
.tox
nosetests.xml
.testrepository
.stestr
.venv
# Translations
*.mo
# Mr Developer
.mr.developer.cfg
.project
.pydevproject
# Complexity
output/*.html
output/*/index.html
# Sphinx
doc/build
# pbr generates these
AUTHORS
ChangeLog
# Editors
*~
.*.swp
.*sw?
# Files created by releasenotes build
releasenotes/build

@@ -1,3 +0,0 @@
[DEFAULT]
test_path=./congress_tempest_plugin/tests
top_dir=./
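
For context, this is the configuration format read by the stestr test
runner; with such a file at the repository root, the plugin's tests
could be listed and run with, e.g.::

    $ stestr list congress
    $ stestr run congress_tempest_plugin.tests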

@@ -1,156 +0,0 @@
- job:
name: congress-tempest-py3-mysql-train
parent: congress-tempest-py3
override-checkout: stable/train
- job:
name: congress-tempest-py3-mysql-stein
parent: congress-tempest-py3
override-checkout: stable/stein
#### Legacy jobs no longer used in Stein; carried here for testing Queens and Rocky ####
- job:
name: congress-legacy-devstack-api-base
parent: legacy-dsvm-base
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- ^congress/tests/.*$
- ^releasenotes/.*$
required-projects:
- openstack/devstack-gate
- openstack/aodh
- openstack/python-aodhclient
- openstack/congress
- openstack/congress-dashboard
- openstack/congress-tempest-plugin
- openstack/python-congressclient
- openstack/murano
- openstack/murano-dashboard
- openstack/python-muranoclient
- openstack/mistral
- openstack/python-mistralclient
- openstack/mistral-tempest-plugin
- openstack/murano-tempest-plugin
- openstack/telemetry-tempest-plugin
- openstack/heat-tempest-plugin
- openstack/monasca-agent
- openstack/monasca-api
- openstack/monasca-common
- openstack/monasca-grafana-datasource
- openstack/monasca-notification
- openstack/monasca-persister
- openstack/monasca-statsd
- openstack/monasca-thresh
- openstack/monasca-ui
- openstack/python-monascaclient
- openstack/monasca-tempest-plugin
run: playbooks/legacy/congress-devstack-api-base/run.yaml
post-run: playbooks/legacy/congress-devstack-api-base/post.yaml
timeout: 6000
- job:
name: congress-legacy-pe-replicated-base
parent: legacy-dsvm-base
vars:
database: mysql
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- ^congress/tests/.*$
- ^releasenotes/.*$
required-projects:
- openstack/devstack-gate
- openstack/aodh
- openstack/python-aodhclient
- openstack/congress
- openstack/congress-dashboard
- openstack/congress-tempest-plugin
- openstack/python-congressclient
- openstack/murano
- openstack/murano-dashboard
- openstack/python-muranoclient
- openstack/mistral
- openstack/python-mistralclient
- openstack/mistral-tempest-plugin
- openstack/murano-tempest-plugin
- openstack/telemetry-tempest-plugin
- openstack/heat-tempest-plugin
run: playbooks/legacy/congress-legacy-pe-replicated-base/run.yaml
post-run: playbooks/legacy/congress-legacy-pe-replicated-base/post.yaml
timeout: 6000
- job:
name: congress-legacy-devstack-api-postgresql
parent: congress-legacy-devstack-api-base
voting: false
vars:
database: postgresql
- job:
name: congress-legacy-devstack-py35-api-mysql
parent: legacy-dsvm-base
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- ^congress/tests/.*$
- ^releasenotes/.*$
required-projects:
- openstack/devstack-gate
- openstack/congress
- openstack/congress-tempest-plugin
- openstack/congress-dashboard
- openstack/python-congressclient
- openstack/murano
- openstack/murano-dashboard
- openstack/python-muranoclient
- openstack/murano-tempest-plugin
- openstack/heat-tempest-plugin
run: playbooks/legacy/congress-devstack-py35-api-mysql/run.yaml
post-run: playbooks/legacy/congress-devstack-py35-api-mysql/post.yaml
timeout: 6000
- job:
name: congress-legacy-pe-replicated-mysql
parent: congress-legacy-pe-replicated-base
voting: false
- job:
name: congress-legacy-pe-replicated-postgresql
parent: congress-legacy-pe-replicated-base
voting: false
vars:
database: postgresql
- job:
name: congress-legacy-devstack-api-py35-mysql-rocky
parent: congress-legacy-devstack-py35-api-mysql
run: playbooks/legacy/congress-devstack-api-py35-rocky/run.yaml
post-run: playbooks/legacy/congress-devstack-api-py35-rocky/post.yaml
nodeset: openstack-single-node-xenial
- job:
name: congress-legacy-devstack-api-py35-mysql-queens
parent: congress-legacy-devstack-py35-api-mysql
run: playbooks/legacy/congress-devstack-api-py35-queens/run.yaml
post-run: playbooks/legacy/congress-devstack-api-py35-queens/post.yaml
nodeset: openstack-single-node-xenial
- project:
templates:
- check-requirements
- tempest-plugin-jobs
- publish-openstack-docs-pti
- openstack-python3-ussuri-jobs
check:
jobs:
- congress-tempest-py3-mysql
- congress-tempest-replicated-postgresql
- congress-tempest-py3-mysql-train
- congress-tempest-py3-mysql-stein
- congress-legacy-devstack-api-py35-mysql-rocky
- congress-legacy-devstack-api-py35-mysql-queens
gate:
queue: congress
jobs:
- congress-tempest-py3-mysql

@@ -1,17 +0,0 @@
If you would like to contribute to the development of OpenStack, you must
follow the steps in this page:
https://docs.openstack.org/infra/manual/developers.html
If you already have a good understanding of how the system works and your
OpenStack accounts are set up, you can skip to the development workflow
section of this documentation to learn how changes to OpenStack should be
submitted for review via the Gerrit tool:
https://docs.openstack.org/infra/manual/developers.html#development-workflow
Pull requests submitted through GitHub will be ignored.
Bugs should be filed on Launchpad, not GitHub:
https://bugs.launchpad.net/congress

@@ -1,4 +0,0 @@
congress-tempest-plugin Style Commandments
==========================================
Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/

LICENSE

@@ -1,176 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

@@ -1,28 +1,14 @@
-=======================
-congress-tempest-plugin
-=======================
+==================================
+Welcome to Congress Tempest Plugin
+==================================
-Tempest Plugin for Congress project.
-This project covers Tempest tests for Congress project.
+This project is no longer maintained.
-* Free software: Apache license
-* Documentation: https://docs.openstack.org/congress-tempest-plugin/latest
-* Release notes: https://docs.openstack.org/releasenotes/congress/
-* Source: https://opendev.org/openstack/congress-tempest-plugin
-* Bugs: https://bugs.launchpad.net/congress
-Tempest Integration
--------------------
-To list all Congress tempest cases, go to tempest directory, then run::
-    $ testr list-tests congress
-To run congress tempest plugin tests using tox, go to tempest directory, then run::
-    $ tox -eall-plugin congress
-And, to run a specific test::
-    $ tox -eall-plugin congress_tempest_plugin.tests.scenario.test_congress_basic_ops.TestPolicyBasicOps.test_policy_basic_op
+The contents of this repository are still available in the Git
+source code management system. To see the contents of this
+repository before it reached its end of life, please check out the
+previous commit with "git checkout HEAD^1".
+For any further questions, please email
+openstack-discuss@lists.openstack.org or join #openstack-dev on
+Freenode.
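
Concretely, following that notice to browse the retired code might
look like this (clone URL taken from the old README)::

    $ git clone https://opendev.org/openstack/congress-tempest-plugin
    $ cd congress-tempest-plugin
    $ git checkout HEAD^1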

@@ -1,2 +0,0 @@
[python: **.py]

@@ -1,69 +0,0 @@
# Copyright 2015 Intel Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from tempest import config # noqa
service_available_group = cfg.OptGroup(name="service_available",
title="Available OpenStack Services")
ServiceAvailableGroup = [
cfg.BoolOpt('congress',
default=True,
help="Whether or not Congress is expected to be available"),
]
congress_feature_group = cfg.OptGroup(name="congress-feature-enabled",
title="Congress Feature Flags")
CongressFeatureGroup = [
cfg.BoolOpt('monasca_webhook',
default=False,
help="monasca_webhook feature available"),
cfg.BoolOpt('monasca_webhook_rocky',
default=False,
help="monasca_webhook uses Rocky schema"),
cfg.BoolOpt('vitrage_webhook',
default=False,
help="vitrage_webhook feature available"),
cfg.BoolOpt('nova_driver',
default=True,
help="nova driver feature available"),
cfg.BoolOpt('nova_servers_addresses_table',
default=True,
help="nova driver servers.addresses table available"),
]
congressha_group = cfg.OptGroup(name="congressha", title="Congress HA Options")
CongressHAGroup = [
cfg.StrOpt("replica_type",
default="policyha",
help="service type used to create a replica congress server."),
cfg.IntOpt("replica_port",
default=4001,
help="The listening port for a replica congress server. "),
]
congressz3_group = cfg.OptGroup(name="congressz3", title="Congress Z3 Options")
CongressZ3Group = [
cfg.BoolOpt('enabled',
default=False,
help="Whether Z3 is installed or not for Congress"),
cfg.BoolOpt('support_builtins',
default=True,
help="builtins supported by Z3 engine"),
]
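
For reference, these option groups surface as sections in
tempest.conf; a deployment exercising them might set (values
illustrative, not from this commit)::

    [service_available]
    congress = True

    [congress-feature-enabled]
    nova_driver = True
    monasca_webhook = False

    [congressha]
    replica_type = policyha
    replica_port = 4001

    [congressz3]
    enabled = True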

@@ -1,54 +0,0 @@
# Copyright 2015 Intel
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from tempest import config
from tempest.test_discover import plugins
from congress_tempest_plugin import config as config_congress
class CongressTempestPlugin(plugins.TempestPlugin):
def load_tests(self):
base_path = os.path.split(os.path.dirname(
os.path.abspath(__file__)))[0]
test_dir = "congress_tempest_plugin/tests"
full_test_dir = os.path.join(base_path, test_dir)
return full_test_dir, base_path
def register_opts(self, conf):
config.register_opt_group(conf,
config_congress.service_available_group,
config_congress.ServiceAvailableGroup)
config.register_opt_group(conf, config_congress.congress_feature_group,
config_congress.CongressFeatureGroup)
config.register_opt_group(conf, config_congress.congressha_group,
config_congress.CongressHAGroup)
config.register_opt_group(conf, config_congress.congressz3_group,
config_congress.CongressZ3Group)
def get_opt_lists(self):
return [
(config_congress.congressha_group.name,
config_congress.CongressHAGroup),
(config_congress.congressz3_group.name,
config_congress.CongressZ3Group),
(config_congress.service_available_group.name,
config_congress.ServiceAvailableGroup),
(config_congress.congress_feature_group.name,
config_congress.CongressFeatureGroup),
]
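
Tempest discovers a plugin class like this through the
tempest.test_plugins setuptools entry point; the registration in this
repo's setup.cfg would have looked roughly like the following (entry
point name assumed)::

    [entry_points]
    tempest.test_plugins =
        congress_tests = congress_tempest_plugin.plugin:CongressTempestPlugin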

@@ -1,68 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.network import base
class QosPoliciesClient(base.BaseNetworkClient):
def create_qos_policy(self, **kwargs):
"""Creates an OpenStack Networking qos_policy.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/network/v2/index.html#quality-of-service
"""
uri = '/qos/policies'
post_data = {'policy': kwargs}
return self.create_resource(uri, post_data)
def update_qos_policy(self, qos_policy_id, **kwargs):
"""Updates a qos policy.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/network/v2/index.html#quality-of-service
"""
uri = '/qos/policies/%s' % qos_policy_id
post_data = {'policy': kwargs}
return self.update_resource(uri, post_data)
def show_qos_policy(self, qos_policy_id, **fields):
"""Shows details for a qos policy.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/network/v2/index.html#quality-of-service
"""
uri = '/qos/policies/%s' % qos_policy_id
return self.show_resource(uri, **fields)
def delete_qos_policy(self, qos_policy_id):
"""Deletes an OpenStack Networking qos policy.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/network/v2/index.html#quality-of-service
"""
uri = '/qos/policies/%s' % qos_policy_id
return self.delete_resource(uri)
def list_qos_policy(self, **filters):
"""Lists OpenStack Networking qos policies.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/network/v2/index.html#quality-of-service
"""
uri = '/qos/policies'
return self.list_resources(uri, **filters)
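
A test case might drive this client roughly as follows; this is a
hypothetical sketch that assumes qos_client is an already-instantiated
QosPoliciesClient and that, as in the Neutron API, responses are
wrapped in a 'policy' key::

    # Hypothetical usage sketch for QosPoliciesClient.
    policy = qos_client.create_qos_policy(
        name='qos-1', shared=False)['policy']
    qos_client.update_qos_policy(policy['id'], description='updated')
    shown = qos_client.show_qos_policy(policy['id'])['policy']
    assert shown['id'] == policy['id']
    qos_client.delete_qos_policy(policy['id'])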

@@ -1,73 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.network import base
class QosRuleClient(base.BaseNetworkClient):
def create_qos_rule(self, qos_policy_id, qos_rule_type, **kwargs):
"""Creates an OpenStack Networking qos rule.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/network/v2/index.html#quality-of-service
"""
uri = '/qos/policies/%s/%s' % (qos_policy_id, qos_rule_type)
post_data = {qos_rule_type[:-1]: kwargs}
return self.create_resource(uri, post_data)
def update_qos_rule(self, qos_policy_id, qos_rule_type,
qos_rule_id, **kwargs):
"""Updates a qos rule policy.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/network/v2/index.html#quality-of-service
"""
uri = '/qos/policies/%s/%s/%s' % (qos_policy_id,
qos_rule_type, qos_rule_id)
# Key the body by the singular rule type, mirroring create_qos_rule,
# so rule types other than bandwidth_limit_rules update correctly.
post_data = {qos_rule_type[:-1]: kwargs}
return self.update_resource(uri, post_data)
def show_qos_rule(self, qos_policy_id, qos_rule_type,
qos_rule_id, **fields):
"""Shows details for a qos rule policy.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/network/v2/index.html#quality-of-service
"""
uri = '/qos/policies/%s/%s/%s' % (qos_policy_id,
qos_rule_type, qos_rule_id)
return self.show_resource(uri, **fields)
def delete_qos_rule(self, qos_policy_id, qos_rule_type, qos_rule_id):
"""Deletes an OpenStack Networking qos rule policy.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/network/v2/index.html#quality-of-service
"""
uri = '/qos/policies/%s/%s/%s' % (qos_policy_id,
qos_rule_type, qos_rule_id)
return self.delete_resource(uri)
def list_qos_rule(self, qos_policy_id, **filters):
"""Lists OpenStack Networking qos rule policies.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/network/v2/index.html#quality-of-service
"""
uri = '/qos/policies/%s' % (qos_policy_id)
return self.list_resources(uri, **filters)

@@ -1,222 +0,0 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
class PolicyClient(rest_client.RestClient):
policy = '/v1/policies'
policy_path = '/v1/policies/%s'
policy_rules = '/v1/policies/%s/rules'
policy_rules_path = '/v1/policies/%s/rules/%s'
policy_tables = '/v1/policies/%s/tables'
policy_table_path = '/v1/policies/%s/tables/%s'
policy_rows = '/v1/policies/%s/tables/%s/rows'
policy_rows_trace = '/v1/policies/%s/tables/%s/rows?trace=True'
policies = '/v1/policies'
policies_status = '/v1/policies/%s/status'
policy_action = '/v1/policies/%s?%s'
library_policy = '/v1/librarypolicies'
library_policy_path = '/v1/librarypolicies/%s'
library_policies = '/v1/librarypolicies'
datasources = '/v1/data-sources'
datasource_path = '/v1/data-sources/%s'
datasource_tables = '/v1/data-sources/%s/tables'
datasource_table_path = '/v1/data-sources/%s/tables/%s'
datasource_status = '/v1/data-sources/%s/status'
datasource_schema = '/v1/data-sources/%s/schema'
datasource_webhook = '/v1/data-sources/%s/webhook'
datasource_table_schema = '/v1/data-sources/%s/tables/%s/spec'
datasource_rows = '/v1/data-sources/%s/tables/%s/rows'
driver = '/v1/system/drivers'
driver_path = '/v1/system/drivers/%s'
def _add_params_to_url(self, url, params):
# Join parameters with '&' after the initial '?' so that multiple
# query parameters produce a valid URL.
query = '&'.join('{0}={1}'.format(key, params[key]) for key in params)
return url + '?' + query if query else url
def _resp_helper(self, resp, body=None):
if body:
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def create_policy(self, body, params=None):
if params is None:
params = {}
body = json.dumps(body)
resp, body = self.post(
self._add_params_to_url(self.policy, params), body=body)
return self._resp_helper(resp, body)
def delete_policy(self, policy):
resp, body = self.delete(
self.policy_path % policy)
return self._resp_helper(resp, body)
def show_policy(self, policy):
resp, body = self.get(
self.policy_path % policy)
return self._resp_helper(resp, body)
def create_library_policy(self, body):
body = json.dumps(body)
resp, body = self.post(
self.library_policy, body=body)
return self._resp_helper(resp, body)
def delete_library_policy(self, policy):
resp, body = self.delete(
self.library_policy_path % policy)
return self._resp_helper(resp, body)
def show_library_policy(self, policy):
resp, body = self.get(
self.library_policy_path % policy)
return self._resp_helper(resp, body)
def create_policy_rule(self, policy_name, body=None):
body = json.dumps(body)
resp, body = self.post(
self.policy_rules % policy_name, body=body)
return self._resp_helper(resp, body)
def delete_policy_rule(self, policy_name, rule_id):
resp, body = self.delete(
self.policy_rules_path % (policy_name, rule_id))
return self._resp_helper(resp, body)
def show_policy_rule(self, policy_name, rule_id):
resp, body = self.get(
self.policy_rules_path % (policy_name, rule_id))
return self._resp_helper(resp, body)
def list_policy_rows(self, policy_name, table, trace=None):
if trace:
query = self.policy_rows_trace
else:
query = self.policy_rows
resp, body = self.get(query % (policy_name, table))
return self._resp_helper(resp, body)
def list_policy_rules(self, policy_name):
resp, body = self.get(self.policy_rules % (policy_name))
return self._resp_helper(resp, body)
def list_policy(self):
resp, body = self.get(self.policies)
return self._resp_helper(resp, body)
def list_library_policy(self):
resp, body = self.get(self.library_policies)
return self._resp_helper(resp, body)
def list_policy_tables(self, policy_name):
resp, body = self.get(self.policy_tables % (policy_name))
return self._resp_helper(resp, body)
def list_policy_status(self, policy_name):
resp, body = self.get(self.policies_status % (policy_name))
return self._resp_helper(resp, body)
def execute_policy_action(self, policy_name, action, trace, delta, body):
body = json.dumps(body)
uri = "?action=%s&trace=%s&delta=%s" % (action, trace, delta)
resp, body = self.post(
(self.policy_path % policy_name) + str(uri), body=body)
return self._resp_helper(resp, body)
def show_policy_table(self, policy_name, table_id):
resp, body = self.get(self.policy_table_path % (policy_name, table_id))
return self._resp_helper(resp, body)
def list_datasources(self):
resp, body = self.get(self.datasources)
return self._resp_helper(resp, body)
def list_datasource_tables(self, datasource_name):
resp, body = self.get(self.datasource_tables % (datasource_name))
return self._resp_helper(resp, body)
def list_datasource_rows(self, datasource_name, table_name):
resp, body = self.get(self.datasource_rows %
(datasource_name, table_name))
return self._resp_helper(resp, body)
def list_datasource_status(self, datasource_name):
resp, body = self.get(self.datasource_status % datasource_name)
return self._resp_helper(resp, body)
def show_datasource_schema(self, datasource_name):
resp, body = self.get(self.datasource_schema % datasource_name)
return self._resp_helper(resp, body)
def show_datasource_table_schema(self, datasource_name, table_name):
resp, body = self.get(self.datasource_table_schema %
(datasource_name, table_name))
return self._resp_helper(resp, body)
def show_datasource_table(self, datasource_name, table_id):
resp, body = self.get(self.datasource_table_path %
(datasource_name, table_id))
return self._resp_helper(resp, body)
def create_datasource(self, body=None):
body = json.dumps(body)
resp, body = self.post(
self.datasources, body=body)
return self._resp_helper(resp, body)
def delete_datasource(self, datasource):
resp, body = self.delete(
self.datasource_path % datasource)
return self._resp_helper(resp, body)
def update_datasource_row(self, datasource_name, table_id, rows):
body = json.dumps(rows)
resp, body = self.put(
self.datasource_rows % (datasource_name, table_id), body)
return self._resp_helper(resp)
def send_datasource_webhook(self, datasource_name, body):
body = json.dumps(body)
resp, body = self.post(
self.datasource_webhook % datasource_name, body=body)
return self._resp_helper(resp)
def execute_datasource_action(self, service_name, action, body):
body = json.dumps(body)
uri = "?action=%s" % (action)
resp, body = self.post(
(self.datasource_path % service_name) + str(uri), body=body)
return self._resp_helper(resp, body)
def list_drivers(self):
resp, body = self.get(self.driver)
return self._resp_helper(resp, body)
def show_driver(self, driver):
resp, body = self.get(self.driver_path % (driver))
return self._resp_helper(resp, body)
def request_refresh(self, driver, body=None):
body = json.dumps(body)
resp, body = self.post(self.datasource_path %
(driver) + "?action=request-refresh",
body=body)
return self._resp_helper(resp, body)
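
End to end, the client supports flows like the following hypothetical
sketch, which assumes an authenticated PolicyClient instance named
client and uses a trivial Datalog rule::

    # Create a policy, add a rule, read derived rows, then clean up.
    client.create_policy({'name': 'test_policy'})
    rule = client.create_policy_rule(
        'test_policy', body={'rule': 'p(x) :- q(x)'})
    rows = client.list_policy_rows('test_policy', 'p')
    client.delete_policy_rule('test_policy', rule['id'])
    client.delete_policy('test_policy')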

@@ -1,171 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import clients
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_plugin.tests.scenario import manager_congress
CONF = config.CONF
class TestKeystoneV2Driver(manager_congress.ScenarioPolicyBase):
@classmethod
def skip_checks(cls):
super(TestKeystoneV2Driver, cls).skip_checks()
if not (CONF.network.project_networks_reachable or
CONF.network.public_network_id):
msg = ('Either project_networks_reachable must be "true", or '
'public_network_id must be defined.')
cls.enabled = False
raise cls.skipException(msg)
def setUp(self):
super(TestKeystoneV2Driver, self).setUp()
self.os_primary = clients.Manager(
self.os_admin.auth_provider.credentials)
self.keystone = self.os_primary.identity_client
self.tenants_client = self.os_primary.tenants_client
self.roles_client = self.os_primary.roles_client
self.users_client = self.os_primary.users_client
self.datasource_id = manager_congress.get_datasource_id(
self.os_admin.congress_client, 'keystone')
@decorators.attr(type='smoke')
def test_keystone_users_table(self):
user_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'users')['columns'])
user_id_col = next(i for i, c in enumerate(user_schema)
if c['name'] == 'id')
def _check_data_table_keystone_users():
# Fetch data from keystone each time, because this test may start
# before keystone has all the users.
users = self.users_client.list_users()['users']
user_map = {}
for user in users:
user_map[user['id']] = user
results = (
self.os_admin.congress_client.list_datasource_rows(
self.datasource_id, 'users'))
for row in results['results']:
try:
user_row = user_map[row['data'][user_id_col]]
except KeyError:
return False
for index in range(len(user_schema)):
if ((user_schema[index]['name'] == 'tenantId' and
'tenantId' not in user_row) or
(user_schema[index]['name'] == 'email' and
'email' not in user_row)):
# Keystone does not return the tenantId or email column
# if not present.
pass
elif (str(row['data'][index]) !=
str(user_row[user_schema[index]['name']])):
return False
return True
if not test_utils.call_until_true(
func=_check_data_table_keystone_users,
duration=100, sleep_for=4):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
def test_keystone_roles_table(self):
role_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'roles')['columns'])
role_id_col = next(i for i, c in enumerate(role_schema)
if c['name'] == 'id')
def _check_data_table_keystone_roles():
# Fetch data from keystone each time, because this test may start
# before keystone has all the roles.
roles = self.roles_client.list_roles()['roles']
roles_map = {}
for role in roles:
roles_map[role['id']] = role
results = (
self.os_admin.congress_client.list_datasource_rows(
self.datasource_id, 'roles'))
for row in results['results']:
try:
role_row = roles_map[row['data'][role_id_col]]
except KeyError:
return False
for index in range(len(role_schema)):
if (str(row['data'][index]) !=
str(role_row[role_schema[index]['name']])):
return False
return True
if not test_utils.call_until_true(
func=_check_data_table_keystone_roles,
duration=100, sleep_for=4):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
def test_keystone_tenants_table(self):
tenant_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'tenants')['columns'])
tenant_id_col = next(i for i, c in enumerate(tenant_schema)
if c['name'] == 'id')
def _check_data_table_keystone_tenants():
# Fetch data from keystone each time, because this test may start
# before keystone has all the tenants.
tenants = self.tenants_client.list_tenants()['tenants']
tenants_map = {}
for tenant in tenants:
tenants_map[tenant['id']] = tenant
results = (
self.os_admin.congress_client.list_datasource_rows(
self.datasource_id, 'tenants'))
for row in results['results']:
try:
tenant_row = tenants_map[row['data'][tenant_id_col]]
except KeyError:
return False
for index in range(len(tenant_schema)):
if (str(row['data'][index]) !=
str(tenant_row[tenant_schema[index]['name']])):
return False
return True
if not test_utils.call_until_true(
func=_check_data_table_keystone_tenants,
duration=100, sleep_for=5):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
def test_update_no_error(self):
if not test_utils.call_until_true(
func=lambda: self.check_datasource_no_error('keystone'),
duration=30, sleep_for=5):
raise exceptions.TimeoutException('Datasource could not poll '
'without error.')

@@ -1,91 +0,0 @@
# Copyright 2016 NEC Corporation. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_plugin.tests.scenario import manager_congress
CONF = config.CONF
class TestAodhDriver(manager_congress.ScenarioPolicyBase):
@classmethod
def skip_checks(cls):
super(TestAodhDriver, cls).skip_checks()
if not getattr(CONF.service_available, 'aodh', False):
msg = ("%s skipped as aodh is not available" %
cls.__class__.__name__)
raise cls.skipException(msg)
def setUp(self):
super(TestAodhDriver, self).setUp()
self.alarms_client = self.os_admin.alarms_client
self.datasource_id = manager_congress.get_datasource_id(
self.os_admin.congress_client, 'aodh')
@decorators.attr(type='smoke')
def test_aodh_alarms_table(self):
self.alarms_client.create_alarm(
name='test-alarm',
enabled=False,
type='event',
event_rule={})
alarms_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'alarms')['columns'])
alarms_id_col = next(i for i, c in enumerate(alarms_schema)
if c['name'] == 'alarm_id')
def _check_data_table_aodh_alarms():
# Fetch data from aodh each time, because this test may start
# before aodh has all the alarms.
alarms = self.alarms_client.list_alarms()
alarm_map = {}
for alarm in alarms:
alarm_map[alarm['alarm_id']] = alarm
results = (
self.os_admin.congress_client.list_datasource_rows(
self.datasource_id, 'alarms'))
for row in results['results']:
try:
alarm_row = alarm_map[row['data'][alarms_id_col]]
except KeyError:
return False
for index in range(len(alarms_schema)):
if (str(row['data'][index]) !=
str(alarm_row[alarms_schema[index]['name']])):
return False
return True
if not test_utils.call_until_true(func=_check_data_table_aodh_alarms,
duration=100, sleep_for=5):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
def test_update_no_error(self):
if not test_utils.call_until_true(
func=lambda: self.check_datasource_no_error('aodh'),
duration=30, sleep_for=5):
raise exceptions.TimeoutException('Datasource could not poll '
'without error.')

@@ -1,130 +0,0 @@
# Copyright 2017 Orange. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"Tempest tests for config datasource"
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_plugin.tests.scenario import manager_congress
CONF = config.CONF
def find_col(schema, name):
"Finds the index of a column in a congress table."
return next(i for i, c in enumerate(schema) if c['name'] == name)
class TestCfgValidatorDriver(manager_congress.ScenarioPolicyBase):
"""Tempest tests for the config datasource.
Checks that the datasource is available and test it on congress
configuration files.
"""
def setUp(self):
super(TestCfgValidatorDriver, self).setUp()
self.keypairs = {}
self.servers = []
datasources = self.os_admin.congress_client.list_datasources()
for datasource in datasources['results']:
if datasource['name'] == 'config':
self.datasource_id = datasource['id']
return
self.skipTest('no datasource config configured.')
@decorators.attr(type='smoke')
def test_update_no_error(self):
"Test that config datasource is correctly launched."
if not test_utils.call_until_true(
func=lambda: self.check_datasource_no_error('config'),
duration=30, sleep_for=5):
raise exceptions.TimeoutException('Datasource could not poll '
'without error.')
@decorators.attr(type='smoke')
def test_metadata_sent(self):
"Test that metadata on congress options are sent."
client = self.os_admin.congress_client
schema1 = (
client.show_datasource_table_schema(
self.datasource_id, 'option')['columns'])
col1_name = find_col(schema1, 'name')
col1_group = find_col(schema1, 'group')
col1_namespace = find_col(schema1, 'namespace')
schema2 = (
client.show_datasource_table_schema(
self.datasource_id, 'namespace')['columns'])
col2_name = find_col(schema2, 'name')
col2_id = find_col(schema2, 'id')
def _check_metadata():
res1 = (
self.os_admin.congress_client.list_datasource_rows(
self.datasource_id, 'option')).get('results', None)
res2 = (
self.os_admin.congress_client.list_datasource_rows(
self.datasource_id, 'namespace')).get('results', None)
if res1 is None or res2 is None:
return False
row1 = next((r for r in res1
if r['data'][col1_name] == u'datasource_sync_period'),
None)
row2 = next((r for r in res2
if r['data'][col2_name] == u'congress'),
None)
if row1 is None or row2 is None:
return False
if row1['data'][col1_group] != 'DEFAULT':
return False
return row1['data'][col1_namespace] == row2['data'][col2_id]
if not test_utils.call_until_true(
func=_check_metadata,
duration=100, sleep_for=4):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
def test_options_sent(self):
"Test that there is at least one value for congress option."
# driver config option is deprecated, no longer valid
# driver = u'congress.datasources.cfgvalidator_driver.ValidatorDriver'
auth_strategy = 'keystone'
client = self.os_admin.congress_client
schema = (
client.show_datasource_table_schema(
self.datasource_id, 'binding')['columns'])
col_value = find_col(schema, 'val')
def _check_value():
res = (
self.os_admin.congress_client.list_datasource_rows(
self.datasource_id, 'binding')).get('results', None)
if res is None:
return False
row = next((r for r in res
if r['data'][col_value] == auth_strategy),
None)
return row is not None
if not test_utils.call_until_true(
func=_check_value,
duration=100, sleep_for=4):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")

@@ -1,116 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest import clients
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_plugin.tests.scenario import manager_congress
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestCinderDriver(manager_congress.ScenarioPolicyBase):
@classmethod
def skip_checks(cls):
super(TestCinderDriver, cls).skip_checks()
if not (CONF.network.project_networks_reachable or
CONF.network.public_network_id):
msg = ('Either project_networks_reachable must be "true", or '
'public_network_id must be defined.')
cls.enabled = False
raise cls.skipException(msg)
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
def setUp(self):
super(TestCinderDriver, self).setUp()
self.os_primary = clients.Manager(
self.os_admin.auth_provider.credentials)
self.cinder = self.os_primary.volumes_v2_client
self.datasource_id = manager_congress.get_datasource_id(
self.os_admin.congress_client, 'cinder')
res = self.cinder.create_volume(size=1, description=None, name='v0',
consistencygroup_id=None, metadata={})
LOG.debug('result of creating new volume: %s', res)
@decorators.attr(type='smoke')
def test_cinder_volumes_table(self):
volume_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'volumes')['columns'])
volume_id_col = next(i for i, c in enumerate(volume_schema)
if c['name'] == 'id')
def _check_data_table_cinder_volumes():
# Fetch data from cinder each time, because this test may start
# before cinder has all the volumes.
volumes = self.cinder.list_volumes()['volumes']
LOG.debug('cinder volume list: %s', volumes)
volumes_map = {}
for volume in volumes:
volumes_map[volume['id']] = volume
results = (
self.os_admin.congress_client.list_datasource_rows(
self.datasource_id, 'volumes'))
LOG.debug('congress cinder volumes table: %s', results)
# check that congress and cinder return the same volume IDs
rows_volume_id_set = set()
for row in results['results']:
rows_volume_id_set.add(row['data'][volume_id_col])
if rows_volume_id_set != frozenset(volumes_map.keys()):
LOG.debug('volumes IDs mismatch')
return False
# FIXME(ekcs): the following code is broken because 'user_id'
# and 'description' fields do not appear in results provided by
# [tempest].os.volumes_client.list_volumes().
# Detailed checking disabled for now. Re-enable when fixed.
# It appears the code was written for v1 volumes client but never
# worked. The problem was not evident because the list of volumes
# was empty.
# Additional adaptation is needed for v2 volumes client.
# for row in results['results']:
# try:
# volume_row = volumes_map[row['data'][volume_id_col]]
# except KeyError:
# return False
# for index in range(len(volume_schema)):
# if (str(row['data'][index]) !=
# str(volume_row[volume_schema[index]['name']])):
# return False
return True
if not test_utils.call_until_true(
func=_check_data_table_cinder_volumes,
duration=100, sleep_for=5):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
def test_update_no_error(self):
if not test_utils.call_until_true(
func=lambda: self.check_datasource_no_error('cinder'),
duration=30, sleep_for=5):
raise exceptions.TimeoutException('Datasource could not poll '
'without error.')

@@ -1,87 +0,0 @@
# Copyright 2016 NTT All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_plugin.tests.scenario import helper
from congress_tempest_plugin.tests.scenario import manager_congress
class TestDoctorDriver(manager_congress.ScenarioPolicyBase):
def setUp(self):
super(TestDoctorDriver, self).setUp()
doctor_setting = {
'name': 'doctor',
'driver': 'doctor',
'config': None,
}
self.client = self.os_admin.congress_client
response = self.client.create_datasource(doctor_setting)
self.datasource_id = response['id']
def tearDown(self):
super(TestDoctorDriver, self).tearDown()
self.client.delete_datasource(self.datasource_id)
def _list_datasource_rows(self, datasource, table):
return self.client.list_datasource_rows(datasource, table)
@decorators.attr(type='smoke')
def test_doctor_event_tables(self):
rows = [
{
"time": "2016-02-22T11:48:55Z",
"type": "compute.host.down",
"details": {
"hostname": "compute1",
"status": "down",
"monitor": "zabbix1",
"monitor_event_id": "111"
}
}
]
expected_row = [
"2016-02-22T11:48:55Z",
"compute.host.down",
"compute1",
"down",
"zabbix1",
"111"
]
# Check if service is up
@helper.retry_on_exception
def _check_service():
self.client.list_datasource_status(self.datasource_id)
return True
if not test_utils.call_until_true(func=_check_service,
duration=60, sleep_for=1):
raise exceptions.TimeoutException("Doctor dataservice is not up")
self.client.update_datasource_row(self.datasource_id, 'events', rows)
results = self._list_datasource_rows(self.datasource_id, 'events')
if len(results['results']) != 1:
error_msg = ('Unexpected additional rows are '
'inserted. row details: %s' % results['results'])
raise exceptions.InvalidStructure(error_msg)
if results['results'][0]['data'] != expected_row:
msg = ('inserted row %s is not expected row %s'
% (results['results'][0]['data'], expected_row))
raise exceptions.InvalidStructure(msg)

@@ -1,136 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import clients
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_plugin.tests.scenario import manager_congress
CONF = config.CONF
class TestGlanceV2Driver(manager_congress.ScenarioPolicyBase):
@classmethod
def skip_checks(cls):
super(TestGlanceV2Driver, cls).skip_checks()
if not (CONF.network.project_networks_reachable or
CONF.network.public_network_id):
msg = ('Either project_networks_reachable must be "true", or '
'public_network_id must be defined.')
cls.enabled = False
raise cls.skipException(msg)
if not CONF.service_available.glance:
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
raise cls.skipException(skip_msg)
def setUp(self):
super(TestGlanceV2Driver, self).setUp()
self.os_primary = clients.Manager(
self.os_admin.auth_provider.credentials)
self.glancev2 = self.os_primary.image_client_v2
self.datasource_id = manager_congress.get_datasource_id(
self.os_admin.congress_client, 'glancev2')
@decorators.attr(type='smoke')
@utils.services('image')
def test_glancev2_images_table(self):
image_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'images')['columns'])
image_id_col = next(i for i, c in enumerate(image_schema)
if c['name'] == 'id')
def _check_data_table_glancev2_images():
# Fetch data from glance each time, because this test may start
# before glance has all the images.
images = self.glancev2.list_images()['images']
image_map = {}
for image in images:
image_map[image['id']] = image
results = (
self.os_admin.congress_client.list_datasource_rows(
self.datasource_id, 'images'))
for row in results['results']:
try:
image_row = image_map[row['data'][image_id_col]]
except KeyError:
return False
for index in range(len(image_schema)):
# glancev2 doesn't return kernel_id/ramdisk_id if
# it isn't present...
if ((image_schema[index]['name'] == 'kernel_id' and
'kernel_id' not in row['data']) or
(image_schema[index]['name'] == 'ramdisk_id' and
'ramdisk_id' not in row['data'])):
continue
# FIXME(arosen): congress-server should retain the type
# but doesn't today.
if (str(row['data'][index]) !=
str(image_row[image_schema[index]['name']])):
return False
return True
if not test_utils.call_until_true(
func=_check_data_table_glancev2_images,
duration=100, sleep_for=4):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
@utils.services('image')
def test_glancev2_tags_table(self):
def _check_data_table_glance_images():
# Fetch data from glance each time, because this test may start
# before glance has all the images.
images = self.glancev2.list_images()['images']
image_tag_map = {}
for image in images:
image_tag_map[image['id']] = image['tags']
results = (
self.os_admin.congress_client.list_datasource_rows(
self.datasource_id, 'tags'))
for row in results['results']:
image_id, tag = row['data'][0], row['data'][1]
glance_image_tags = image_tag_map.get(image_id)
if not glance_image_tags:
# congress had image that glance doesn't know about.
return False
if tag not in glance_image_tags:
# congress had a tag that wasn't on the image.
return False
return True
if not test_utils.call_until_true(
func=_check_data_table_glance_images,
duration=100, sleep_for=5):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
def test_update_no_error(self):
if not test_utils.call_until_true(
func=lambda: self.check_datasource_no_error('glancev2'),
duration=30, sleep_for=5):
raise exceptions.TimeoutException('Datasource could not poll '
'without error.')


@ -1,45 +0,0 @@
# Copyright 2017 VMware Corporation. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions

from congress_tempest_plugin.tests.scenario import manager_congress

CONF = config.CONF


class TestHeatDriver(manager_congress.ScenarioPolicyBase):
@classmethod
def skip_checks(cls):
super(TestHeatDriver, cls).skip_checks()
if not getattr(CONF.service_available, 'heat_plugin', False):
msg = ("%s skipped because heat service is not configured" %
cls.__class__.__name__)
raise cls.skipException(msg)
# TODO(testing): checks on correctness of data in updates
@decorators.attr(type='smoke')
def test_update_no_error(self):
if not test_utils.call_until_true(
func=lambda: self.check_datasource_no_error('heat'),
duration=30, sleep_for=5):
raise exceptions.TimeoutException('Datasource could not poll '
'without error.')


@ -1,45 +0,0 @@
# Copyright 2017 VMware Corporation. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions

from congress_tempest_plugin.tests.scenario import manager_congress

CONF = config.CONF


class TestIronicDriver(manager_congress.ScenarioPolicyBase):
@classmethod
def skip_checks(cls):
super(TestIronicDriver, cls).skip_checks()
if not getattr(CONF.service_available, 'ironic', False):
msg = ("%s skipped because ironic service is not configured" %
cls.__class__.__name__)
raise cls.skipException(msg)
# TODO(testing): checks on correctness of data in updates
@decorators.attr(type='smoke')
def test_update_no_error(self):
if not test_utils.call_until_true(
func=lambda: self.check_datasource_no_error('ironic'),
duration=30, sleep_for=5):
raise exceptions.TimeoutException('Datasource could not poll '
'without error.')


@ -1,206 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import clients
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions

from congress_tempest_plugin.tests.scenario import manager_congress

CONF = config.CONF


class TestKeystoneV3Driver(manager_congress.ScenarioPolicyBase):
@classmethod
def skip_checks(cls):
super(TestKeystoneV3Driver, cls).skip_checks()
if not (CONF.network.project_networks_reachable or
CONF.network.public_network_id):
msg = ('Either project_networks_reachable must be "true", or '
'public_network_id must be defined.')
cls.enabled = False
raise cls.skipException(msg)
def setUp(self):
super(TestKeystoneV3Driver, self).setUp()
self.os_primary = clients.Manager(
self.os_admin.auth_provider.credentials)
self.keystone = self.os_primary.identity_v3_client
self.projects_client = self.os_primary.projects_client
self.domains_client = self.os_primary.domains_client
self.roles_client = self.os_primary.roles_v3_client
self.users_client = self.os_primary.users_v3_client
self.datasource_id = manager_congress.get_datasource_id(
self.os_admin.congress_client, 'keystonev3')
@decorators.attr(type='smoke')
def test_keystone_users_table(self):
user_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'users')['columns'])
user_id_col = next(i for i, c in enumerate(user_schema)
if c['name'] == 'id')
def _check_data_table_keystone_users():
# Fetch data from keystone each time, because this test may start
# before keystone has all the users.
users = self.users_client.list_users()['users']
user_map = {}
for user in users:
user_map[user['id']] = user
results = (
self.os_admin.congress_client.list_datasource_rows(
self.datasource_id, 'users'))
for row in results['results']:
try:
user_row = user_map[row['data'][user_id_col]]
except KeyError:
return False
for index in range(len(user_schema)):
if (user_schema[index]['name'] == 'default_project_id' and
'default_project_id' not in user_row):
# Keystone does not return default_project_id
# when the user has none set.
pass
elif (str(row['data'][index]) !=
str(user_row[user_schema[index]['name']])):
return False
return True
if not test_utils.call_until_true(
func=_check_data_table_keystone_users,
duration=100, sleep_for=4):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
def test_keystone_roles_table(self):
role_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'roles')['columns'])
role_id_col = next(i for i, c in enumerate(role_schema)
if c['name'] == 'id')
def _check_data_table_keystone_roles():
# Fetch data from keystone each time, because this test may start
# before keystone has all the roles.
roles = self.roles_client.list_roles()['roles']
roles_map = {}
for role in roles:
roles_map[role['id']] = role
results = (
self.os_admin.congress_client.list_datasource_rows(
self.datasource_id, 'roles'))
for row in results['results']:
try:
role_row = roles_map[row['data'][role_id_col]]
except KeyError:
return False
for index in range(len(role_schema)):
if (str(row['data'][index]) !=
str(role_row[role_schema[index]['name']])):
return False
return True
if not test_utils.call_until_true(
func=_check_data_table_keystone_roles,
duration=100, sleep_for=4):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
def test_keystone_domains_table(self):
domains_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'domains')['columns'])
domain_id_col = next(i for i, c in enumerate(domains_schema)
if c['name'] == 'id')
def _check_data_table_keystone_domains():
# Fetch data from keystone each time, because this test may start
# before keystone has all the domains.
domains = self.domains_client.list_domains()['domains']
domains_map = {}
for domain in domains:
domains_map[domain['id']] = domain
results = (
self.os_admin.congress_client.list_datasource_rows(
self.datasource_id, 'domains'))
for row in results['results']:
try:
domain_row = domains_map[row['data'][domain_id_col]]
except KeyError:
return False
for index in range(len(domains_schema)):
if (str(row['data'][index]) !=
str(domain_row[domains_schema[index]['name']])):
return False
return True
if not test_utils.call_until_true(
func=_check_data_table_keystone_domains,
duration=100, sleep_for=4):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
def test_keystone_projects_table(self):
projects_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'projects')['columns'])
project_id_col = next(i for i, c in enumerate(projects_schema)
if c['name'] == 'id')
def _check_data_table_keystone_projects():
# Fetch data from keystone each time, because this test may start
# before keystone has all the projects.
projects = self.projects_client.list_projects()['projects']
projects_map = {}
for project in projects:
projects_map[project['id']] = project
results = (
self.os_admin.congress_client.list_datasource_rows(
self.datasource_id, 'projects'))
for row in results['results']:
try:
project_row = projects_map[row['data'][project_id_col]]
except KeyError:
return False
for index in range(len(projects_schema)):
if (str(row['data'][index]) !=
str(project_row[projects_schema[index]['name']])):
return False
return True
if not test_utils.call_until_true(
func=_check_data_table_keystone_projects,
duration=100, sleep_for=5):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
def test_update_no_error(self):
if not test_utils.call_until_true(
func=lambda: self.check_datasource_no_error('keystonev3'),
duration=30, sleep_for=5):
raise exceptions.TimeoutException('Datasource could not poll '
'without error.')


@ -1,77 +0,0 @@
# Copyright 2017 VMware Corporation. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest import config
from tempest.lib import decorators

from congress_tempest_plugin.tests.scenario import manager_congress

CONF = config.CONF
LOG = logging.getLogger(__name__)


class TestMistralDriver(manager_congress.DatasourceDriverTestBase):
@classmethod
def skip_checks(cls):
super(TestMistralDriver, cls).skip_checks()
if not getattr(CONF.service_available, 'mistral', False):
msg = ("%s skipped because mistral service is not configured" %
cls.__class__.__name__)
raise cls.skipException(msg)
def setUp(self):
super(TestMistralDriver, self).setUp()
self.datasource_name = 'mistral'
self.datasource_id = manager_congress.get_datasource_id(
self.os_admin.congress_client, self.datasource_name)
@decorators.attr(type='smoke')
def test_mistral_workflows_table(self):
table_name = 'workflows'
service_data_fetch_func = (
lambda: self.os_admin.mistral_client.get_list_obj(
'workflows')[1]['workflows'])
self.check_service_data_against_congress_table(
table_name, service_data_fetch_func,
missing_attributes_allowed=['description', 'updated_at'])
@decorators.attr(type='smoke')
def test_mistral_actions_table(self):
table_name = 'actions'
service_data_fetch_func = (
lambda: self.os_admin.mistral_client.get_list_obj(
'actions')[1]['actions'])
self.check_service_data_against_congress_table(
table_name, service_data_fetch_func)
# FIXME(ekcs): enable when we figure out how to use admin project in
# tempest test setup to populate executions with dummy data.
# @decorators.attr(type='smoke')
# def test_mistral_workflow_executions_table(self):
# table_name = 'workflow_executions'
# service_data_fetch_func = lambda: self.service_client.get_list_obj(
# 'executions')[1]['executions']
# self.check_service_data_against_congress_table(
# table_name, service_data_fetch_func)
#
# @decorators.attr(type='smoke')
# def test_mistral_action_executions_table(self):
# table_name = 'action_executions'
# service_data_fetch_func = lambda: self.service_client.get_list_obj(
# 'action_executions')[1]['action_executions']
# self.check_service_data_against_congress_table(
# table_name, service_data_fetch_func)


@ -1,218 +0,0 @@
# Copyright 2018 VMware Corporation. NEC, Inc. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions

from congress_tempest_plugin.tests.scenario import helper
from congress_tempest_plugin.tests.scenario import manager_congress

CONF = config.CONF
DRIVER_NAME = 'monasca'


class TestMonascaDriver(manager_congress.ScenarioPolicyBase):
@classmethod
def skip_checks(cls):
super(TestMonascaDriver, cls).skip_checks()
if not getattr(CONF.service_available, DRIVER_NAME, False):
msg = ("%s skipped because %s service is not configured" %
(cls.__class__.__name__, DRIVER_NAME))
raise cls.skipException(msg)
# TODO(testing): checks on correctness of data in updates
@decorators.attr(type='smoke')
def test_update_no_error(self):
if not test_utils.call_until_true(
func=lambda: self.check_datasource_no_error(DRIVER_NAME),
duration=30, sleep_for=5):
raise exceptions.TimeoutException('Datasource could not poll '
'without error.')
class TestMonascaWebhookDriver(manager_congress.ScenarioPolicyBase):
@classmethod
def skip_checks(cls):
super(TestMonascaWebhookDriver, cls).skip_checks()
if not CONF.congress_feature_enabled.monasca_webhook:
msg = ("feature not available in this congress version")
raise cls.skipException(msg)
def setUp(self):
super(TestMonascaWebhookDriver, self).setUp()
monasca_setting = {
'name': 'monasca_webhook',
'driver': 'monasca_webhook',
'config': None,
}
self.client = self.os_admin.congress_client
response = self.client.create_datasource(monasca_setting)
self.datasource_id = response['id']
def tearDown(self):
super(TestMonascaWebhookDriver, self).tearDown()
self.client.delete_datasource(self.datasource_id)
def _list_datasource_rows(self, datasource, table):
return self.client.list_datasource_rows(datasource, table)
@decorators.attr(type='smoke')
@testtools.skipIf(CONF.congress_feature_enabled.monasca_webhook_rocky,
'Test expects new (post-Rocky) monasca webhook schema.')
def test_monasca_alarm_notification_table(self):
test_alarm = {
'metrics': [
{u'dimensions': {u'hostname': u'openstack-13.local.lan',
u'service': u'monitoring'},
u'id': None,
u'name': u'load.avg_1_min'}],
'alarm_id': u'3beb4934-053d-4f8f-9704-273bffc244',
'state': u'ALARM',
'alarm_timestamp': 1531821822,
'tenant_id': u'3661888238874df098988deab07c599d',
'old_state': u'UNDETERMINED',
'alarm_description': u'',
'message': u'Thresholds were exceeded for alarm',
'alarm_definition_id': u'8e5d033f-28cc-459f-91d4-813307e4ca',
'alarm_name': u'alarmPerHost23'}
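# The sample alarm above is pushed through the webhook API below, and
# Congress should flatten it into the alarm_notification, alarms.metrics
# and alarms.metrics.dimensions tables that are checked afterwards.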
# Check if service is up
@helper.retry_on_exception
def _check_service():
self.client.list_datasource_status(self.datasource_id)
return True
if not test_utils.call_until_true(func=_check_service,
duration=60, sleep_for=1):
raise exceptions.TimeoutException(
"Monasca-Webhook data source service is not up")
def _check_result_for_exception(result, expected_result,
result_length=1):
if len(result['results']) != result_length:
error_msg = ('Unexpected number of rows are '
'inserted. Row details: %s' % result['results'])
raise exceptions.InvalidStructure(error_msg)
output = []
for data in result['results']:
output.append(data['data'])
output = sorted(output)
expected_result = sorted(expected_result)
for index in range(result_length):
if output[index] != expected_result[index]:
msg = ('inserted row %s is not expected row %s'
% (output[index], expected_result[index]))
raise exceptions.InvalidStructure(msg)
self.client.send_datasource_webhook(self.datasource_id, test_alarm)
alarm_notification = self._list_datasource_rows(self.datasource_id,
'alarm_notification')
expected_alarm_notification = [[u'3beb4934-053d-4f8f-9704-273bffc244',
u'8e5d033f-28cc-459f-91d4-813307e4ca',
u'alarmPerHost23',
u'',
1531821822,
u'ALARM',
u'UNDETERMINED',
u'Thresholds were exceeded for alarm',
u'3661888238874df098988deab07c599d']]
_check_result_for_exception(alarm_notification,
expected_alarm_notification)
metrics_result = self._list_datasource_rows(self.datasource_id,
'alarms.metrics')
dimension_id = metrics_result['results'][0]['data'][3]
expected_metrics = [[u'3beb4934-053d-4f8f-9704-273bffc244',
u'None',
u'load.avg_1_min',
dimension_id]]
_check_result_for_exception(metrics_result, expected_metrics)
dimension_result = self._list_datasource_rows(
self.datasource_id, 'alarms.metrics.dimensions')
expected_dimensions = [
[dimension_id, u'hostname', u'openstack-13.local.lan'],
[dimension_id, u'service', u'monitoring']]
_check_result_for_exception(dimension_result, expected_dimensions, 2)
@decorators.attr(type='smoke')
@testtools.skipUnless(CONF.congress_feature_enabled.monasca_webhook_rocky,
'Test expects monasca webhook rocky schema.')
def test_monasca_alarm_notification_table_rocky(self):
test_alarm = {
'metrics': [
{u'dimensions': {u'hostname': u'openstack-13.local.lan',
u'service': u'monitoring'},
u'id': None,
u'name': u'load.avg_1_min'}],
'alarm_id': u'3beb4934-053d-4f8f-9704-273bffc2441b',
'state': u'ALARM',
'alarm_timestamp': 1531821822,
'tenant_id': u'3661888238874df098988deab07c599d',
'old_state': u'UNDETERMINED',
'alarm_description': u'',
'message': u'Thresholds were exceeded for the sub-alarms',
'alarm_definition_id': u'8e5d033f-28cc-459f-91d4-813307e4ca8a',
'alarm_name': u'alarmPerHost23'}
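# Under the older Rocky schema the entire webhook payload maps to a
# single wide row in alarm_notification, which is verified below.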
# Check if service is up
@helper.retry_on_exception
def _check_service():
self.client.list_datasource_status(self.datasource_id)
return True
if not test_utils.call_until_true(func=_check_service,
duration=60, sleep_for=1):
raise exceptions.TimeoutException(
"Monasca-Webhook data source service is not up")
self.client.send_datasource_webhook(self.datasource_id, test_alarm)
results = self._list_datasource_rows(self.datasource_id,
'alarm_notification')
if len(results['results']) != 1:
error_msg = ('Unexpected additional rows are '
'inserted. row details: %s' % results['results'])
raise exceptions.InvalidStructure(error_msg)
expected_row = [u'3beb4934-053d-4f8f-9704-273bffc2441b',
u'8e5d033f-28cc-459f-91d4-813307e4ca8a',
u'alarmPerHost23',
u'',
1531821822,
u'ALARM',
u'UNDETERMINED',
u'Thresholds were exceeded for the sub-alarms',
u'3661888238874df098988deab07c599d',
u'None',
u'load.avg_1_min',
u'openstack-13.local.lan',
u'monitoring']
if results['results'][0]['data'] != expected_row:
msg = ('inserted row %s is not expected row %s'
% (results['results'][0]['data'], expected_row))
raise exceptions.InvalidStructure(msg)


@ -1,209 +0,0 @@
# Copyright (c) 2015 Hewlett-Packard. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import string
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions

from congress_tempest_plugin.tests.scenario import manager_congress

CONF = config.CONF


class TestMuranoDriver(manager_congress.ScenarioPolicyBase):
@classmethod
def skip_checks(cls):
super(TestMuranoDriver, cls).skip_checks()
if not getattr(CONF.service_available, 'murano', False):
msg = ("%s skipped as murano is not available" %
cls.__class__.__name__)
raise cls.skipException(msg)
if not (CONF.network.project_networks_reachable or
CONF.network.public_network_id):
msg = ('Either project_networks_reachable must be "true", or '
'public_network_id must be defined.')
cls.enabled = False
raise cls.skipException(msg)
def setUp(self):
super(TestMuranoDriver, self).setUp()
self.congress_client = (
self.os_admin.congress_client)
@decorators.attr(type='smoke')
@utils.services('compute')
def test_murano_predeployment(self):
def _delete_policy_rules(policy_name):
result = self.congress_client.list_policy_rules(
policy_name)['results']
for rule in result:
self.congress_client.delete_policy_rule(
policy_name,
rule['id'])
def _create_random_policy():
policy_name = "murano_%s" % ''.join(
random.choice(string.ascii_lowercase)
for x in range(10))
body = {"name": policy_name}
resp = self.congress_client.create_policy(body)
self.addCleanup(_delete_policy_rules, resp['name'])
return resp['name']
def _create_datasource():
body = {"config": {"username": CONF.auth.admin_username,
"tenant_name": CONF.auth.admin_project_name,
"password": CONF.auth.admin_password,
"auth_url": CONF.identity.uri},
"driver": "murano",
"name": "murano"}
try:
datasource = self.congress_client.create_datasource(body)['id']
self.addCleanup(self.congress_client.delete_datasource,
datasource)
except exceptions.Conflict:
pass
def _create_rule(policy_name, rule):
self.congress_client.create_policy_rule(policy_name, rule)
def _simulate_policy(policy_name, query):
resp = self.congress_client.execute_policy_action(
policy_name,
"simulate",
False,
False,
query)
return resp['result']
rule1 = {
"rule": "allowed_flavors(flavor) :- nova:flavors(flavor_id,"
"flavor, vcpus, ram, disk, ephemeral, rxtx_factor),"
"equal(flavor, \"m1.medium\")"
}
rule2 = {
"rule": "allowed_flavors(flavor) :- nova:flavors(flavor_id,"
"flavor, vcpus, ram, disk, ephemeral, rxtx_factor),"
"equal(flavor, \"m1.small\")"
}
rule3 = {
"rule": "allowed_flavors(flavor) :- nova:flavors(flavor_id,"
"flavor, vcpus, ram, disk, ephemeral, rxtx_factor),"
"equal(flavor, \"m1.tiny\")"
}
rule4 = {
"rule": "murano_pending_envs(env_id) :- "
"murano:objects(env_id, tenant_id, \"io.murano.Environment\"),"
"murano:states(env_id, env_state),"
"equal(env_state, \"pending\")"
}
rule5 = {
"rule": "murano_instances(env_id, instance_id) :- "
"murano:objects(env_id, tenant_id, \"io.murano.Environment\"),"
"murano:objects(service_id, env_id, service_type),"
"murano:parent_types(service_id, \"io.murano.Object\"),"
"murano:parent_types(service_id, \"io.murano.Application\"),"
"murano:parent_types(service_id, service_type),"
"murano:objects(instance_id, service_id, instance_type),"
"murano:parent_types(instance_id,"
"\"io.murano.resources.Instance\"),"
"murano:parent_types(instance_id, \"io.murano.Object\"),"
"murano:parent_types(instance_id, instance_type)"
}
rule6 = {
"rule": "murano_instance_flavors(instance_id, flavor) :- "
"murano:properties(instance_id, \"flavor\", flavor)"
}
rule7 = {
"rule": "predeploy_error(env_id) :- "
"murano_pending_envs(env_id),"
"murano_instances(env_id, instance_id),"
"murano_instance_flavors(instance_id, flavor),"
"not allowed_flavors(flavor)"
}
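# Taken together, rules 1-7 derive predeploy_error(env_id) for any
# pending Murano environment containing an instance whose flavor is
# not one of the allowed m1.tiny/m1.small/m1.medium flavors.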
sim_query1 = {
"query": "predeploy_error(env_id)",
"action_policy": "action",
"sequence": "murano:objects+(\"env_uuid\", \"tenant_uuid\","
"\"io.murano.Environment\") murano:states+(\"env_uuid\", "
"\"pending\") murano:objects+(\"service_uuid\", \"env_uuid\", "
"\"service_type\") murano:parent_types+(\"service_uuid\", "
"\"io.murano.Object\") murano:parent_types+(\"service_uuid\", "
"\"io.murano.Application\") murano:parent_types+(\"service_uuid\","
"\"service_type\") murano:objects+(\"instance_uuid\", "
"\"service_uuid\", \"service_type\") murano:objects+(\""
"instance_uuid\", \"service_uuid\", \"instance_type\") "
"murano:parent_types+(\"instance_uuid\", "
"\"io.murano.resources.Instance\") murano:parent_types+(\""
"instance_uuid\", \"io.murano.Object\") murano:parent_types+(\""
"instance_uuid\", \"instance_type\") murano:properties+(\""
"instance_uuid\", \"flavor\", \"m1.small\")"
}
sim_query2 = {
"query": "predeploy_error(env_id)",
"action_policy": "action",
"sequence": "murano:objects+(\"env_uuid\", \"tenant_uuid\","
"\"io.murano.Environment\") murano:states+(\"env_uuid\", "
"\"pending\") murano:objects+(\"service_uuid\", \"env_uuid\", "
"\"service_type\") murano:parent_types+(\"service_uuid\", "
"\"io.murano.Object\") murano:parent_types+(\"service_uuid\", "
"\"io.murano.Application\") murano:parent_types+(\"service_uuid\","
"\"service_type\") murano:objects+(\"instance_uuid\", "
"\"service_uuid\", \"service_type\") murano:objects+(\""
"instance_uuid\", \"service_uuid\", \"instance_type\") "
"murano:parent_types+(\"instance_uuid\", "
"\"io.murano.resources.Instance\") murano:parent_types+(\""
"instance_uuid\", \"io.murano.Object\") murano:parent_types+(\""
"instance_uuid\", \"instance_type\") murano:properties+(\""
"instance_uuid\", \"flavor\", \"m1.large\")"
}
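# sim_query1 simulates a deployment using the allowed m1.small flavor
# and should produce no rows; sim_query2 uses m1.large and should
# report predeploy_error("env_uuid").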
_create_datasource()
policy_name = _create_random_policy()
_create_rule(policy_name, rule1)
_create_rule(policy_name, rule2)
_create_rule(policy_name, rule3)
_create_rule(policy_name, rule4)
_create_rule(policy_name, rule5)
_create_rule(policy_name, rule6)
_create_rule(policy_name, rule7)
result = _simulate_policy(policy_name, sim_query1)
self.assertEmpty(result)
result = _simulate_policy(policy_name, sim_query2)
self.assertEqual('predeploy_error("env_uuid")', result[0])
@decorators.attr(type='smoke')
def test_update_no_error(self):
if not test_utils.call_until_true(
func=lambda: self.check_datasource_no_error('murano'),
duration=30, sleep_for=5):
raise exceptions.TimeoutException('Datasource could not poll '
'without error.')


@ -1,446 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
from tempest import clients
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions

from congress_tempest_plugin.tests.scenario import helper
from congress_tempest_plugin.tests.scenario import manager_congress

CONF = config.CONF
LOG = logging.getLogger(__name__)


class TestNeutronV2Driver(manager_congress.ScenarioPolicyBase):
@classmethod
def skip_checks(cls):
super(TestNeutronV2Driver, cls).skip_checks()
if not (CONF.network.project_networks_reachable or
CONF.network.public_network_id):
msg = ('Either project_networks_reachable must be "true", or '
'public_network_id must be defined.')
cls.enabled = False
raise cls.skipException(msg)
if not CONF.service_available.neutron:
skip_msg = ("%s skipped as neutron is not available"
% cls.__name__)
raise cls.skipException(skip_msg)
def setUp(self):
super(TestNeutronV2Driver, self).setUp()
self.os_primary = clients.Manager(
self.os_admin.auth_provider.credentials)
self.networks_client = self.os_admin.networks_client
self.subnets_client = self.os_admin.subnets_client
self.ports_client = self.os_admin.ports_client
self.security_groups_client = self.os_admin.security_groups_client
self.routers_client = self.os_admin.routers_client
self.datasource_id = manager_congress.get_datasource_id(
self.os_admin.congress_client, 'neutronv2')
@decorators.attr(type='smoke')
@utils.services('network')
def test_neutronv2_networks_table(self):
@helper.retry_on_exception
def _check_data():
networks = self.networks_client.list_networks()
network_map = {}
for network in networks['networks']:
network_map[network['id']] = network
client = self.os_admin.congress_client
client.request_refresh(self.datasource_id)
time.sleep(1)
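# request_refresh asks Congress to re-poll Neutron; the short sleep
# gives the refresh a chance to land before the rows are compared.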
network_schema = (client.show_datasource_table_schema(
self.datasource_id, 'networks')['columns'])
results = (client.list_datasource_rows(
self.datasource_id, 'networks'))
for row in results['results']:
network_row = network_map[row['data'][0]]
for index in range(len(network_schema)):
if (str(row['data'][index]) !=
str(network_row[network_schema[index]['name']])):
return False
return True
if not test_utils.call_until_true(func=_check_data,
duration=200, sleep_for=10):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
@utils.services('network')
def test_neutronv2_ports_tables(self):
port_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'ports')['columns'])
port_sec_binding_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'security_group_port_bindings')['columns'])
fixed_ips_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'fixed_ips')['columns'])
@helper.retry_on_exception
def _check_data():
ports_from_neutron = self.ports_client.list_ports()
port_map = {}
for port in ports_from_neutron['ports']:
port_map[port['id']] = port
client = self.os_admin.congress_client
client.request_refresh(self.datasource_id)
time.sleep(1)
ports = (client.list_datasource_rows(self.datasource_id, 'ports'))
security_group_port_bindings = (
client.list_datasource_rows(
self.datasource_id, 'security_group_port_bindings'))
fixed_ips = (
client.list_datasource_rows(self.datasource_id, 'fixed_ips'))
# Validate ports table
for row in ports['results']:
port_row = port_map[row['data'][0]]
for index in range(len(port_schema)):
if (str(row['data'][index]) !=
str(port_row[port_schema[index]['name']])):
return False
# validate security_group_port_bindings table
for row in security_group_port_bindings['results']:
port_row = port_map[row['data'][0]]
for index in range(len(port_sec_binding_schema)):
row_index = port_sec_binding_schema[index]['name']
# Translate port_id -> id
if row_index == 'port_id':
if (str(row['data'][index]) !=
str(port_row['id'])):
return False
elif row_index == 'security_group_id':
if (str(row['data'][index]) not in
port_row['security_groups']):
return False
# validate fixed_ips
for row in fixed_ips['results']:
port_row = port_map[row['data'][0]]
for index in range(len(fixed_ips_schema)):
row_index = fixed_ips_schema[index]['name']
if row_index in ['subnet_id', 'ip_address']:
if not port_row['fixed_ips']:
continue
for fixed_ip in port_row['fixed_ips']:
if row['data'][index] == fixed_ip[row_index]:
break
else:
# no subnet_id/ip_address match found
return False
return True
if not test_utils.call_until_true(func=_check_data,
duration=200, sleep_for=10):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
@utils.services('network')
def test_neutronv2_subnets_tables(self):
subnet_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'subnets')['columns'])
host_routes_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'host_routes')['columns'])
dns_nameservers_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'dns_nameservers')['columns'])
allocation_pools_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'allocation_pools')['columns'])
@helper.retry_on_exception
def _check_data():
subnets_from_neutron = self.subnets_client.list_subnets()
subnet_map = {}
for subnet in subnets_from_neutron['subnets']:
subnet_map[subnet['id']] = subnet
client = self.os_admin.congress_client
client.request_refresh(self.datasource_id)
time.sleep(1)
subnets = (
client.list_datasource_rows(self.datasource_id, 'subnets'))
host_routes = (
client.list_datasource_rows(self.datasource_id, 'host_routes'))
dns_nameservers = (
client.list_datasource_rows(
self.datasource_id, 'dns_nameservers'))
allocation_pools = (
client.list_datasource_rows(
self.datasource_id, 'allocation_pools'))
# Validate subnets table
for row in subnets['results']:
subnet_row = subnet_map[row['data'][0]]
for index in range(len(subnet_schema)):
if (str(row['data'][index]) !=
str(subnet_row[subnet_schema[index]['name']])):
return False
# validate dns_nameservers
for row in dns_nameservers['results']:
subnet_row = subnet_map[row['data'][0]]
for index in range(len(dns_nameservers_schema)):
row_index = dns_nameservers_schema[index]['name']
if row_index in ['dns_nameserver']:
if (row['data'][index]
not in subnet_row['dns_nameservers']):
return False
# validate host_routes
for row in host_routes['results']:
subnet_row = subnet_map[row['data'][0]]
for index in range(len(host_routes_schema)):
row_index = host_routes_schema[index]['name']
if row_index in ['destination', 'nexthop']:
if not subnet_row['host_routes']:
continue
for host_route in subnet_row['host_routes']:
if row['data'][index] == host_route[row_index]:
break
else:
# no destination/nexthop match found
return False
# validate allocation_pools
for row in allocation_pools['results']:
subnet_row = subnet_map[row['data'][0]]
for index in range(len(allocation_pools_schema)):
row_index = allocation_pools_schema[index]['name']
if row_index in ['start', 'end']:
if not subnet_row['allocation_pools']:
continue
for allocation_pool in subnet_row['allocation_pools']:
if (row['data'][index] ==
allocation_pool[row_index]):
break
else:
# no start/end match found
return False
return True
if not test_utils.call_until_true(func=_check_data,
duration=200, sleep_for=10):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
@utils.services('network')
def test_neutronv2_routers_tables(self):
router_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'routers')['columns'])
ext_gw_info_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'external_gateway_infos')['columns'])
@helper.retry_on_exception
def _check_data():
routers_from_neutron = self.routers_client.list_routers()
router_map = {}
for router in routers_from_neutron['routers']:
router_map[router['id']] = router
client = self.os_admin.congress_client
client.request_refresh(self.datasource_id)
time.sleep(1)
routers = (
client.list_datasource_rows(self.datasource_id, 'routers'))
ext_gw_info = (
client.list_datasource_rows(
self.datasource_id, 'external_gateway_infos'))
# Validate routers table
for row in routers['results']:
router_row = router_map[row['data'][0]]
for index in range(len(router_schema)):
if (str(row['data'][index]) !=
str(router_row[router_schema[index]['name']])):
return False
# validate external_gateway_infos
for row in ext_gw_info['results']:
router_ext_gw_info = (
router_map[row['data'][0]]['external_gateway_info'])
# populate router_id
router_ext_gw_info['router_id'] = row['data'][0]
for index in range(len(ext_gw_info_schema)):
val = router_ext_gw_info[ext_gw_info_schema[index]['name']]
if (str(row['data'][index]) != str(val)):
return False
return True
if not test_utils.call_until_true(func=_check_data,
duration=200, sleep_for=10):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
@utils.services('network')
def test_neutronv2_security_groups_table(self):
sg_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'security_groups')['columns'])
@helper.retry_on_exception
def _check_data():
client = self.security_groups_client
security_groups_neutron = client.list_security_groups()
security_groups_map = {}
for security_group in security_groups_neutron['security_groups']:
security_groups_map[security_group['id']] = security_group
client = self.os_admin.congress_client
client.request_refresh(self.datasource_id)
time.sleep(1)
security_groups = (
client.list_datasource_rows(
self.datasource_id, 'security_groups'))
# Validate security_group table
for row in security_groups['results']:
sg_row = security_groups_map[row['data'][0]]
for index in range(len(sg_schema)):
if (str(row['data'][index]) !=
str(sg_row[sg_schema[index]['name']])):
return False
return True
if not test_utils.call_until_true(func=_check_data,
duration=200, sleep_for=10):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
@utils.services('network')
def test_neutronv2_security_group_rules_table(self):
sgrs_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'security_group_rules')['columns'])
@helper.retry_on_exception
def _check_data():
client = self.security_groups_client
security_groups_neutron = client.list_security_groups()
sgrs_map = {} # security_group_rules
for sg in security_groups_neutron['security_groups']:
for sgr in sg['security_group_rules']:
sgrs_map[sgr['id']] = sgr
client = self.os_admin.congress_client
client.request_refresh(self.datasource_id)
time.sleep(1)
security_group_rules = (
client.list_datasource_rows(
self.datasource_id, 'security_group_rules'))
# Validate security_group_rules table
for row in security_group_rules['results']:
sg_rule_row = sgrs_map[row['data'][1]]
for index in range(len(sgrs_schema)):
if (str(row['data'][index]) !=
str(sg_rule_row[sgrs_schema[index]['name']])):
return False
return True
if not test_utils.call_until_true(func=_check_data,
duration=200, sleep_for=10):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
def test_neutronv2_attach_detach_port_security_group(self):
self.network, self.subnet, self.router = self.create_networks()
self.check_networks()
# first create a test port with exactly 1 (default) security group
post_body = {
"port_security_enabled": True,
"network_id": self.network['id']}
body = self.ports_client.create_port(**post_body)
test_port = body['port']
self.addCleanup(self.ports_client.delete_port, test_port['id'])
# test detach and re-attach
test_policy = self._create_random_policy()
# use rules to detach group
self._create_policy_rule(
test_policy,
'execute[neutronv2:detach_port_security_group("%s", "%s")] '
':- p(1)' % (
test_port['id'], test_port['security_groups'][0]))
self._create_policy_rule(test_policy, 'p(1)')
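# Inserting the fact p(1) satisfies the rule body, which triggers the
# policy engine to execute neutronv2:detach_port_security_group on the
# test port; p(2) re-attaches the group further below.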
def _check_data(num_sec_grps):
updated_port = self.ports_client.show_port(test_port['id'])
return len(updated_port['port']['security_groups']) == num_sec_grps
if not test_utils.call_until_true(func=lambda: _check_data(0),
duration=30, sleep_for=1):
raise exceptions.TimeoutException("Security group did not detach "
"within allotted time.")
# use rules to attach group
self._create_policy_rule(
test_policy,
'execute[neutronv2:attach_port_security_group("%s", "%s")] '
':- p(2)' % (
test_port['id'], test_port['security_groups'][0]))
self._create_policy_rule(test_policy, 'p(2)')
if not test_utils.call_until_true(func=lambda: _check_data(1),
duration=30, sleep_for=1):
raise exceptions.TimeoutException("Security group did not attach "
"within allotted time.")
@decorators.attr(type='smoke')
def test_update_no_error(self):
if not test_utils.call_until_true(
func=lambda: self.check_datasource_no_error('neutronv2'),
duration=30, sleep_for=5):
raise exceptions.TimeoutException('Datasource could not poll '
'without error.')


@ -1,249 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from tempest import clients
from tempest.common import utils as tempest_utils
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions

from congress_tempest_plugin.tests.scenario import helper
from congress_tempest_plugin.tests.scenario import manager_congress

CONF = config.CONF
RULE_TYPE = "bandwidth_limit_rules"


class TestNeutronV2QosDriver(manager_congress.ScenarioPolicyBase):
DATASOURCE_NAME = 'neutronv2_qos'
@classmethod
def skip_checks(cls):
super(TestNeutronV2QosDriver, cls).skip_checks()
# TODO(qos): check whether QoS extension is enabled
if not (CONF.network.project_networks_reachable or
CONF.network.public_network_id):
msg = ('Either project_networks_reachable must be "true", or '
'public_network_id must be defined.')
cls.enabled = False
raise cls.skipException(msg)
if not CONF.service_available.neutron:
skip_msg = ("%s skipped as neutron is not available"
% cls.__name__)
raise cls.skipException(skip_msg)
if not tempest_utils.is_extension_enabled('qos', 'network'):
skip_msg = ("%s skipped as neutron QoS extension is not available"
% cls.__name__)
raise cls.skipException(skip_msg)
def setUp(self):
super(TestNeutronV2QosDriver, self).setUp()
self.qos_rules = []
self.qos_policies = []
self.os_primary = clients.Manager(
self.os_admin.auth_provider.credentials)
body = {"config": {"username": CONF.auth.admin_username,
"tenant_name": CONF.auth.admin_project_name,
"password": CONF.auth.admin_password,
"auth_url": CONF.identity.uri},
"driver": self.DATASOURCE_NAME,
"name": self.DATASOURCE_NAME}
try:
self.os_admin.congress_client.create_datasource(body)['id']
except exceptions.Conflict:
pass
self.datasource_id = manager_congress.get_datasource_id(
self.os_admin.congress_client, self.DATASOURCE_NAME)
# Get client
self.admin_qos_client = self.os_admin.qos_client
self.admin_qos_rule_client = self.os_admin.qos_rule_client
self.networks_client = self.os_primary.networks_client
self.ports_client = self.os_primary.ports_client
# Create qos and qos rule
self.qos_policy = self._create_qos_policy('test_qos_policy',
description="test",
share=True)
self.qos_rule = self._create_qos_bandwidth_limit_rule(
self.qos_policy['id'], 1000, 1000)
# Associate policy with port
body = self.networks_client.create_network(
name="test_qos_network")
self.network = body["network"]
body = self.ports_client.create_port(
network_id=self.network['id'])
self.port = body["port"]
self.ports_client.update_port(
self.port['id'], qos_policy_id=self.qos_policy['id'])
def tearDown(self):
super(TestNeutronV2QosDriver, self).tearDown()
# Clear port and net
self.ports_client.delete_port(self.port['id'])
self.networks_client.delete_network(self.network["id"])
# Clear qos policy and qos rule
self.admin_qos_rule_client.delete_qos_rule(
self.qos_policy['id'], RULE_TYPE, self.qos_rule['id'])
self.admin_qos_client.delete_qos_policy(self.qos_policy['id'])
def _create_qos_policy(self, name, description=None, share=False):
"""Wrapper utility that returns a test QoS policy."""
body = self.admin_qos_client.create_qos_policy(
name=name, description=description, shared=share)
qos_policy = body['policy']
self.qos_policies.append(qos_policy)
return qos_policy
def _create_qos_bandwidth_limit_rule(self, policy_id, max_kbps,
max_burst_kbps,
direction='egress'):
"""Wrapper utility that returns a test QoS bandwidth limit rule."""
rule_type = RULE_TYPE
body = self.admin_qos_rule_client.create_qos_rule(
policy_id, rule_type,
max_kbps=max_kbps, max_burst_kbps=max_burst_kbps,
direction=direction)
qos_rule = body['bandwidth_limit_rule']
self.qos_rules.append(qos_rule)
return qos_rule
@decorators.skip_because(bug='1811740')
@decorators.attr(type='smoke')
@tempest_utils.services('network')
def test_neutronv2_ports_tables(self):
port_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'ports')['columns'])
port_qos_binding_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'qos_policy_port_bindings')['columns'])
qos_policy_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'policies')['columns'])
qos_rule_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'rules')['columns'])
@helper.retry_on_exception
def _check_data_for_port():
ports_from_neutron = self.ports_client.list_ports()
port_map = {}
for port in ports_from_neutron['ports']:
port_map[port['id']] = port
client = self.os_admin.congress_client
client.request_refresh(self.datasource_id)
time.sleep(1)
ports = (client.list_datasource_rows(self.datasource_id, 'ports'))
qos_policy_port_bindings = (
client.list_datasource_rows(
self.datasource_id, 'qos_policy_port_bindings'))
# Validate ports table
for row in ports['results']:
port_row = port_map[row['data'][0]]
for index in range(len(port_schema)):
if (str(row['data'][index]) !=
str(port_row[port_schema[index]['name']])):
return False
# validate qos_policy_port_bindings table
for row in qos_policy_port_bindings['results']:
port_row = port_map[row['data'][0]]
for index in range(len(port_qos_binding_schema)):
row_index = port_qos_binding_schema[index]['name']
# Translate port_id -> id
if row_index == 'port_id':
if (str(row['data'][index]) !=
str(port_row['id'])):
return False
elif row_index == 'qos_policy_id':
if (str(row['data'][index]) not in
port_row['policies']):
return False
return True
@helper.retry_on_exception
def _check_data_for_qos():
qos_from_neutron = self.admin_qos_client.list_qos_policy()
rule_from_neutron = self.admin_qos_rule_client.list_qos_rule(
self.qos_policy['id'])
policy_map = {}
rule_map = {}
for policy in qos_from_neutron['policies']:
policy_map[policy['id']] = policy
for rule in rule_from_neutron['policy']['rules']:
rule_map[self.qos_policy['id']] = rule
client = self.os_admin.congress_client
client.request_refresh(self.datasource_id)
time.sleep(1)
qos_policies = (client.list_datasource_rows(
self.datasource_id, 'policies'))
qos_rules = (client.list_datasource_rows(
self.datasource_id, 'rules'))
# Validate policies table
for row in qos_policies['results']:
policy_row = policy_map[row['data'][0]]
for index in range(len(qos_policy_schema)):
if (str(row['data'][index]) !=
str(policy_row[qos_policy_schema[index]['name']])):
return False
# Validate rules table
for row in qos_rules['results']:
rule_row = rule_map[row['data'][0]]
for index in range(len(qos_rule_schema)):
if str(row['data'][index]) != "None":
if (str(row['data'][index]) !=
str(rule_row[qos_rule_schema[index]['name']])):
return False
return True
if not test_utils.call_until_true(func=_check_data_for_port,
duration=200, sleep_for=10):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
if not test_utils.call_until_true(func=_check_data_for_qos,
duration=200, sleep_for=10):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
def test_update_no_error(self):
if not test_utils.call_until_true(
func=lambda: self.check_datasource_no_error(
self.DATASOURCE_NAME),
duration=30, sleep_for=5):
raise exceptions.TimeoutException('Datasource could not poll '
'without error.')


@ -1,241 +0,0 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions

from congress_tempest_plugin.tests.scenario import helper
from congress_tempest_plugin.tests.scenario import manager_congress

CONF = config.CONF


class TestNovaDriver(manager_congress.ScenarioPolicyBase):
@classmethod
def skip_checks(cls):
super(TestNovaDriver, cls).skip_checks()
if not CONF.service_available.nova:
skip_msg = ("%s skipped as nova is not available" % cls.__name__)
raise cls.skipException(skip_msg)
if not (CONF.network.project_networks_reachable or
CONF.network.public_network_id):
msg = ('Either project_networks_reachable must be "true", or '
'public_network_id must be defined.')
cls.enabled = False
raise cls.skipException(msg)
if not CONF.congress_feature_enabled.nova_driver:
# queens flag for skipping nova driver tests because
# congress nova driver in queens does not work with the new
# novaclient 10.1.0 now part of upper-constraints
# https://review.opendev.org/#/c/571540/
msg = 'Nova driver not available.'
raise cls.skipException(msg)
def setUp(self):
super(TestNovaDriver, self).setUp()
self.keypairs = {}
self.servers = []
self.datasource_id = manager_congress.get_datasource_id(
self.os_admin.congress_client, 'nova')
self._setup_network_and_servers()
@decorators.attr(type='smoke')
@utils.services('compute', 'network')
def test_nova_datasource_driver_servers(self):
server_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'servers')['columns'])
# Convert some of the column names.
def convert_col(col):
if col == 'host_id':
return 'hostId'
elif col == 'image_id':
return 'image'
elif col == 'flavor_id':
return 'flavor'
elif col == 'created_at':
return 'created'
elif col == 'zone':
return 'OS-EXT-AZ:availability_zone'
elif col == 'host_name':
return 'OS-EXT-SRV-ATTR:hypervisor_hostname'
else:
return col
keys_servers = [convert_col(c['name']) for c in server_schema]
@helper.retry_on_exception
def _check_data_table_nova_servers():
results = (
self.os_admin.congress_client.list_datasource_rows(
self.datasource_id, 'servers'))
for row in results['results']:
match = True
for index in range(len(keys_servers)):
if keys_servers[index] in ['image', 'flavor']:
val = self.servers[0][keys_servers[index]]['id']
# The test servers created here do not have this attribute,
# so it is skipped in the tempest tests.
elif keys_servers[index] in \
['OS-EXT-SRV-ATTR:hypervisor_hostname']:
continue
else:
val = self.servers[0][keys_servers[index]]
if row['data'][index] != val:
match = False
break
if match:
return True
return False
if not test_utils.call_until_true(func=_check_data_table_nova_servers,
duration=100, sleep_for=5):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
@utils.services('compute', 'network')
@testtools.skipUnless(
CONF.congress_feature_enabled.nova_servers_addresses_table,
'test checks nova server addresses added in stein')
def test_nova_datasource_driver_servers_addresses(self):
server_addresses_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, 'servers.addresses')['columns'])
def convert_col(col):
if col == 'server_id':
return 'id'
elif col == 'address':
return 'addr'
elif col == 'mac_address':
return 'OS-EXT-IPS-MAC:mac_addr'
elif col == 'address_type':
return 'OS-EXT-IPS:type'
else:
return col
@helper.retry_on_exception
def _check_data_table_nova_servers_addresses():
# Note(Akhil): for now we compare only the single server created by
# this test; in future all servers should be checked.
# Refresh self.servers after the floating IP has been associated.
self.servers[0] = self.show_server(self.servers[0]['id'])
addresses = self.servers[0]['addresses']
# The test server currently has exactly one network attached; if
# that changes, this way of getting the network name must be
# updated.
network_name = list(addresses.keys())[0]
# Check that the floating IP now appears in self.servers
# alongside the fixed IP.
if len(addresses[network_name]) != 2:
return False
keys = [convert_col(c['name']) for c in server_addresses_schema]
results = (
self.os_admin.congress_client.list_datasource_rows(
self.datasource_id, 'servers.addresses'))
# Note: the loop below checks that every address in addresses is
# reflected in results['results']
match = True
# traversing addresses of test server from nova service
for address in addresses[network_name]:
# traversing server addresses from congress nova datasource
for row in results['results']:
for index in range(len(keys)):
if keys[index] == 'id':
val = self.servers[0]['id']
elif keys[index] == 'network_name':
val = network_name
else:
val = address[keys[index]]
if row['data'][index] != val:
match = False
break
match = True
if match:
break
if not match:
return False
if match:
return True
return False
if not test_utils.call_until_true(
func=_check_data_table_nova_servers_addresses,
duration=100, sleep_for=5):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
@utils.services('compute', 'network')
def test_nova_datasource_driver_flavors(self):
@helper.retry_on_exception
def _check_data_table_nova_flavors():
# Fetch data from nova each time, because this test may start
# before nova has all the flavors.
flavors = self.flavors_client.list_flavors(detail=True)
flavor_id_map = {}
for flavor in flavors['flavors']:
flavor_id_map[flavor['id']] = flavor
results = (
self.os_admin.congress_client.list_datasource_rows(
self.datasource_id, 'flavors'))
# TODO(alexsyip): Not sure what the following OS-FLV-EXT-DATA:
# prefix is for.
keys = ['id', 'name', 'vcpus', 'ram', 'disk',
'OS-FLV-EXT-DATA:ephemeral', 'rxtx_factor']
for row in results['results']:
match = True
try:
flavor_row = flavor_id_map[row['data'][0]]
except KeyError:
return False
for index in range(len(keys)):
if row['data'][index] != flavor_row[keys[index]]:
match = False
break
if match:
return True
return False
if not test_utils.call_until_true(func=_check_data_table_nova_flavors,
duration=100, sleep_for=5):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
def test_update_no_error(self):
if not test_utils.call_until_true(
func=lambda: self.check_datasource_no_error('nova'),
duration=30, sleep_for=5):
raise exceptions.TimeoutException('Datasource could not poll '
'without error.')


@ -1,45 +0,0 @@
# Copyright 2017 VMware Inc. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions

from congress_tempest_plugin.tests.scenario import manager_congress

CONF = config.CONF


class TestSwiftDriver(manager_congress.ScenarioPolicyBase):
@classmethod
def skip_checks(cls):
super(TestSwiftDriver, cls).skip_checks()
if not CONF.service_available.swift:
msg = ("%s skipped because swift service is not configured" %
cls.__class__.__name__)
raise cls.skipException(msg)
# TODO(testing): checks on correctness of data in updates
@decorators.attr(type='smoke')
def test_update_no_error(self):
if not test_utils.call_until_true(
func=lambda: self.check_datasource_no_error('swift'),
duration=30, sleep_for=5):
raise exceptions.TimeoutException('Datasource could not poll '
'without error.')


@@ -1,220 +0,0 @@
# Copyright (c) 2018 VMware Inc
# Copyright 2016 NTT All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_plugin.tests.scenario import helper
from congress_tempest_plugin.tests.scenario import manager_congress
CONF = config.CONF
LOG = logging.getLogger(__name__)
DS_NAME = 'vitrage'
DRIVER_NAME = 'vitrage'
TEST_PAYLOAD_ACTIVATE = {
"notification": "vitrage.alarm.activate",
"payload": {
"vitrage_id": "2def31e9-6d9f-4c16-b007-893caa806cd4",
"resource": {
"vitrage_id": "437f1f4c-ccce-40a4-ac62-1c2f1fd9f6ac",
"name": "app-1-server-1-jz6qvznkmnif",
"update_timestamp": "2018-01-22 10:00:34.327142+00:00",
"vitrage_category": "RESOURCE",
"vitrage_operational_state": "OK",
"vitrage_type": "nova.instance",
"project_id": "8f007e5ba0944e84baa6f2a4f2b5d03a",
"id": "9b7d93b9-94ec-41e1-9cec-f28d4f8d702c"},
"update_timestamp": "2018-01-22T10:00:34Z",
"vitrage_category": "ALARM",
"state": "Active",
"vitrage_type": "vitrage",
"vitrage_operational_severity": "WARNING",
"name": "Instance memory performance degraded"}}
TEST_PAYLOAD_DEACTIVATE = {
"notification": "vitrage.alarm.deactivate",
"payload": {
"vitrage_id": "2def31e9-6d9f-4c16-b007-893caa806cd4",
"resource": {
"vitrage_id": "437f1f4c-ccce-40a4-ac62-1c2f1fd9f6ac",
"name": "app-1-server-1-jz6qvznkmnif",
"update_timestamp": "2018-01-22 11:00:34.327142+00:00",
"vitrage_category": "RESOURCE",
"vitrage_operational_state": "OK",
"vitrage_type": "nova.instance",
"project_id": "8f007e5ba0944e84baa6f2a4f2b5d03a",
"id": "9b7d93b9-94ec-41e1-9cec-f28d4f8d702c"},
"update_timestamp": "2018-01-22T11:00:34Z",
"vitrage_category": "ALARM",
"state": "Inactive",
"vitrage_type": "vitrage",
"vitrage_operational_severity": "OK",
"name": "Instance memory performance degraded"}}
class TestVitrageDriver(manager_congress.ScenarioPolicyBase):
@classmethod
def skip_checks(cls):
super(TestVitrageDriver, cls).skip_checks()
if not CONF.congress_feature_enabled.vitrage_webhook:
msg = ("feature not available in this congress version")
raise cls.skipException(msg)
def setUp(self):
super(TestVitrageDriver, self).setUp()
vitrage_setting = {
'name': DS_NAME,
'driver': DRIVER_NAME,
'config': None,
}
self.client = self.os_admin.congress_client
response = self.client.create_datasource(vitrage_setting)
self.datasource_id = response['id']
# Check if service is up
@helper.retry_on_exception
def _check_service():
self.client.list_datasource_status(self.datasource_id)
return True
if not test_utils.call_until_true(func=_check_service,
duration=60, sleep_for=1):
raise exceptions.TimeoutException(
"Vitrage data source service is not up")
def tearDown(self):
super(TestVitrageDriver, self).tearDown()
self.client.delete_datasource(self.datasource_id)
def _list_datasource_rows(self, datasource, table):
return self.client.list_datasource_rows(datasource, table)
@decorators.attr(type='smoke')
def test_vitrage_alarms_table(self):
self.client.send_datasource_webhook(
self.datasource_id, TEST_PAYLOAD_ACTIVATE)
results = self._list_datasource_rows(self.datasource_id, 'alarms')
if len(results['results']) != 1:
            error_msg = ('Unexpected additional rows were '
                         'inserted. Row details: %s' % results['results'])
raise exceptions.InvalidStructure(error_msg)
expected_row = [u'Instance memory performance degraded',
u'Active',
u'vitrage',
u'WARNING',
u'2def31e9-6d9f-4c16-b007-893caa806cd4',
u'2018-01-22T10:00:34Z',
results['results'][0]['data'][6],
u'app-1-server-1-jz6qvznkmnif',
u'9b7d93b9-94ec-41e1-9cec-f28d4f8d702c',
u'437f1f4c-ccce-40a4-ac62-1c2f1fd9f6ac',
u'8f007e5ba0944e84baa6f2a4f2b5d03a',
u'OK',
u'nova.instance']
if results['results'][0]['data'] != expected_row:
msg = ('inserted row %s is not expected row %s'
% (results['results'][0]['data'], expected_row))
raise exceptions.InvalidStructure(msg)
self.client.send_datasource_webhook(
self.datasource_id, TEST_PAYLOAD_DEACTIVATE)
results = self._list_datasource_rows(self.datasource_id, 'alarms')
if len(results['results']) != 1:
            error_msg = ('Unexpected additional rows were '
                         'inserted. Row details: %s' % results['results'])
raise exceptions.InvalidStructure(error_msg)
expected_row = [u'Instance memory performance degraded',
u'Inactive',
u'vitrage',
u'OK',
u'2def31e9-6d9f-4c16-b007-893caa806cd4',
u'2018-01-22T11:00:34Z',
results['results'][0]['data'][6],
u'app-1-server-1-jz6qvznkmnif',
u'9b7d93b9-94ec-41e1-9cec-f28d4f8d702c',
u'437f1f4c-ccce-40a4-ac62-1c2f1fd9f6ac',
u'8f007e5ba0944e84baa6f2a4f2b5d03a',
u'OK',
u'nova.instance']
if results['results'][0]['data'] != expected_row:
msg = ('inserted row %s is not expected row %s'
% (results['results'][0]['data'], expected_row))
raise exceptions.InvalidStructure(msg)
@decorators.attr(type='smoke')
def test_vitrage_alarms_table_subscribe_then_publish(self):
policy_name = self._create_random_policy('vitrage')
policy_rule = 'test(name) :- {}:alarms(name=name)'.format(DS_NAME)
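        # The Datalog rule above projects the name column of the vitrage
        # datasource's alarms table into a local table test(name).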
self._create_policy_rule_retry(policy_name, policy_rule)
self.client.send_datasource_webhook(
self.datasource_id, TEST_PAYLOAD_ACTIVATE)
@helper.retry_on_exception
def _check_policy_eval():
rows = self.client.list_policy_rows(policy_name, 'test')['results']
if len(rows) < 1:
LOG.debug('Too few rows: %s', rows)
return False
elif len(rows) > 1:
LOG.debug('Too many rows: %s', rows)
return False
elif rows[0]['data'] != ['Instance memory performance degraded']:
LOG.debug('Incorrect row data: %s', rows[0]['data'])
return False
return True
if not test_utils.call_until_true(func=_check_policy_eval,
duration=10, sleep_for=1):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.attr(type='smoke')
def test_vitrage_alarms_table_publish_then_subscribe(self):
self.client.send_datasource_webhook(
self.datasource_id, TEST_PAYLOAD_ACTIVATE)
policy_name = self._create_random_policy('vitrage')
policy_rule = 'test(name) :- {}:alarms(name=name)'.format(DS_NAME)
self._create_policy_rule_retry(policy_name, policy_rule)
@helper.retry_on_exception
def _check_policy_eval():
rows = self.client.list_policy_rows(policy_name, 'test')['results']
if len(rows) < 1:
LOG.debug('Too few rows: %s', rows)
return False
elif len(rows) > 1:
LOG.debug('Too many rows: %s', rows)
return False
elif rows[0]['data'] != ['Instance memory performance degraded']:
LOG.debug('Incorrect row data: %s', rows[0]['data'])
return False
return True
if not test_utils.call_until_true(func=_check_policy_eval,
duration=10, sleep_for=1):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")


@@ -1,299 +0,0 @@
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import subprocess
import tempfile
from oslo_log import log as logging
import six
from tempest.common import credentials_factory as credentials
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import manager as tempestmanager
from urllib3 import exceptions as urllib3_exceptions
from congress_tempest_plugin.services.policy import policy_client
from congress_tempest_plugin.tests.scenario import helper
from congress_tempest_plugin.tests.scenario import manager_congress
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestHA(manager_congress.ScenarioPolicyBase):
def setUp(self):
super(TestHA, self).setUp()
self.keypairs = {}
self.servers = []
self.replicas = {}
self.services_client = self.os_admin.identity_services_v3_client
self.endpoints_client = self.os_admin.endpoints_v3_client
self.client = self.os_admin.congress_client
def _prepare_replica(self, port_num):
replica_url = "http://127.0.0.1:%d" % port_num
resp = self.services_client.create_service(
name='congressha',
type=CONF.congressha.replica_type,
description='policy ha service')
self.replica_service_id = resp['service']['id']
resp = self.endpoints_client.create_endpoint(
service_id=self.replica_service_id,
region=CONF.identity.region,
interface='public',
url=replica_url)
self.replica_endpoint_id = resp['endpoint']['id']
def _cleanup_replica(self):
self.endpoints_client.delete_endpoint(self.replica_endpoint_id)
self.services_client.delete_service(self.replica_service_id)
def start_replica(self, port_num):
self._prepare_replica(port_num)
f = tempfile.NamedTemporaryFile(mode='w', suffix='.conf',
prefix='congress%d-' % port_num,
dir='/tmp', delete=False)
conf_file = f.name
template = open('/etc/congress/congress.conf')
conf = template.read()
        # Add 'bind_port' and 'rpc_response_timeout' to the conf file.
index = conf.find('[DEFAULT]') + len('[DEFAULT]\n')
conf = (conf[:index] +
'bind_port = %d\n' % port_num +
'rpc_response_timeout = 10\n' +
conf[index:])
# set datasource sync period interval to 5
conf = conf.replace('datasource_sync_period = 30',
'datasource_sync_period = 5')
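        # comment out any signing_dir option carried over from the template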
sindex = conf.find('signing_dir')
conf = conf[:sindex] + '#' + conf[sindex:]
conf = conf + '\n[dse]\nbus_id = replica-node\n'
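        # Resulting file (illustrative sketch, assuming port_num=4001):
        #   [DEFAULT]
        #   bind_port = 4001
        #   rpc_response_timeout = 10
        #   ... options from /etc/congress/congress.conf ...
        #   [dse]
        #   bus_id = replica-node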
LOG.debug("Configuration file for replica: %s\n", conf)
f.write(conf)
f.close()
# start all services on replica node
api = self.start_service('api', conf_file)
pe = self.start_service('policy-engine', conf_file)
data = self.start_service('datasources', conf_file)
assert port_num not in self.replicas
LOG.debug("successfully started replica services\n")
self.replicas[port_num] = ({'API': api, 'PE': pe, 'DS': data},
conf_file)
def start_service(self, name, conf_file):
service = '--' + name
node = name + '-replica-node'
args = ['congress-server', service,
'--node-id', node, '--config-file', conf_file]
p = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=helper.root_path())
return p
def stop_replica(self, port_num):
procs, conf_file = self.replicas[port_num]
# Using proc.terminate() will block at proc.wait(), no idea why yet
# kill all processes
for p in procs.values():
p.kill()
p.wait()
os.unlink(conf_file)
self.replicas[port_num] = (None, conf_file)
self._cleanup_replica()
def create_client(self, client_type):
creds = credentials.get_configured_admin_credentials('identity_admin')
auth_prov = tempestmanager.get_auth_provider(creds)
return policy_client.PolicyClient(
auth_prov, client_type,
CONF.identity.region)
def _check_replica_server_status(self, client):
try:
LOG.debug("Check replica server status")
client.list_policy()
LOG.debug("replica server ready")
return True
except exceptions.Unauthorized:
LOG.debug("connection refused")
return False
except (socket.error, urllib3_exceptions.MaxRetryError,
exceptions.UnexpectedResponseCode):
LOG.debug("Replica server not ready")
return False
def find_fake(self, client):
datasources = client.list_datasources()
for r in datasources['results']:
if r['name'] == 'fake':
LOG.debug('existing fake driver: %s', str(r['id']))
return r['id']
return None
def _check_resource_exists(self, client, resource):
try:
body = None
if resource == 'datasource':
LOG.debug("Check datasource exists")
body = self.client.list_datasource_status('fake')
else:
LOG.debug("Check policy exists")
body = self.client.list_policy_status('fake')
LOG.debug("resource status: %s", str(body))
except exceptions.NotFound:
LOG.debug("resource 'fake' not found")
return False
return True
def _check_resource_missing(self, client, resource):
return not self._check_resource_exists(client, resource)
def create_fake(self, client):
# Create fake datasource if it does not exist. Returns the
# fake datasource id.
fake_id = self.find_fake(client)
if fake_id:
return fake_id
item = {'id': None,
'name': 'fake',
'driver': 'fake_datasource',
'config': {"username": "fakeu",
"tenant_name": "faket",
"password": "fakep",
"auth_url": "http://127.0.0.1:5000/v2"},
'description': 'bar',
'enabled': True}
ret = client.create_datasource(item)
LOG.debug('created fake driver: %s', str(ret['id']))
return ret['id']
@decorators.attr(type='smoke')
def test_datasource_db_sync_add_remove(self):
# Verify that a replica adds a datasource when a datasource
# appears in the database.
replica_server = False
try:
# Check fake if exists. else create
fake_id = self.create_fake(self.client)
# Start replica
self.start_replica(CONF.congressha.replica_port)
replica_client = self.create_client(CONF.congressha.replica_type)
# Check replica server status
if not test_utils.call_until_true(
func=lambda: self._check_replica_server_status(
replica_client),
duration=90, sleep_for=2):
for port_num in self.replicas:
procs = self.replicas[port_num][0]
for service_key in procs:
output, error = procs[service_key].communicate()
LOG.debug("Replica port %s service %s logs: %s",
port_num,
service_key,
six.StringIO(output.decode()).getvalue())
raise exceptions.TimeoutException("Replica Server not ready")
            # Replica server is up
replica_server = True
            # The primary server may sync later than the replica because of
            # the difference in datasource sync intervals (primary 30s,
            # replica 5s), so check the replica first.
# Verify that replica server synced fake dataservice and policy
if not test_utils.call_until_true(
func=lambda: self._check_resource_exists(
replica_client, 'datasource'),
duration=60, sleep_for=1):
raise exceptions.TimeoutException(
"replica doesn't have fake dataservice, data sync failed")
if not test_utils.call_until_true(
func=lambda: self._check_resource_exists(
replica_client, 'policy'),
duration=60, sleep_for=1):
raise exceptions.TimeoutException(
"replica doesn't have fake policy, policy sync failed")
# Verify that primary server synced fake dataservice and policy
if not test_utils.call_until_true(
func=lambda: self._check_resource_exists(
self.client, 'datasource'),
duration=90, sleep_for=1):
raise exceptions.TimeoutException(
"primary doesn't have fake dataservice, data sync failed")
if not test_utils.call_until_true(
func=lambda: self._check_resource_exists(
self.client, 'policy'),
duration=90, sleep_for=1):
raise exceptions.TimeoutException(
"primary doesn't have fake policy, policy sync failed")
# Remove fake from primary server instance.
LOG.debug("removing fake datasource %s", str(fake_id))
self.client.delete_datasource(fake_id)
# Verify that replica server has no fake datasource and fake policy
if not test_utils.call_until_true(
func=lambda: self._check_resource_missing(
replica_client, 'datasource'),
duration=60, sleep_for=1):
raise exceptions.TimeoutException(
"replica still has fake dataservice, sync failed")
if not test_utils.call_until_true(
func=lambda: self._check_resource_missing(
replica_client, 'policy'),
duration=60, sleep_for=1):
raise exceptions.TimeoutException(
"replica still fake policy, policy synchronizer failed")
LOG.debug("removed fake datasource from replica instance")
# Verify that primary server has no fake datasource and fake policy
if not test_utils.call_until_true(
func=lambda: self._check_resource_missing(
self.client, 'datasource'),
duration=90, sleep_for=1):
raise exceptions.TimeoutException(
"primary still has fake dataservice, sync failed")
if not test_utils.call_until_true(
func=lambda: self._check_resource_missing(
self.client, 'policy'),
duration=90, sleep_for=1):
raise exceptions.TimeoutException(
"primary still fake policy, policy synchronizer failed")
LOG.debug("removed fake datasource from primary instance")
finally:
if replica_server:
self.stop_replica(CONF.congressha.replica_port)


@@ -1,64 +0,0 @@
# Copyright (c) 2015 Hewlett Packard. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
import tenacity
def retry_check_function_return_value_condition(
f, check_condition, error_msg=None, retry_interval=1,
retry_attempts=20):
"""Check if function f returns value s.t check_condition(value) is True."""
@tenacity.retry(stop=tenacity.stop_after_attempt(retry_attempts),
wait=tenacity.wait_fixed(retry_interval))
def retried_function():
r = f()
if not check_condition(r):
raise Exception(error_msg or
'Actual return value ({}) does not satisfy '
'provided condition'.format(r))
return r
return retried_function()
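# Illustrative usage sketch (fetch_rows is a hypothetical callable):
#   retry_check_function_return_value_condition(
#       fetch_rows, lambda rows: len(rows) > 0,
#       error_msg='rows never appeared', retry_interval=2, retry_attempts=10)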
def retry_check_function_return_value(f, expected_value, error_msg=None):
"""Check if function f returns expected value."""
if not error_msg:
error_msg = 'Expected value "%s" not found' % expected_value
retry_check_function_return_value_condition(
f, lambda v: v == expected_value, error_msg)
def retry_on_exception(f):
"""Decorator to retry on an exception."""
def wrapper():
try:
return f()
except Exception:
return False
return wrapper
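# Sketch of the intended pattern: an exception inside the wrapped function
# becomes a False return, so test_utils.call_until_true keeps polling
# instead of erroring out:
#   @retry_on_exception
#   def _check():
#       return client.list_policy() is not None  # may raise while booting
#   test_utils.call_until_true(func=_check, duration=60, sleep_for=1)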
def root_path():
"""Return path to root of source code."""
x = os.path.realpath(__file__)
x, y = os.path.split(x) # drop "helper.py"
x, y = os.path.split(x) # drop "scenario"
x, y = os.path.split(x) # drop "tests"
x, y = os.path.split(x) # drop "congress_tempest_plugin"
return x


@@ -1,976 +0,0 @@
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# local congress copy of upstream file. Maintained temporarily while
# file undergoes refactoring upstream.
import subprocess
import netaddr
from oslo_log import log
from oslo_utils import netutils
from tempest.common import compute
from tempest.common import image as common_image
from tempest.common.utils.linux import remote_client
from tempest.common.utils import net_utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions as lib_exc
import tempest.test
CONF = config.CONF
LOG = log.getLogger(__name__)
class ScenarioTest(tempest.test.BaseTestCase):
"""Base class for scenario tests. Uses tempest own clients. """
credentials = ['primary']
@classmethod
def setup_clients(cls):
super(ScenarioTest, cls).setup_clients()
# Clients (in alphabetical order)
cls.flavors_client = cls.os_primary.flavors_client
cls.compute_floating_ips_client = (
cls.os_primary.compute_floating_ips_client)
if CONF.service_available.glance:
# Check if glance v1 is available to determine which client to use.
if CONF.image_feature_enabled.api_v1:
cls.image_client = cls.os_primary.image_client
elif CONF.image_feature_enabled.api_v2:
cls.image_client = cls.os_primary.image_client_v2
else:
raise lib_exc.InvalidConfiguration(
'Either api_v1 or api_v2 must be True in '
'[image-feature-enabled].')
# Compute image client
cls.compute_images_client = cls.os_primary.compute_images_client
cls.keypairs_client = cls.os_primary.keypairs_client
# Nova security groups client
cls.compute_security_groups_client = (
cls.os_primary.compute_security_groups_client)
cls.compute_security_group_rules_client = (
cls.os_primary.compute_security_group_rules_client)
cls.servers_client = cls.os_primary.servers_client
cls.interface_client = cls.os_primary.interfaces_client
# Neutron network client
cls.networks_client = cls.os_primary.networks_client
cls.ports_client = cls.os_primary.ports_client
cls.routers_client = cls.os_primary.routers_client
cls.subnets_client = cls.os_primary.subnets_client
cls.floating_ips_client = cls.os_primary.floating_ips_client
cls.security_groups_client = cls.os_primary.security_groups_client
cls.security_group_rules_client = (
cls.os_primary.security_group_rules_client)
if (CONF.volume_feature_enabled.api_v2 or
CONF.volume_feature_enabled.api_v3):
cls.volumes_client = cls.os_primary.volumes_client_latest
cls.snapshots_client = cls.os_primary.snapshots_client_latest
# ## Test functions library
#
# The create_[resource] functions only return body and discard the
# resp part which is not used in scenario tests
def _create_port(self, network_id, client=None, namestart='port-quotatest',
**kwargs):
if not client:
client = self.ports_client
name = data_utils.rand_name(namestart)
result = client.create_port(
name=name,
network_id=network_id,
**kwargs)
self.assertIsNotNone(result, 'Unable to allocate port')
port = result['port']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
client.delete_port, port['id'])
return port
def create_keypair(self, client=None):
if not client:
client = self.keypairs_client
name = data_utils.rand_name(self.__class__.__name__)
        # We don't need to create a keypair from a public key in
        # scenario tests.
body = client.create_keypair(name=name)
self.addCleanup(client.delete_keypair, name)
return body['keypair']
def show_server(self, server_id, client=None):
if not client:
client = self.servers_client
return client.show_server(server_id)['server']
def create_server(self, name=None, image_id=None, flavor=None,
validatable=False, wait_until='ACTIVE',
clients=None, **kwargs):
"""Wrapper utility that returns a test server.
This wrapper utility calls the common create test server and
returns a test server. The purpose of this wrapper is to minimize
the impact on the code of the tests already using this
function.
"""
# NOTE(jlanoux): As a first step, ssh checks in the scenario
# tests need to be run regardless of the run_validation and
# validatable parameters and thus until the ssh validation job
# becomes voting in CI. The test resources management and IP
# association are taken care of in the scenario tests.
# Therefore, the validatable parameter is set to false in all
        # those tests. In this way create_server just returns a standard
# server and the scenario tests always perform ssh checks.
# Needed for the cross_tenant_traffic test:
if clients is None:
clients = self.os_primary
if name is None:
name = data_utils.rand_name(self.__class__.__name__ + "-server")
vnic_type = CONF.network.port_vnic_type
# If vnic_type is configured create port for
# every network
if vnic_type:
ports = []
create_port_body = {'binding:vnic_type': vnic_type,
'namestart': 'port-smoke'}
if kwargs:
# Convert security group names to security group ids
# to pass to create_port
if 'security_groups' in kwargs:
security_groups = \
clients.security_groups_client.list_security_groups(
).get('security_groups')
sec_dict = dict([(s['name'], s['id'])
for s in security_groups])
sec_groups_names = [s['name'] for s in kwargs.pop(
'security_groups')]
security_groups_ids = [sec_dict[s]
for s in sec_groups_names]
if security_groups_ids:
create_port_body[
'security_groups'] = security_groups_ids
networks = kwargs.pop('networks', [])
else:
networks = []
# If there are no networks passed to us we look up
# for the project's private networks and create a port.
# The same behaviour as we would expect when passing
# the call to the clients with no networks
if not networks:
networks = clients.networks_client.list_networks(
**{'router:external': False, 'fields': 'id'})['networks']
# It's net['uuid'] if networks come from kwargs
# and net['id'] if they come from
# clients.networks_client.list_networks
for net in networks:
net_id = net.get('uuid', net.get('id'))
if 'port' not in net:
port = self._create_port(network_id=net_id,
client=clients.ports_client,
**create_port_body)
ports.append({'port': port['id']})
else:
ports.append({'port': net['port']})
if ports:
kwargs['networks'] = ports
self.ports = ports
tenant_network = self.get_tenant_network()
body, servers = compute.create_test_server(
clients,
tenant_network=tenant_network,
wait_until=wait_until,
name=name, flavor=flavor,
image_id=image_id, **kwargs)
self.addCleanup(waiters.wait_for_server_termination,
clients.servers_client, body['id'])
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
clients.servers_client.delete_server, body['id'])
server = clients.servers_client.show_server(body['id'])['server']
return server
def create_volume(self, size=None, name=None, snapshot_id=None,
imageRef=None, volume_type=None):
if size is None:
size = CONF.volume.volume_size
if imageRef:
image = self.compute_images_client.show_image(imageRef)['image']
min_disk = image.get('minDisk')
size = max(size, min_disk)
if name is None:
name = data_utils.rand_name(self.__class__.__name__ + "-volume")
kwargs = {'display_name': name,
'snapshot_id': snapshot_id,
'imageRef': imageRef,
'volume_type': volume_type,
'size': size}
volume = self.volumes_client.create_volume(**kwargs)['volume']
self.addCleanup(self.volumes_client.wait_for_resource_deletion,
volume['id'])
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.volumes_client.delete_volume, volume['id'])
# NOTE(e0ne): Cinder API v2 uses name instead of display_name
if 'display_name' in volume:
self.assertEqual(name, volume['display_name'])
else:
self.assertEqual(name, volume['name'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
# The volume retrieved on creation has a non-up-to-date status.
# Retrieval after it becomes active ensures correct details.
volume = self.volumes_client.show_volume(volume['id'])['volume']
return volume
def _create_loginable_secgroup_rule(self, secgroup_id=None):
_client = self.compute_security_groups_client
_client_rules = self.compute_security_group_rules_client
if secgroup_id is None:
sgs = _client.list_security_groups()['security_groups']
for sg in sgs:
if sg['name'] == 'default':
secgroup_id = sg['id']
# These rules are intended to permit inbound ssh and icmp
# traffic from all sources, so no group_id is provided.
# Setting a group_id would only permit traffic from ports
# belonging to the same security group.
rulesets = [
{
# ssh
'ip_protocol': 'tcp',
'from_port': 22,
'to_port': 22,
'cidr': '0.0.0.0/0',
},
{
# ping
'ip_protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '0.0.0.0/0',
}
]
rules = list()
for ruleset in rulesets:
sg_rule = _client_rules.create_security_group_rule(
parent_group_id=secgroup_id, **ruleset)['security_group_rule']
rules.append(sg_rule)
return rules
def _create_security_group(self):
# Create security group
sg_name = data_utils.rand_name(self.__class__.__name__)
sg_desc = sg_name + " description"
secgroup = self.compute_security_groups_client.create_security_group(
name=sg_name, description=sg_desc)['security_group']
self.assertEqual(secgroup['name'], sg_name)
self.assertEqual(secgroup['description'], sg_desc)
self.addCleanup(
test_utils.call_and_ignore_notfound_exc,
self.compute_security_groups_client.delete_security_group,
secgroup['id'])
# Add rules to the security group
self._create_loginable_secgroup_rule(secgroup['id'])
return secgroup
def get_remote_client(self, ip_address, username=None, private_key=None):
"""Get a SSH client to a remote server
@param ip_address the server floating or fixed IP address to use
for ssh validation
@param username name of the Linux account on the remote server
@param private_key the SSH private key to use
@return a RemoteClient object
"""
if username is None:
username = CONF.validation.image_ssh_user
# Set this with 'keypair' or others to log in with keypair or
# username/password.
if CONF.validation.auth_method == 'keypair':
password = None
if private_key is None:
private_key = self.keypair['private_key']
else:
password = CONF.validation.image_ssh_password
private_key = None
linux_client = remote_client.RemoteClient(ip_address, username,
pkey=private_key,
password=password)
try:
linux_client.validate_authentication()
except Exception as e:
message = ('Initializing SSH connection to %(ip)s failed. '
'Error: %(error)s' % {'ip': ip_address,
'error': e})
caller = test_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
LOG.exception(message)
self._log_console_output()
raise
return linux_client
def _image_create(self, name, fmt, path,
disk_format=None, properties=None):
if properties is None:
properties = {}
name = data_utils.rand_name('%s-' % name)
params = {
'name': name,
'container_format': fmt,
'disk_format': disk_format or fmt,
}
if CONF.image_feature_enabled.api_v1:
params['is_public'] = 'False'
params['properties'] = properties
params = {'headers': common_image.image_meta_to_headers(**params)}
else:
params['visibility'] = 'private'
# Additional properties are flattened out in the v2 API.
params.update(properties)
body = self.image_client.create_image(**params)
image = body['image'] if 'image' in body else body
self.addCleanup(self.image_client.delete_image, image['id'])
self.assertEqual("queued", image['status'])
with open(path, 'rb') as image_file:
if CONF.image_feature_enabled.api_v1:
self.image_client.update_image(image['id'], data=image_file)
else:
self.image_client.store_image_file(image['id'], image_file)
return image['id']
def _log_console_output(self, servers=None):
if not CONF.compute_feature_enabled.console_output:
LOG.debug('Console output not supported, cannot log')
return
if not servers:
servers = self.servers_client.list_servers()
servers = servers['servers']
for server in servers:
try:
console_output = self.servers_client.get_console_output(
server['id'])['output']
LOG.debug('Console output for %s\nbody=\n%s',
server['id'], console_output)
except lib_exc.NotFound:
LOG.debug("Server %s disappeared(deleted) while looking "
"for the console log", server['id'])
def _log_net_info(self, exc):
# network debug is called as part of ssh init
if not isinstance(exc, lib_exc.SSHTimeout):
LOG.debug('Network information on a devstack host')
def ping_ip_address(self, ip_address, should_succeed=True,
ping_timeout=None, mtu=None):
timeout = ping_timeout or CONF.validation.ping_timeout
cmd = ['ping', '-c1', '-w1']
if mtu:
cmd += [
# don't fragment
'-M', 'do',
# ping receives just the size of ICMP payload
'-s', str(net_utils.get_ping_payload_size(mtu, 4))
]
cmd.append(ip_address)
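        # e.g. with mtu=1450 the assembled command is (sketch):
        #   ['ping', '-c1', '-w1', '-M', 'do', '-s', '1422', ip_address]
        # since the ICMP payload excludes 28 bytes of IPv4 + ICMP headers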
def ping():
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.communicate()
return (proc.returncode == 0) == should_succeed
caller = test_utils.find_test_caller()
LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
' expected result is %(should_succeed)s', {
'caller': caller, 'ip': ip_address, 'timeout': timeout,
'should_succeed':
'reachable' if should_succeed else 'unreachable'
})
result = test_utils.call_until_true(ping, timeout, 1)
LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
'ping result is %(result)s', {
'caller': caller, 'ip': ip_address, 'timeout': timeout,
'result': 'expected' if result else 'unexpected'
})
return result
def check_vm_connectivity(self, ip_address,
username=None,
private_key=None,
should_connect=True,
mtu=None):
"""Check server connectivity
:param ip_address: server to test against
:param username: server's ssh username
:param private_key: server's ssh private key to be used
:param should_connect: True/False indicates positive/negative test
positive - attempt ping and ssh
negative - attempt ping and fail if succeed
:param mtu: network MTU to use for connectivity validation
:raises: AssertError if the result of the connectivity check does
not match the value of the should_connect param
"""
if should_connect:
msg = "Timed out waiting for %s to become reachable" % ip_address
else:
msg = "ip address %s is reachable" % ip_address
self.assertTrue(self.ping_ip_address(ip_address,
should_succeed=should_connect,
mtu=mtu),
msg=msg)
if should_connect:
# no need to check ssh for negative connectivity
self.get_remote_client(ip_address, username, private_key)
class NetworkScenarioTest(ScenarioTest):
"""Base class for network scenario tests.
This class provide helpers for network scenario tests, using the neutron
API. Helpers from ancestor which use the nova network API are overridden
with the neutron API.
    This class also enforces using Neutron instead of nova-network.
    Subclassed tests will be skipped if Neutron is not enabled.
"""
credentials = ['primary', 'admin']
@classmethod
def skip_checks(cls):
super(NetworkScenarioTest, cls).skip_checks()
if not CONF.service_available.neutron:
raise cls.skipException('Neutron not available')
def _create_network(self, networks_client=None,
tenant_id=None,
namestart='network-smoke-',
port_security_enabled=True):
if not networks_client:
networks_client = self.networks_client
if not tenant_id:
tenant_id = networks_client.tenant_id
name = data_utils.rand_name(namestart)
network_kwargs = dict(name=name, tenant_id=tenant_id)
# Neutron disables port security by default so we have to check the
# config before trying to create the network with port_security_enabled
if CONF.network_feature_enabled.port_security:
network_kwargs['port_security_enabled'] = port_security_enabled
result = networks_client.create_network(**network_kwargs)
network = result['network']
self.assertEqual(network['name'], name)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
networks_client.delete_network,
network['id'])
return network
def _create_subnet(self, network, subnets_client=None,
routers_client=None, namestart='subnet-smoke',
**kwargs):
"""Create a subnet for the given network
within the cidr block configured for tenant networks.
"""
if not subnets_client:
subnets_client = self.subnets_client
if not routers_client:
routers_client = self.routers_client
def cidr_in_use(cidr, tenant_id):
"""Check cidr existence
:returns: True if subnet with cidr already exist in tenant
False else
"""
cidr_in_use = self.os_admin.subnets_client.list_subnets(
tenant_id=tenant_id, cidr=cidr)['subnets']
return len(cidr_in_use) != 0
ip_version = kwargs.pop('ip_version', 4)
if ip_version == 6:
tenant_cidr = netaddr.IPNetwork(
CONF.network.project_network_v6_cidr)
num_bits = CONF.network.project_network_v6_mask_bits
else:
tenant_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
num_bits = CONF.network.project_network_mask_bits
result = None
str_cidr = None
# Repeatedly attempt subnet creation with sequential cidr
# blocks until an unallocated block is found.
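        # e.g. (sketch) a 10.1.0.0/20 tenant cidr with num_bits=26 yields
        # candidate blocks 10.1.0.0/26, 10.1.0.64/26, 10.1.0.128/26, ...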
for subnet_cidr in tenant_cidr.subnet(num_bits):
str_cidr = str(subnet_cidr)
if cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
continue
subnet = dict(
name=data_utils.rand_name(namestart),
network_id=network['id'],
tenant_id=network['tenant_id'],
cidr=str_cidr,
ip_version=ip_version,
**kwargs
)
try:
result = subnets_client.create_subnet(**subnet)
break
except lib_exc.Conflict as e:
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
if not is_overlapping_cidr:
raise
self.assertIsNotNone(result, 'Unable to allocate tenant network')
subnet = result['subnet']
self.assertEqual(subnet['cidr'], str_cidr)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
subnets_client.delete_subnet, subnet['id'])
return subnet
def _get_server_port_id_and_ip4(self, server, ip_addr=None):
if ip_addr:
ports = self.os_admin.ports_client.list_ports(
device_id=server['id'],
fixed_ips='ip_address=%s' % ip_addr)['ports']
else:
ports = self.os_admin.ports_client.list_ports(
device_id=server['id'])['ports']
# A port can have more than one IP address in some cases.
# If the network is dual-stack (IPv4 + IPv6), this port is associated
# with 2 subnets
p_status = ['ACTIVE']
# NOTE(vsaienko) With Ironic, instances live on separate hardware
# servers. Neutron does not bind ports for Ironic instances, as a
# result the port remains in the DOWN state.
# TODO(vsaienko) remove once bug: #1599836 is resolved.
if getattr(CONF.service_available, 'ironic', False):
p_status.append('DOWN')
port_map = [(p["id"], fxip["ip_address"])
for p in ports
for fxip in p["fixed_ips"]
if netutils.is_valid_ipv4(fxip["ip_address"]) and
p['status'] in p_status]
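        # port_map ends up like [(port_id, '10.1.0.5')] (illustrative)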
inactive = [p for p in ports if p['status'] != 'ACTIVE']
if inactive:
LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
self.assertNotEqual(0, len(port_map),
"No IPv4 addresses found in: %s" % ports)
self.assertEqual(len(port_map), 1,
"Found multiple IPv4 addresses: %s. "
"Unable to determine which port to target."
% port_map)
return port_map[0]
def _get_network_by_name(self, network_name):
net = self.os_admin.networks_client.list_networks(
name=network_name)['networks']
self.assertNotEqual(len(net), 0,
"Unable to get network by name: %s" % network_name)
return net[0]
def create_floating_ip(self, thing, external_network_id=None,
port_id=None, client=None):
"""Create a floating IP and associates to a resource/port on Neutron"""
if not external_network_id:
external_network_id = CONF.network.public_network_id
if not client:
client = self.floating_ips_client
if not port_id:
port_id, ip4 = self._get_server_port_id_and_ip4(thing)
else:
ip4 = None
result = client.create_floatingip(
floating_network_id=external_network_id,
port_id=port_id,
tenant_id=thing['tenant_id'],
fixed_ip_address=ip4
)
floating_ip = result['floatingip']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
client.delete_floatingip,
floating_ip['id'])
return floating_ip
def _associate_floating_ip(self, floating_ip, server):
port_id, _ = self._get_server_port_id_and_ip4(server)
kwargs = dict(port_id=port_id)
floating_ip = self.floating_ips_client.update_floatingip(
floating_ip['id'], **kwargs)['floatingip']
self.assertEqual(port_id, floating_ip['port_id'])
return floating_ip
def _disassociate_floating_ip(self, floating_ip):
""":param floating_ip: floating_ips_client.create_floatingip"""
kwargs = dict(port_id=None)
floating_ip = self.floating_ips_client.update_floatingip(
floating_ip['id'], **kwargs)['floatingip']
self.assertIsNone(floating_ip['port_id'])
return floating_ip
def _check_tenant_network_connectivity(self, server,
username,
private_key,
should_connect=True,
servers_for_debug=None):
if not CONF.network.project_networks_reachable:
msg = 'Tenant networks not configured to be reachable.'
LOG.info(msg)
return
# The target login is assumed to have been configured for
# key-based authentication by cloud-init.
try:
for net_name, ip_addresses in server['addresses'].items():
for ip_address in ip_addresses:
self.check_vm_connectivity(ip_address['addr'],
username,
private_key,
should_connect=should_connect)
except Exception as e:
LOG.exception('Tenant network connectivity check failed')
self._log_console_output(servers_for_debug)
self._log_net_info(e)
raise
def _check_remote_connectivity(self, source, dest, should_succeed=True,
nic=None):
"""check ping server via source ssh connection
:param source: RemoteClient: an ssh connection from which to ping
:param dest: and IP to ping against
:param should_succeed: boolean should ping succeed or not
:param nic: specific network interface to ping from
:returns: boolean -- should_succeed == ping
:returns: ping is false if ping failed
"""
def ping_remote():
try:
source.ping_host(dest, nic=nic)
except lib_exc.SSHExecCommandFailed:
LOG.warning('Failed to ping IP: %s via a ssh connection '
'from: %s.', dest, source.ssh_client.host)
return not should_succeed
return should_succeed
return test_utils.call_until_true(ping_remote,
CONF.validation.ping_timeout,
1)
def _create_security_group(self, security_group_rules_client=None,
tenant_id=None,
namestart='secgroup-smoke',
security_groups_client=None):
if security_group_rules_client is None:
security_group_rules_client = self.security_group_rules_client
if security_groups_client is None:
security_groups_client = self.security_groups_client
if tenant_id is None:
tenant_id = security_groups_client.tenant_id
secgroup = self._create_empty_security_group(
namestart=namestart, client=security_groups_client,
tenant_id=tenant_id)
# Add rules to the security group
rules = self._create_loginable_secgroup_rule(
security_group_rules_client=security_group_rules_client,
secgroup=secgroup,
security_groups_client=security_groups_client)
for rule in rules:
self.assertEqual(tenant_id, rule['tenant_id'])
self.assertEqual(secgroup['id'], rule['security_group_id'])
return secgroup
def _create_empty_security_group(self, client=None, tenant_id=None,
namestart='secgroup-smoke'):
"""Create a security group without rules.
Default rules will be created:
- IPv4 egress to any
- IPv6 egress to any
:param tenant_id: secgroup will be created in this tenant
:returns: the created security group
"""
if client is None:
client = self.security_groups_client
if not tenant_id:
tenant_id = client.tenant_id
sg_name = data_utils.rand_name(namestart)
sg_desc = sg_name + " description"
sg_dict = dict(name=sg_name,
description=sg_desc)
sg_dict['tenant_id'] = tenant_id
result = client.create_security_group(**sg_dict)
secgroup = result['security_group']
self.assertEqual(secgroup['name'], sg_name)
self.assertEqual(tenant_id, secgroup['tenant_id'])
self.assertEqual(secgroup['description'], sg_desc)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
client.delete_security_group, secgroup['id'])
return secgroup
def _default_security_group(self, client=None, tenant_id=None):
"""Get default secgroup for given tenant_id.
:returns: default secgroup for given tenant
"""
if client is None:
client = self.security_groups_client
if not tenant_id:
tenant_id = client.tenant_id
sgs = [
sg for sg in list(client.list_security_groups().values())[0]
if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
]
msg = "No default security group for tenant %s." % (tenant_id)
self.assertGreater(len(sgs), 0, msg)
return sgs[0]
def _create_security_group_rule(self, secgroup=None,
sec_group_rules_client=None,
tenant_id=None,
security_groups_client=None, **kwargs):
"""Create a rule from a dictionary of rule parameters.
        Create a rule in a secgroup. If secgroup is not given, the default
        secgroup for tenant_id is used.
:param secgroup: the security group.
:param tenant_id: if secgroup not passed -- the tenant in which to
search for default secgroup
:param kwargs: a dictionary containing rule parameters:
for example, to allow incoming ssh:
rule = {
                direction: 'ingress',
                protocol: 'tcp',
port_range_min: 22,
port_range_max: 22
}
"""
if sec_group_rules_client is None:
sec_group_rules_client = self.security_group_rules_client
if security_groups_client is None:
security_groups_client = self.security_groups_client
if not tenant_id:
tenant_id = security_groups_client.tenant_id
if secgroup is None:
secgroup = self._default_security_group(
client=security_groups_client, tenant_id=tenant_id)
ruleset = dict(security_group_id=secgroup['id'],
tenant_id=secgroup['tenant_id'])
ruleset.update(kwargs)
sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset)
sg_rule = sg_rule['security_group_rule']
self.assertEqual(secgroup['tenant_id'], sg_rule['tenant_id'])
self.assertEqual(secgroup['id'], sg_rule['security_group_id'])
return sg_rule
def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
secgroup=None,
security_groups_client=None):
"""Create loginable security group rule
This function will create:
1. egress and ingress tcp port 22 allow rule in order to allow ssh
access for ipv4.
2. egress and ingress ipv6 icmp allow rule, in order to allow icmpv6.
3. egress and ingress ipv4 icmp allow rule, in order to allow icmpv4.
"""
if security_group_rules_client is None:
security_group_rules_client = self.security_group_rules_client
if security_groups_client is None:
security_groups_client = self.security_groups_client
rules = []
rulesets = [
dict(
# ssh
protocol='tcp',
port_range_min=22,
port_range_max=22,
),
dict(
# ping
protocol='icmp',
),
dict(
# ipv6-icmp for ping6
protocol='icmp',
ethertype='IPv6',
)
]
sec_group_rules_client = security_group_rules_client
for ruleset in rulesets:
for r_direction in ['ingress', 'egress']:
ruleset['direction'] = r_direction
try:
sg_rule = self._create_security_group_rule(
sec_group_rules_client=sec_group_rules_client,
secgroup=secgroup,
security_groups_client=security_groups_client,
**ruleset)
except lib_exc.Conflict as ex:
# if rule already exist - skip rule and continue
msg = 'Security group rule already exists'
if msg not in ex._error_string:
raise ex
else:
self.assertEqual(r_direction, sg_rule['direction'])
rules.append(sg_rule)
return rules
def _get_router(self, client=None, tenant_id=None):
"""Retrieve a router for the given tenant id.
If a public router has been configured, it will be returned.
If a public router has not been configured, but a public
network has, a tenant router will be created and returned that
routes traffic to the public network.
"""
if not client:
client = self.routers_client
if not tenant_id:
tenant_id = client.tenant_id
router_id = CONF.network.public_router_id
network_id = CONF.network.public_network_id
if router_id:
body = client.show_router(router_id)
return body['router']
elif network_id:
router = self._create_router(client, tenant_id)
kwargs = {'external_gateway_info': dict(network_id=network_id)}
router = client.update_router(router['id'], **kwargs)['router']
return router
else:
raise Exception("Neither of 'public_router_id' or "
"'public_network_id' has been defined.")
def _create_router(self, client=None, tenant_id=None,
namestart='router-smoke'):
if not client:
client = self.routers_client
if not tenant_id:
tenant_id = client.tenant_id
name = data_utils.rand_name(namestart)
result = client.create_router(name=name,
admin_state_up=True,
tenant_id=tenant_id)
router = result['router']
self.assertEqual(router['name'], name)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
client.delete_router,
router['id'])
return router
# def _update_router_admin_state(self, router, admin_state_up):
# kwargs = dict(admin_state_up=admin_state_up)
# router = self.routers_client.update_router(
# router['id'], **kwargs)['router']
# self.assertEqual(admin_state_up, router['admin_state_up'])
def create_networks(self, networks_client=None,
routers_client=None, subnets_client=None,
tenant_id=None, dns_nameservers=None,
port_security_enabled=True):
"""Create a network with a subnet connected to a router.
The baremetal driver is a special case since all nodes are
on the same shared network.
:param tenant_id: id of tenant to create resources in.
:param dns_nameservers: list of dns servers to send to subnet.
:returns: network, subnet, router
"""
if CONF.network.shared_physical_network:
# NOTE(Shrews): This exception is for environments where tenant
# credential isolation is available, but network separation is
# not (the current baremetal case). Likely can be removed when
# test account mgmt is reworked:
# https://blueprints.launchpad.net/tempest/+spec/test-accounts
if not CONF.compute.fixed_network_name:
m = 'fixed_network_name must be specified in config'
raise lib_exc.InvalidConfiguration(m)
network = self._get_network_by_name(
CONF.compute.fixed_network_name)
router = None
subnet = None
else:
network = self._create_network(
networks_client=networks_client,
tenant_id=tenant_id,
port_security_enabled=port_security_enabled)
router = self._get_router(client=routers_client,
tenant_id=tenant_id)
subnet_kwargs = dict(network=network,
subnets_client=subnets_client,
routers_client=routers_client)
# use explicit check because empty list is a valid option
if dns_nameservers is not None:
subnet_kwargs['dns_nameservers'] = dns_nameservers
subnet = self._create_subnet(**subnet_kwargs)
if not routers_client:
routers_client = self.routers_client
router_id = router['id']
routers_client.add_router_interface(router_id,
subnet_id=subnet['id'])
# save a cleanup job to remove this association between
# router and subnet
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
routers_client.remove_router_interface, router_id,
subnet_id=subnet['id'])
return network, subnet, router


@@ -1,416 +0,0 @@
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import random
import re
import string
from oslo_log import log as logging
from tempest.common import credentials_factory as credentials
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import manager as tempestmanager
from congress_tempest_plugin.services.congress_network import qos_client
from congress_tempest_plugin.services.congress_network import qos_rule_client
from congress_tempest_plugin.services.policy import policy_client
# use local copy of tempest scenario manager during upstream refactoring
from congress_tempest_plugin.tests.scenario import helper
from congress_tempest_plugin.tests.scenario import manager
CONF = config.CONF
LOG = logging.getLogger(__name__)
Floating_IP_tuple = collections.namedtuple('Floating_IP_tuple',
['floating_ip', 'server'])
def get_datasource_id(client, name):
datasources = client.list_datasources()
for datasource in datasources['results']:
if datasource['name'] == name:
return datasource['id']
raise Exception("Datasource %s not found." % name)
# Note: these tests all use neutron today so we mix with that.
class ScenarioPolicyBase(manager.NetworkScenarioTest):
@classmethod
def setUpClass(cls):
super(ScenarioPolicyBase, cls).setUpClass()
# auth provider for admin credentials
creds = credentials.get_configured_admin_credentials('identity_admin')
auth_prov = tempestmanager.get_auth_provider(creds)
cls.setup_required_clients(auth_prov)
@classmethod
def setup_required_clients(cls, auth_prov):
# Get congress client
cls.os_admin.congress_client = policy_client.PolicyClient(
auth_prov, "policy", CONF.identity.region)
cls.os_admin.qos_client = qos_client.QosPoliciesClient(
auth_prov, "network", CONF.identity.region)
cls.os_admin.qos_rule_client = qos_rule_client.QosRuleClient(
auth_prov, "network", CONF.identity.region)
# Get alarms client
if getattr(CONF.service_available, 'aodh', False):
import telemetry_tempest_plugin.aodh.service.client as alarm_client
cls.os_admin.alarms_client = (
alarm_client.AlarmingClient(
auth_prov,
CONF.alarming_plugin.catalog_type, CONF.identity.region,
CONF.alarming_plugin.endpoint_type))
# Get mistral client
if getattr(CONF.service_available, 'mistral', False):
import mistral_tempest_tests.services.\
v2.mistral_client as mistral_client
cls.os_admin.mistral_client = mistral_client.MistralClientV2(
auth_prov, 'workflowv2')
def _setup_network_and_servers(self):
self.security_group = self._create_security_group()
self.network, self.subnet, self.router = self.create_networks()
self.check_networks()
name = data_utils.rand_name('server-smoke')
server = self._create_server(name, self.network)
self._check_tenant_network_connectivity()
floating_ip = self.create_floating_ip(server)
self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
def check_networks(self):
"""Check for newly created network/subnet/router.
Checks that we see the newly created network/subnet/router via
checking the result of list_[networks,routers,subnets].
"""
seen_nets = self.os_admin.networks_client.list_networks()
seen_names = [n['name'] for n in seen_nets['networks']]
seen_ids = [n['id'] for n in seen_nets['networks']]
self.assertIn(self.network['name'], seen_names)
self.assertIn(self.network['id'], seen_ids)
if self.subnet:
seen_subnets = self.os_admin.subnets_client.list_subnets()
seen_net_ids = [n['network_id'] for n in seen_subnets['subnets']]
seen_subnet_ids = [n['id'] for n in seen_subnets['subnets']]
self.assertIn(self.network['id'], seen_net_ids)
self.assertIn(self.subnet['id'], seen_subnet_ids)
if self.router:
seen_routers = self.os_admin.routers_client.list_routers()
seen_router_ids = [n['id'] for n in seen_routers['routers']]
seen_router_names = [n['name'] for n in seen_routers['routers']]
self.assertIn(self.router['name'],
seen_router_names)
self.assertIn(self.router['id'],
seen_router_ids)
def check_datasource_no_error(self, datasource_name):
"""Check that datasource has no error on latest update"""
ds_status = self.os_admin.congress_client.list_datasource_status(
datasource_name)
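        # status fields come back as strings, e.g. (sketch):
        #   {'initialized': 'True', 'number_of_updates': '5',
        #    'last_error': 'None', ...}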
if (ds_status['initialized'] == 'True' and
ds_status['number_of_updates'] != '0' and
ds_status['last_error'] == 'None'):
return True
else:
LOG.debug('datasource %s not initialized, not polled, or shows '
'error. Full status: %s', datasource_name, ds_status)
return False
def _create_server(self, name, network):
keypair = self.create_keypair()
self.keypairs[keypair['name']] = keypair
security_groups = [{'name': self.security_group['name']}]
create_kwargs = {
'networks': [
{'uuid': network['id']},
],
'key_name': keypair['name'],
'security_groups': security_groups,
}
server = self.create_server(name=name, wait_until='ACTIVE',
**create_kwargs)
self.servers.append(server)
return server
def _get_server_key(self, server):
return self.keypairs[server['key_name']]['private_key']
def _check_tenant_network_connectivity(self):
ssh_login = CONF.validation.image_ssh_user
for server in self.servers:
# call the common method in the parent class
super(ScenarioPolicyBase, self)._check_tenant_network_connectivity(
server, ssh_login, self._get_server_key(server),
servers_for_debug=self.servers)
def _create_and_associate_floating_ips(self, server):
public_network_id = CONF.network.public_network_id
floating_ip = self._create_floating_ip(server, public_network_id)
self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
def _check_public_network_connectivity(self, should_connect=True,
msg=None):
ssh_login = CONF.compute.image_ssh_user
floating_ip, server = self.floating_ip_tuple
ip_address = floating_ip.floating_ip_address
private_key = None
if should_connect:
private_key = self._get_server_key(server)
# call the common method in the parent class
super(ScenarioPolicyBase, self)._check_public_network_connectivity(
ip_address, ssh_login, private_key, should_connect, msg,
self.servers)
def _disassociate_floating_ips(self):
floating_ip, server = self.floating_ip_tuple
self._disassociate_floating_ip(floating_ip)
self.floating_ip_tuple = Floating_IP_tuple(
floating_ip, None)
def _reassociate_floating_ips(self):
floating_ip, server = self.floating_ip_tuple
name = data_utils.rand_name('new_server-smoke-')
# create a new server for the floating ip
server = self._create_server(name, self.network)
self._associate_floating_ip(floating_ip, server)
self.floating_ip_tuple = Floating_IP_tuple(
floating_ip, server)
def _create_new_network(self):
self.new_net = self._create_network(tenant_id=self.tenant_id)
self.new_subnet = self._create_subnet(
network=self.new_net,
gateway_ip=None)
def _get_server_nics(self, ssh_client):
reg = re.compile(r'(?P<num>\d+): (?P<nic_name>\w+):')
ipatxt = ssh_client.exec_command("ip address")
return reg.findall(ipatxt)
def _check_network_internal_connectivity(self, network):
"""Check VM internal connectivity via ssh:
- ping the internal gateway and the DHCP port, implying in-tenant
connectivity; ping both, because the L3 and DHCP agents might be on
different nodes.
"""
floating_ip, server = self.floating_ip_tuple
# get internal ports' ips:
# get all network ports in the new network
ports = self.os_admin.ports_client.list_ports(
tenant_id=server['tenant_id'], network_id=network.id)['ports']
internal_ips = (p['fixed_ips'][0]['ip_address'] for p in ports
if p['device_owner'].startswith('network'))
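# Ports whose device_owner starts with 'network' (e.g. network:dhcp,
# network:router_interface) are the in-tenant infrastructure ports whose
# fixed IPs we ping to verify internal connectivity.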
self._check_server_connectivity(floating_ip, internal_ips)
def _check_network_external_connectivity(self):
"""Ping public network default gateway to imply external connectivity."""
if not CONF.network.public_network_id:
msg = 'public network not defined.'
LOG.info(msg)
return
subnet = self.os_admin.subnets_client.list_subnets(
network_id=CONF.network.public_network_id)['subnets']
self.assertEqual(1, len(subnet), "Found %d subnets" % len(subnet))
external_ips = [subnet[0]['gateway_ip']]
self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
external_ips)
def _check_server_connectivity(self, floating_ip, address_list):
ip_address = floating_ip.floating_ip_address
private_key = self._get_server_key(self.floating_ip_tuple.server)
ssh_source = self._ssh_to_server(ip_address, private_key)
for remote_ip in address_list:
try:
self.assertTrue(self._check_remote_connectivity(ssh_source,
remote_ip),
"Timed out waiting for %s to become "
"reachable" % remote_ip)
except Exception:
LOG.exception("Unable to access {dest} via ssh to "
"floating-ip {src}".format(dest=remote_ip,
src=floating_ip))
raise
def _create_random_policy(self, prefix='nova'):
policy_name = prefix + "_%s" % ''.join(
random.choice(string.ascii_lowercase) for x in range(10))
body = {"name": policy_name}
resp = self.os_admin.congress_client.create_policy(body)
self.addCleanup(self.os_admin.congress_client.delete_policy,
resp['id'])
return resp['name']
def _create_policy_rule(self, policy_name, rule, rule_name=None,
comment=None):
body = {'rule': rule}
if rule_name:
body['name'] = rule_name
if comment:
body['comment'] = comment
client = self.os_admin.congress_client
response = client.create_policy_rule(policy_name, body)
if response:
self.addCleanup(client.delete_policy_rule, policy_name,
response['id'])
return response
else:
raise Exception('Failed to create policy rule (%s, %s)'
% (policy_name, rule))
def _create_policy_rule_retry(
self, policy_name, rule, rule_name=None, comment=None):
return helper.retry_check_function_return_value_condition(
lambda: self._create_policy_rule(
policy_name, rule, rule_name, comment),
lambda v: True, retry_attempts=50, retry_interval=2)
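# Illustrative sketch (not part of the original file): a test typically
# composes the helpers above by creating a throwaway policy and inserting
# a rule into it; the rule text here is a hypothetical example reusing the
# nova:servers schema seen elsewhere in this plugin.
#
#     policy = self._create_random_policy(prefix='demo')
#     self._create_policy_rule_retry(
#         policy,
#         'error(id) :- nova:servers(id, name, host_id, status, tenant_id, '
#         'user_id, image_id, flavor_id, zone, host_name), '
#         'equal(status, "ERROR")')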
class DatasourceDriverTestBase(ScenarioPolicyBase):
def check_service_data_against_congress_table(
self, table_name, service_data_fetch_func, check_nonempty=True,
missing_attributes_allowed=None):
if missing_attributes_allowed is None:
missing_attributes_allowed = []
table_schema = (
self.os_admin.congress_client.show_datasource_table_schema(
self.datasource_id, table_name)['columns'])
table_id_col = next(i for i, c in enumerate(table_schema)
if c['name'] == 'id')
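# Illustrative usage sketch (hypothetical, not from the original file):
# a driver test typically passes a datasource table name plus a callable
# fetching the same data from the service API, e.g.
#
#     self.check_service_data_against_congress_table(
#         'flavors',
#         lambda: self.os_admin.flavors_client.list_flavors(
#             detail=True)['flavors'])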
def _check_data():
# Fetch data each time, because the test may run before the service has data
service_data = service_data_fetch_func()
if check_nonempty and len(service_data) == 0:
LOG.debug('Congress %s table source service data is empty. '
'Unable to check data.', table_name)
return False
LOG.debug('Congress %s table source service data: %s',
table_name, service_data)
table_data = (
self.os_admin.congress_client.list_datasource_rows(
self.datasource_id, table_name)['results'])
LOG.debug('Congress %s table data: %s', table_name, table_data)
# check same cardinality
if len(service_data) != len(table_data):
LOG.debug('Cardinality mismatch between congress %s '
'table and service data', table_name)
return False
# construct map from id to service data items
service_data_map = {}
for data_item in service_data:
service_data_map[data_item['id']] = data_item
for row in table_data:
try:
service_item = service_data_map[row['data'][table_id_col]]
except KeyError:
return False
for index in range(len(table_schema)):
# case: key is not present in service_item; allow it if expected
# (sometimes an object won't have a key when the value is not set,
# e.g. description not set)
if (str(row['data'][index]) == 'None' and
table_schema[index][
'name'] in missing_attributes_allowed and
table_schema[index]['name'] not in service_item):
return True
# normal case: service_item value must equal
# congress table value
if (str(row['data'][index]) !=
str(service_item[table_schema[index]['name']])):
return False
return True
if not test_utils.call_until_true(
func=_check_data, duration=100, sleep_for=4):
raise exceptions.TimeoutException("Data did not converge in time, "
"or the server failed")
def check_service_data_against_congress_subtable(
self, table_name, service_data_fetch_func,
service_subdata_attribute):
def _check_data():
# Fetch data each time, because the test may run before the service has data
service_data = service_data_fetch_func()
LOG.debug('Congress %s table source service data: %s',
table_name, service_data)
table_data = (
self.os_admin.congress_client.list_datasource_rows(
self.datasource_id, table_name)['results'])
LOG.debug('Congress %s table data: %s', table_name, table_data)
# construct map from id to service data items
service_data_map = {}
for data_item in service_data:
service_data_map[data_item['id']] = data_item[
service_subdata_attribute]
expected_number_of_rows = 0
for row in table_data:
row_id, row_data = row['data'][0], row['data'][1]
service_subdata = service_data_map.get(row_id)
if not service_subdata or row_data not in service_subdata:
# congress table has item not in service data.
LOG.debug('Congress %s table has row (%s, %s) not in '
'service data', table_name, row_id, row_data)
return False
expected_number_of_rows += len(service_subdata)
# check cardinality
if expected_number_of_rows != len(table_data):
LOG.debug('Cardinality mismatch between congress %s '
'table and service data', table_name)
return False
return True
if not test_utils.call_until_true(
func=_check_data,
duration=100, sleep_for=5):
raise exceptions.TimeoutException("Data did not converge in time, "
"or the server failed")
@decorators.attr(type='smoke')
def test_update_no_error(self):
if not test_utils.call_until_true(
func=lambda: self.check_datasource_no_error(
self.datasource_name),
duration=30, sleep_for=5):
raise exceptions.TimeoutException('Datasource could not poll '
'without error.')


@ -1,294 +0,0 @@
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
import time
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from congress_tempest_plugin.tests.scenario import helper
from congress_tempest_plugin.tests.scenario import manager_congress
CONF = config.CONF
class TestPolicyBasicOps(manager_congress.ScenarioPolicyBase):
@classmethod
def skip_checks(cls):
super(TestPolicyBasicOps, cls).skip_checks()
if not (CONF.network.project_networks_reachable or
CONF.network.public_network_id):
msg = ('Either project_networks_reachable must be "true", or '
'public_network_id must be defined.')
cls.enabled = False
raise cls.skipException(msg)
def setUp(self):
super(TestPolicyBasicOps, self).setUp()
self.keypairs = {}
self.servers = []
def _create_test_server(self, name=None):
image_ref = CONF.compute.image_ref
flavor_ref = CONF.compute.flavor_ref
keypair = self.create_keypair()
security_group = self._create_security_group()
security_groups = [{'name': security_group['name']}]
create_kwargs = {'key_name': keypair['name'],
'security_groups': security_groups}
instance = self.create_server(name=name,
image_id=image_ref,
flavor=flavor_ref,
wait_until='ACTIVE',
**create_kwargs)
return instance
@decorators.attr(type='smoke')
@utils.services('compute', 'network')
def test_execution_action(self):
metadata = {'testkey1': 'value3'}
res = {'meta': {'testkey1': 'value3'}}
server = self._create_test_server()
congress_client = self.os_admin.congress_client
servers_client = self.os_admin.servers_client
policy = self._create_random_policy()
service = 'nova'
action = 'servers.set_meta'
action_args = {'args': {'positional': [],
'named': {'server': server['id'],
'metadata': metadata}}}
body = action_args
f = lambda: servers_client.show_server_metadata_item(server['id'],
'testkey1')
# execute via datasource api
body.update({'name': action})
congress_client.execute_datasource_action(service, "execute", body)
helper.retry_check_function_return_value(f, res)
# execute via policy api
body.update({'name': service + ':' + action})
congress_client.execute_policy_action(policy, "execute", False,
False, body)
helper.retry_check_function_return_value(f, res)
@decorators.attr(type='smoke')
@utils.services('compute', 'network')
def test_policy_basic_op(self):
self._setup_network_and_servers()
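# The rule below maps each port to the names of its security groups by
# joining three neutronv2 tables: ports and security_group_port_bindings
# on the port id, then security_groups on the security group id.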
body = {"rule": "port_security_group(id, security_group_name) "
":-neutronv2:ports(id, tenant_id, name, network_id,"
"mac_address, admin_state_up, status, device_id, "
"device_owner),"
"neutronv2:security_group_port_bindings(id, "
"security_group_id), neutronv2:security_groups("
"security_group_id, tenant_id1, security_group_name,"
"description)"}
results = self.os_admin.congress_client.create_policy_rule(
'classification', body)
rule_id = results['id']
self.addCleanup(
self.os_admin.congress_client.delete_policy_rule,
'classification', rule_id)
# Find the ports on this server
ports = self.os_admin.ports_client.list_ports(
device_id=self.servers[0]['id'])['ports']
def check_data():
results = self.os_admin.congress_client.list_policy_rows(
'classification', 'port_security_group')
for row in results['results']:
if (row['data'][0] == ports[0]['id'] and
row['data'][1] ==
self.servers[0]['security_groups'][0]['name']):
return True
else:
return False
time.sleep(65) # sleep for replicated PE sync
# Note(ekcs): do not use retry because we want to make sure the call
# succeeds on the first try after adequate time.
# If retry were used, it may pass based on succeeding on one replica but
# failing on all others.
self.assertTrue(check_data(),
"Data did not converge in time, or the server failed")
@decorators.attr(type='smoke')
@utils.services('compute', 'network')
@testtools.skipUnless(CONF.congress_feature_enabled.nova_driver,
'Test involving Nova driver skipped for queens.')
# queens flag for skipping nova driver tests because
# congress nova driver in queens does not work with the new
# novaclient 10.1.0 now part of upper-constraints
# https://review.opendev.org/#/c/571540/
def test_reactive_enforcement(self):
servers_client = self.os_admin.servers_client
server_name = 'server_under_test'
server = self._create_test_server(name=server_name)
policy_name = self._create_random_policy()
meta_key = 'meta_test_key1'
meta_val = 'value1'
meta_data = {'meta': {meta_key: meta_val}}
rules = [
'execute[nova:servers_set_meta(id, "%s", "%s")] :- '
'test_servers(id)' % (meta_key, meta_val),
'test_servers(id) :- '
'nova:servers(id, name, host_id, status, tenant_id,'
'user_id, image_id, flavor_id, zone, host_name),'
'equal(name, "%s")' % server_name]
for rule in rules:
self._create_policy_rule(policy_name, rule)
f = lambda: servers_client.show_server_metadata_item(server['id'],
meta_key)
time.sleep(80) # sleep for replicated PE sync
# Note: reactive enforcement seems to take a bit longer; do not use
# retry because we want to make sure the call succeeds on the first
# try after adequate time. If retry were used, it may pass based on
# succeeding on one replica but failing on all others.
self.assertEqual(f(), meta_data)
class TestPolicyLibraryBasicOps(manager_congress.ScenarioPolicyBase):
@decorators.attr(type='smoke')
def test_policy_library_basic_op(self):
response = self.os_admin.congress_client.list_library_policy()
initial_state = response['results']
self.assertGreater(
len(initial_state), 0, 'policy library shows no policies, '
'indicating a failed load on startup.')
test_policy = {
"name": "test_policy",
"description": "test policy description",
"kind": "nonrecursive",
"abbreviation": "abbr",
"rules": [{"rule": "p(x) :- q(x)", "comment": "test comment",
"name": "test name"},
{"rule": "p(x) :- q2(x)", "comment": "test comment2",
"name": "test name2"}]
}
response = self.os_admin.congress_client.create_library_policy(
test_policy)
policy_id = response['id']
test_policy['id'] = policy_id
def delete_if_found(id_):
try:
self.os_admin.congress_client.delete_library_policy(id_)
except exceptions.NotFound:
pass
self.addCleanup(delete_if_found, policy_id)
response = self.os_admin.congress_client.list_library_policy()
new_state = response['results']
self.assertEqual(len(initial_state) + 1, len(new_state),
'new library policy not reflected in list results')
self.assertIn(test_policy, new_state,
'new library policy not reflected in list results')
self.os_admin.congress_client.delete_library_policy(policy_id)
response = self.os_admin.congress_client.list_library_policy()
new_state = response['results']
self.assertEqual(len(initial_state), len(new_state),
'library policy delete not reflected in list results')
self.assertNotIn(test_policy, new_state,
'library policy delete not reflected in list results')
@decorators.attr(type='smoke')
def test_create_library_policies(self):
'''Test the library policies by loading them into the policy engine'''
# library policies (by name) to skip in this test, e.g. because they
# depend on datasources not available in the gate
skip_names_list = []
response = self.os_admin.congress_client.list_library_policy()
library_policies = response['results']
for library_policy in library_policies:
if library_policy['name'] not in skip_names_list:
resp = self.os_admin.congress_client.create_policy(
body=None, params={'library_policy': library_policy['id']})
self.assertEqual(resp.response['status'], '201',
'Policy activation failed')
self.addCleanup(self.os_admin.congress_client.delete_policy,
resp['id'])
class TestCongressDataSources(manager_congress.ScenarioPolicyBase):
@classmethod
def skip_checks(cls):
super(TestCongressDataSources, cls).skip_checks()
if not (CONF.network.project_networks_reachable or
CONF.network.public_network_id):
msg = ('Either project_networks_reachable must be "true", or '
'public_network_id must be defined.')
cls.enabled = False
raise cls.skipException(msg)
def test_all_loaded_datasources_are_initialized(self):
@helper.retry_on_exception
def _check_all_datasources_are_initialized():
datasources = self.os_admin.congress_client.list_datasources()
for datasource in datasources['results']:
results = (
self.os_admin.congress_client.list_datasource_status(
datasource['id']))
if results['initialized'] != 'True':
return False
return True
if not test_utils.call_until_true(
func=_check_all_datasources_are_initialized,
duration=100, sleep_for=5):
raise exceptions.TimeoutException("Data did not converge in time, "
"or the server failed")
def test_all_datasources_have_tables(self):
@helper.retry_on_exception
def check_data():
datasources = self.os_admin.congress_client.list_datasources()
for datasource in datasources['results']:
results = (
self.os_admin.congress_client.list_datasource_tables(
datasource['id']))
# NOTE(arosen): if there are no results here we return False, as
# something is wrong with the driver: it doesn't expose any tables.
if not results['results']:
return False
return True
if not test_utils.call_until_true(func=check_data,
duration=100, sleep_for=5):
raise exceptions.TimeoutException("Data did not converge in time, "
"or the server failed")


@ -1,181 +0,0 @@
# Copyright 2017 Orange. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"Tempest tests for Z3 policies"
import random
import string
import testtools
from tempest import config
from tempest.lib import decorators
from congress_tempest_plugin.tests.scenario import manager_congress
CONF = config.CONF
class TestZ3(manager_congress.ScenarioPolicyBase):
"""Tests for Z3 policies with an actual Z3 engine"""
@classmethod
def skip_checks(cls):
"""Checks that Z3 is available"""
super(TestZ3, cls).skip_checks()
if not CONF.congressz3.enabled:
msg = ("%s skipped as z3 is not available" %
cls.__class__.__name__)
raise cls.skipException(msg)
def _add_policy(self, seed, kind="nonrecursive"):
"Shortcut for adding a policy with a random name"
client = self.os_admin.congress_client
def cleanup(policy_name):
"""Removes the policy"""
result = client.list_policy_rules(policy_name)
for rule in result['results']:
client.delete_policy_rule(policy_name, rule['id'])
client.delete_policy(policy_name)
suffix = ''.join(
random.choice(string.ascii_lowercase)
for x in range(10))
policy_name = "%s_%s" % (seed, suffix)
body = {"name": policy_name, "kind": kind}
resp = client.create_policy(body)
self.addCleanup(cleanup, resp['name'])
return resp['name']
def _add_rule(self, policy_name, rule):
"""Shortcut to add a rule to a policy"""
self.os_admin.congress_client.create_policy_rule(
policy_name, {'rule': rule})
@decorators.attr(type='smoke')
def test_z3_recursivity(self):
"""Recursion in Z3
This test checks Z3 in isolation on a simple recursive problem.
"""
computations = self._add_policy("recur", "z3")
expected = [(1, 2), (1, 3), (1, 4), (1, 5), (1, 6),
(2, 3), (2, 4), (2, 5), (2, 6),
(4, 5), (4, 6)]
for pair in [(1, 2), (2, 3), (2, 4), (4, 5), (4, 6)]:
self._add_rule(computations, 'link(%d, %d)' % pair)
self._add_rule(computations, 'path(x, y) :- link(x, y)')
self._add_rule(computations,
'path(x, y) :- link(x, z), path(z, y)')
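# 'path' is the transitive closure of 'link': the first rule copies the
# direct links and the second extends a link by an existing path, so the
# fixpoint contains exactly the pairs listed in 'expected' above.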
result = self.os_admin.congress_client.list_policy_rows(
computations, "path")
extracted = [(row['data'][0], row['data'][1])
for row in result['results']]
self.assertEqual(expected, sorted(extracted))
@decorators.attr(type='smoke')
def test_z3_inter_policies(self):
"""Inter-policy calls
This test creates a non-Z3 policy that populates a table.
That table is used by a Z3 policy to compute a transitive closure.
Another policy then uses the computed result to create a result table,
using builtins not available to Z3.
"""
facts = self._add_policy("fact")
formatter = self._add_policy("form")
computations = self._add_policy("comp", "z3")
expected = []
for pair in [(1, 2), (2, 3), (2, 4), (4, 5), (4, 6)]:
self._add_rule(facts, 'link("N%d", "N%d")' % pair)
for pair in [(1, 2), (1, 3), (1, 4), (1, 5), (1, 6),
(2, 3), (2, 4), (2, 5), (2, 6),
(4, 5), (4, 6)]:
expected.append(u'N%d - N%d' % pair)
self._add_rule(computations, 'path(x, y) :- %s:link(x, y)' % facts)
self._add_rule(computations,
'path(x, y) :- %s:link(x, z), path(z, y)' % facts)
self._add_rule(
formatter,
('res(x) :- %s:path(y, z), builtin:concat(y, " - ", t), '
'builtin:concat(t, z, x)') % computations)
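# builtin:concat joins only two strings at a time, so "y - z" is built
# in two steps: first t = y + " - ", then x = t + z.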
result = self.os_admin.congress_client.list_policy_rows(
formatter, "res")
extracted = [row['data'][0] for row in result['results']]
self.assertEqual(expected, sorted(extracted))
@decorators.attr(type='smoke')
@testtools.skipUnless(CONF.congressz3.support_builtins,
'Z3 builtins not supported in this version.')
def test_z3_builtins(self):
"""Basic builtins test
This test verifies some facts about basic arithmetic and bitwise builtins.
"""
policy = self._add_policy("comp", "z3")
x = 456
y = 123
self._add_rule(policy, 'x(%d)' % x)
self._add_rule(policy, 'y(%d)' % y)
self._add_rule(
policy,
('arith(a,b,c,d,e,f) :- x(x), y(y), builtin:plus(x, y, a), '
'builtin:minus(x, y, b), builtin:mul(x, y, c), '
'builtin:and(x,y,d), builtin:or(x,y,e), builtin:bnot(x,f)'))
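# plus/minus/mul exercise integer arithmetic and and/or/bnot exercise
# bitwise operations; 'expected' below masks ~x with 0xffffffff,
# presumably because this Z3 backend models integers as 32-bit
# bit-vectors.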
result = self.os_admin.congress_client.list_policy_rows(
policy, "arith")
extracted = [row['data'] for row in result['results']]
expected = [[x + y, x - y, x * y, x & y, x | y, ~x & 0xffffffff]]
self.assertEqual(expected, extracted)
@decorators.attr(type='smoke')
@testtools.skipUnless(CONF.congressz3.support_builtins,
'Z3 builtins not supported in this version.')
def test_z3_compare_builtins(self):
"""Basic builtins test for comparison
This test verifies some facts about comparison builtins.
"""
policy = self._add_policy("comp", "z3")
x = 456
y = 123
self._add_rule(policy, 'x(%d)' % x)
self._add_rule(policy, 'y(%d)' % y)
checks = [
('equal(x,y)', x == y),
('equal(x,x)', x == x),
('lt(x,y)', x < y),
('lt(y,x)', y < x),
('lt(x,x)', x < x),
('lteq(x,y)', x <= y),
('lteq(y,x)', y <= x),
('lteq(x,x)', x <= x),
('gt(x,y)', x > y),
('gt(y,x)', y > x),
('gt(x,x)', x > x),
('gteq(x,y)', x >= y),
('gteq(y,x)', y >= x),
('gteq(x,x)', x >= x)
]
for (num, (pred, _)) in enumerate(checks):
self._add_rule(
policy,
('check(%d) :- x(x), y(y), builtin:%s' % (num, pred)))
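# check(num) is derivable exactly when comparison number 'num' holds for
# x = 456 and y = 123; 'expected' below recomputes the same truth values
# in Python for the assertion.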
result = self.os_admin.congress_client.list_policy_rows(
policy, "check")
extracted = [row['data'][0] for row in result['results']]
extracted.sort()
expected = [n for (n, (_, c)) in enumerate(checks) if c]
self.assertEqual(expected, extracted)


@ -1,5 +0,0 @@
====================
Administrators guide
====================
Administrators guide for congress-tempest-plugin.


@ -1,5 +0,0 @@
================================
Command line interface reference
================================
CLI reference for congress-tempest-plugin.


@ -1,80 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'openstackdocstheme',
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'congress-tempest-plugin'
copyright = u'2017, OpenStack Developers'
# openstackdocstheme options
repository_name = 'openstack/congress-tempest-plugin'
bug_project = 'congress'
bug_tag = ''
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
html_theme = 'openstackdocs'
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Developers', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}


@ -1,5 +0,0 @@
=============
Configuration
=============
Configuration of congress-tempest-plugin.


@ -1,4 +0,0 @@
============
Contributing
============
.. include:: ../../../CONTRIBUTING.rst


@ -1,9 +0,0 @@
===========================
Contributor Documentation
===========================
.. toctree::
:maxdepth: 2
contributing


@ -1,30 +0,0 @@
.. congress-tempest-plugin documentation master file, created by
sphinx-quickstart on Tue Jul 9 22:26:36 2013.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
=======================================================
Welcome to the documentation of congress_tempest_plugin
=======================================================
Contents:
.. toctree::
:maxdepth: 2
readme
install/index
library/index
contributor/index
configuration/index
cli/index
user/index
admin/index
reference/index
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`


@ -1,10 +0,0 @@
2. Edit the ``/etc/congress_tempest_plugin/congress_tempest_plugin.conf`` file and complete the following
actions:
* In the ``[database]`` section, configure database access:
.. code-block:: ini
[database]
...
connection = mysql+pymysql://congress_tempest_plugin:CONGRESS_TEMPEST_PLUGIN_DBPASS@controller/congress_tempest_plugin


@ -1,75 +0,0 @@
Prerequisites
-------------
Before you install and configure the congress service,
you must create a database, service credentials, and API endpoints.
#. To create the database, complete these steps:
* Use the database access client to connect to the database
server as the ``root`` user:
.. code-block:: console
$ mysql -u root -p
* Create the ``congress_tempest_plugin`` database:
.. code-block:: none
CREATE DATABASE congress_tempest_plugin;
* Grant proper access to the ``congress_tempest_plugin`` database:
.. code-block:: none
GRANT ALL PRIVILEGES ON congress_tempest_plugin.* TO 'congress_tempest_plugin'@'localhost' \
IDENTIFIED BY 'CONGRESS_TEMPEST_PLUGIN_DBPASS';
GRANT ALL PRIVILEGES ON congress_tempest_plugin.* TO 'congress_tempest_plugin'@'%' \
IDENTIFIED BY 'CONGRESS_TEMPEST_PLUGIN_DBPASS';
Replace ``CONGRESS_TEMPEST_PLUGIN_DBPASS`` with a suitable password.
* Exit the database access client.
.. code-block:: none
exit;
#. Source the ``admin`` credentials to gain access to
admin-only CLI commands:
.. code-block:: console
$ . admin-openrc
#. To create the service credentials, complete these steps:
* Create the ``congress_tempest_plugin`` user:
.. code-block:: console
$ openstack user create --domain default --password-prompt congress_tempest_plugin
* Add the ``admin`` role to the ``congress_tempest_plugin`` user:
.. code-block:: console
$ openstack role add --project service --user congress_tempest_plugin admin
* Create the congress_tempest_plugin service entities:
.. code-block:: console
$ openstack service create --name congress_tempest_plugin --description "congress" congress
#. Create the congress service API endpoints:
.. code-block:: console
$ openstack endpoint create --region RegionOne \
congress public http://controller:XXXX/vY/%\(tenant_id\)s
$ openstack endpoint create --region RegionOne \
congress internal http://controller:XXXX/vY/%\(tenant_id\)s
$ openstack endpoint create --region RegionOne \
congress admin http://controller:XXXX/vY/%\(tenant_id\)s


@ -1,9 +0,0 @@
=========================
congress service overview
=========================
The congress service provides...
The congress service consists of the following components:
``congress_tempest_plugin-api`` service
Accepts and responds to end user compute API calls...


@ -1,17 +0,0 @@
===================================
congress service installation guide
===================================
.. toctree::
:maxdepth: 2
get_started.rst
install.rst
verify.rst
next-steps.rst
The congress service (congress_tempest_plugin) provides...
This chapter assumes a working setup of OpenStack following the
`OpenStack Installation Tutorial
<https://docs.openstack.org/project-install-guide/ocata/>`_.


@ -1,34 +0,0 @@
.. _install-obs:
Install and configure for openSUSE and SUSE Linux Enterprise
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This section describes how to install and configure the congress service
for openSUSE Leap 42.1 and SUSE Linux Enterprise Server 12 SP1.
.. include:: common_prerequisites.rst
Install and configure components
--------------------------------
#. Install the packages:
.. code-block:: console
# zypper --quiet --non-interactive install
.. include:: common_configure.rst
Finalize installation
---------------------
Start the congress services and configure them to start when
the system boots:
.. code-block:: console
# systemctl enable openstack-congress_tempest_plugin-api.service
# systemctl start openstack-congress_tempest_plugin-api.service


@ -1,33 +0,0 @@
.. _install-rdo:
Install and configure for Red Hat Enterprise Linux and CentOS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This section describes how to install and configure the congress service
for Red Hat Enterprise Linux 7 and CentOS 7.
.. include:: common_prerequisites.rst
Install and configure components
--------------------------------
#. Install the packages:
.. code-block:: console
# yum install
.. include:: common_configure.rst
Finalize installation
---------------------
Start the congress services and configure them to start when
the system boots:
.. code-block:: console
# systemctl enable openstack-congress_tempest_plugin-api.service
# systemctl start openstack-congress_tempest_plugin-api.service


@ -1,31 +0,0 @@
.. _install-ubuntu:
Install and configure for Ubuntu
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This section describes how to install and configure the congress
service for Ubuntu 14.04 (LTS).
.. include:: common_prerequisites.rst
Install and configure components
--------------------------------
#. Install the packages:
.. code-block:: console
# apt-get update
# apt-get install
.. include:: common_configure.rst
Finalize installation
---------------------
Restart the congress services:
.. code-block:: console
# service openstack-congress_tempest_plugin-api restart


@ -1,20 +0,0 @@
.. _install:
Install and configure
~~~~~~~~~~~~~~~~~~~~~
This section describes how to install and configure the
congress service, code-named congress_tempest_plugin, on the controller node.
This section assumes that you already have a working OpenStack
environment with at least the following components installed:
.. (add the appropriate services here and further notes)
Note that installation and configuration vary by distribution.
.. toctree::
:maxdepth: 2
install-obs.rst
install-rdo.rst
install-ubuntu.rst


@ -1,9 +0,0 @@
.. _next-steps:
Next steps
~~~~~~~~~~
Your OpenStack environment now includes the congress_tempest_plugin service.
To add additional services, see
https://docs.openstack.org/project-install-guide/ocata/.


@ -1,24 +0,0 @@
.. _verify:
Verify operation
~~~~~~~~~~~~~~~~
Verify operation of the congress service.
.. note::
Perform these commands on the controller node.
#. Source the ``admin`` project credentials to gain access to
admin-only CLI commands:
.. code-block:: console
$ . admin-openrc
#. List service components to verify successful launch and registration
of each process:
.. code-block:: console
$ openstack congress service list


@ -1,7 +0,0 @@
========
Usage
========
To use congress-tempest-plugin in a project::
import congress_tempest_plugin


@ -1 +0,0 @@
.. include:: ../../README.rst


@ -1,5 +0,0 @@
==========
References
==========
References for congress-tempest-plugin.


@ -1,5 +0,0 @@
===========
Users guide
===========
Users guide for congress-tempest-plugin.


@ -1,80 +0,0 @@
- hosts: primary
tasks:
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=**/*nose_results.html
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=**/*testr_results.html.gz
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=/.testrepository/tmp*
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=**/*testrepository.subunit.gz
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}/tox'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=/.tox/*/log/*
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=/logs/**
- --include=*/
- --exclude=*
- --prune-empty-dirs


@ -1,106 +0,0 @@
- hosts: all
name: Autoconverted job legacy-congress-dsvm-api-mysql from old job gate-congress-dsvm-api-mysql-ubuntu-xenial
tasks:
- name: Ensure legacy workspace directory
file:
path: '{{ ansible_user_dir }}/workspace'
state: directory
- shell:
cmd: |
set -e
set -x
cat > clonemap.yaml << EOF
clonemap:
- name: openstack/devstack-gate
dest: devstack-gate
EOF
/usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
https://opendev.org \
openstack/devstack-gate
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'
- shell:
cmd: |
set -e
set -x
cat << 'EOF' >>"/tmp/dg-local.conf"
[[local|localrc]]
enable_plugin heat https://opendev.org/openstack/heat
enable_plugin congress https://opendev.org/openstack/congress
enable_plugin murano https://opendev.org/openstack/murano
enable_plugin aodh https://opendev.org/openstack/aodh
enable_plugin mistral https://opendev.org/openstack/mistral
enable_plugin monasca-api https://opendev.org/openstack/monasca-api
enable_plugin neutron https://opendev.org/openstack/neutron
# To deploy congress as multi-process (api, pe, datasources)
CONGRESS_MULTIPROCESS_DEPLOYMENT=True
CONGRESS_EXPOSE_ENCRYPTION_KEY_FOR_TEST=True
LIBS_FROM_GIT=python-congressclient
ENABLE_CONGRESS_Z3=True
USE_Z3_RELEASE=4.7.1
TEMPEST_PLUGINS='/opt/stack/new/congress-tempest-plugin /opt/stack/new/telemetry-tempest-plugin /opt/stack/new/murano-tempest-plugin /opt/stack/new/heat-tempest-plugin /opt/stack/new/mistral-tempest-plugin /opt/stack/new/monasca-tempest-plugin'
EOF
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'
- shell:
cmd: |
set -e
set -x
ENABLED_SERVICES=s-proxy,s-object,s-container,s-account,
ENABLED_SERVICES+=neutron-qos
export SERVICE_TIMEOUT=120 # default too short for this job
export PYTHONUNBUFFERED=true
export DEVSTACK_GATE_TEMPEST=1
export DEVSTACK_GATE_TEMPEST_REGEX="congress_tempest_plugin"
export DEVSTACK_GATE_NEUTRON=1
export DEVSTACK_GATE_USE_PYTHON3=1
export PROJECTS="openstack/congress $PROJECTS"
export PROJECTS="openstack/congress-dashboard $PROJECTS"
export PROJECTS="openstack/python-congressclient $PROJECTS"
export PROJECTS="openstack/congress-tempest-plugin $PROJECTS"
export PROJECTS="openstack/murano $PROJECTS"
export PROJECTS="openstack/aodh $PROJECTS"
export PROJECTS="openstack/mistral $PROJECTS"
export PROJECTS="openstack/python-mistralclient $PROJECTS"
export PROJECTS="openstack/mistral-tempest-plugin $PROJECTS"
export PROJECTS="openstack/murano-dashboard $PROJECTS"
export PROJECTS="openstack/python-muranoclient $PROJECTS"
export PROJECTS="openstack/python-aodhclient $PROJECTS"
export PROJECTS="openstack/telemetry-tempest-plugin $PROJECTS"
export PROJECTS="openstack/murano-tempest-plugin $PROJECTS"
export PROJECTS="openstack/heat-tempest-plugin $PROJECTS"
export PROJECTS="openstack/monasca-api $PROJECTS"
export PROJECTS="openstack/monasca-persister $PROJECTS"
export PROJECTS="openstack/monasca-common $PROJECTS"
export PROJECTS="openstack/monasca-agent $PROJECTS"
export PROJECTS="openstack/monasca-notification $PROJECTS"
export PROJECTS="openstack/monasca-thresh $PROJECTS"
export PROJECTS="openstack/monasca-statsd $PROJECTS"
export PROJECTS="openstack/python-monascaclient $PROJECTS"
export PROJECTS="openstack/monasca-grafana-datasource $PROJECTS"
export PROJECTS="openstack/monasca-ui $PROJECTS"
export PROJECTS="openstack/monasca-tempest-plugin $PROJECTS"
export ENABLED_SERVICES
export BRANCH_OVERRIDE=default
if [ "$BRANCH_OVERRIDE" != "default" ] ; then
export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
fi
if [ "{{ database }}" == "postgresql" ] ; then
export DEVSTACK_GATE_POSTGRES=1
fi
cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
./safe-devstack-vm-gate-wrap.sh
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'


@ -1,80 +0,0 @@
- hosts: primary
tasks:
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=**/*nose_results.html
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=**/*testr_results.html.gz
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=/.testrepository/tmp*
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=**/*testrepository.subunit.gz
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}/tox'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=/.tox/*/log/*
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=/logs/**
- --include=*/
- --exclude=*
- --prune-empty-dirs


@ -1,89 +0,0 @@
- hosts: all
name: Autoconverted job legacy-congress-dsvm-py35-api-mysql from old job gate-congress-dsvm-py35-api-mysql-ubuntu-xenial-nv
tasks:
- name: Ensure legacy workspace directory
file:
path: '{{ ansible_user_dir }}/workspace'
state: directory
- shell:
cmd: |
set -e
set -x
cat > clonemap.yaml << EOF
clonemap:
- name: openstack/devstack-gate
dest: devstack-gate
EOF
/usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
https://opendev.org \
openstack/devstack-gate
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'
- shell:
cmd: |
set -e
set -x
cat << 'EOF' >>"/tmp/dg-local.conf"
[[local|localrc]]
# swift is not ready for python3 yet
disable_service s-account
disable_service s-container
disable_service s-object
disable_service s-proxy
# without Swift, c-bak cannot run (in the Gate at least)
disable_service c-bak
# aodh not ready for python3 yet
# enable_plugin aodh https://opendev.org/openstack/aodh
enable_plugin heat https://opendev.org/openstack/heat
enable_plugin congress https://opendev.org/openstack/congress
# disable murano on queens-py35 because of test instability
# enable_plugin murano https://opendev.org/openstack/murano
enable_plugin neutron https://opendev.org/openstack/neutron
# To deploy congress as multi-process (api, pe, datasources)
CONGRESS_MULTIPROCESS_DEPLOYMENT=True
CONGRESS_EXPOSE_ENCRYPTION_KEY_FOR_TEST=True
ENABLE_CONGRESS_Z3=True
USE_Z3_RELEASE=4.7.1
TEMPEST_PLUGINS='/opt/stack/new/congress-tempest-plugin /opt/stack/new/murano-tempest-plugin /opt/stack/new/heat-tempest-plugin'
EOF
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'
- shell:
cmd: |
set -e
set -x
export DEVSTACK_GATE_USE_PYTHON3=True
# swift is not ready for python3 yet
# ENABLED_SERVICES=s-proxy,s-object,s-container,s-account,
ENABLED_SERVICES+=neutron-qos
export PYTHONUNBUFFERED=true
export DEVSTACK_GATE_TEMPEST=1
export DEVSTACK_GATE_TEMPEST_REGEX="congress_tempest_plugin"
export DEVSTACK_GATE_NEUTRON=1
export PROJECTS="openstack/congress $PROJECTS"
export PROJECTS="openstack/congress-dashboard $PROJECTS"
export PROJECTS="openstack/python-congressclient $PROJECTS"
export PROJECTS="openstack/congress-tempest-plugin $PROJECTS"
export PROJECTS="openstack/murano $PROJECTS"
export PROJECTS="openstack/murano-dashboard $PROJECTS"
export PROJECTS="openstack/python-muranoclient $PROJECTS"
export PROJECTS="openstack/murano-tempest-plugin $PROJECTS"
export PROJECTS="openstack/heat-tempest-plugin $PROJECTS"
export ENABLED_SERVICES
export BRANCH_OVERRIDE=stable/queens
if [ "$BRANCH_OVERRIDE" != "default" ] ; then
export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
fi
cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
./safe-devstack-vm-gate-wrap.sh
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'


@ -1,80 +0,0 @@
- hosts: primary
tasks:
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=**/*nose_results.html
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=**/*testr_results.html.gz
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=/.testrepository/tmp*
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=**/*testrepository.subunit.gz
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}/tox'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=/.tox/*/log/*
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=/logs/**
- --include=*/
- --exclude=*
- --prune-empty-dirs


@ -1,88 +0,0 @@
- hosts: all
name: Autoconverted job legacy-congress-dsvm-py35-api-mysql from old job gate-congress-dsvm-py35-api-mysql-ubuntu-xenial-nv
tasks:
- name: Ensure legacy workspace directory
file:
path: '{{ ansible_user_dir }}/workspace'
state: directory
- shell:
cmd: |
set -e
set -x
cat > clonemap.yaml << EOF
clonemap:
- name: openstack/devstack-gate
dest: devstack-gate
EOF
/usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
https://opendev.org \
openstack/devstack-gate
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'
- shell:
cmd: |
set -e
set -x
cat << 'EOF' >>"/tmp/dg-local.conf"
[[local|localrc]]
# swift is not ready for python3 yet
disable_service s-account
disable_service s-container
disable_service s-object
disable_service s-proxy
# without Swift, c-bak cannot run (in the Gate at least)
disable_service c-bak
# aodh not ready for python3 yet
# enable_plugin aodh https://opendev.org/openstack/aodh
enable_plugin heat https://opendev.org/openstack/heat
enable_plugin congress https://opendev.org/openstack/congress
enable_plugin murano https://opendev.org/openstack/murano
enable_plugin neutron https://opendev.org/openstack/neutron
# To deploy congress as multi-process (api, pe, datasources)
CONGRESS_MULTIPROCESS_DEPLOYMENT=True
CONGRESS_EXPOSE_ENCRYPTION_KEY_FOR_TEST=True
ENABLE_CONGRESS_Z3=True
USE_Z3_RELEASE=4.7.1
TEMPEST_PLUGINS='/opt/stack/new/congress-tempest-plugin /opt/stack/new/murano-tempest-plugin /opt/stack/new/heat-tempest-plugin'
EOF
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'
- shell:
cmd: |
set -e
set -x
export DEVSTACK_GATE_USE_PYTHON3=True
# swift is not ready for python3 yet
# ENABLED_SERVICES=s-proxy,s-object,s-container,s-account,
ENABLED_SERVICES+=neutron-qos
export PYTHONUNBUFFERED=true
export DEVSTACK_GATE_TEMPEST=1
export DEVSTACK_GATE_TEMPEST_REGEX="congress_tempest_plugin"
export DEVSTACK_GATE_NEUTRON=1
export PROJECTS="openstack/congress $PROJECTS"
export PROJECTS="openstack/congress-dashboard $PROJECTS"
export PROJECTS="openstack/python-congressclient $PROJECTS"
export PROJECTS="openstack/congress-tempest-plugin $PROJECTS"
export PROJECTS="openstack/murano $PROJECTS"
export PROJECTS="openstack/murano-dashboard $PROJECTS"
export PROJECTS="openstack/python-muranoclient $PROJECTS"
export PROJECTS="openstack/murano-tempest-plugin $PROJECTS"
export PROJECTS="openstack/heat-tempest-plugin $PROJECTS"
export ENABLED_SERVICES
export BRANCH_OVERRIDE=stable/rocky
if [ "$BRANCH_OVERRIDE" != "default" ] ; then
export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
fi
cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
./safe-devstack-vm-gate-wrap.sh
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'


@ -1,80 +0,0 @@
- hosts: primary
tasks:
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=**/*nose_results.html
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=**/*testr_results.html.gz
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=/.testrepository/tmp*
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=**/*testrepository.subunit.gz
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}/tox'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=/.tox/*/log/*
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=/logs/**
- --include=*/
- --exclude=*
- --prune-empty-dirs


@ -1,89 +0,0 @@
- hosts: all
name: Autoconverted job legacy-congress-dsvm-py35-api-mysql from old job gate-congress-dsvm-py35-api-mysql-ubuntu-xenial-nv
tasks:
- name: Ensure legacy workspace directory
file:
path: '{{ ansible_user_dir }}/workspace'
state: directory
- shell:
cmd: |
set -e
set -x
cat > clonemap.yaml << EOF
clonemap:
- name: openstack/devstack-gate
dest: devstack-gate
EOF
/usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
https://opendev.org \
openstack/devstack-gate
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'
- shell:
cmd: |
set -e
set -x
cat << 'EOF' >>"/tmp/dg-local.conf"
[[local|localrc]]
# swift is not ready for python3 yet
disable_service s-account
disable_service s-container
disable_service s-object
disable_service s-proxy
# without Swift, c-bak cannot run (in the Gate at least)
disable_service c-bak
# aodh not ready for python3 yet
# enable_plugin aodh https://opendev.org/openstack/aodh
enable_plugin heat https://opendev.org/openstack/heat
enable_plugin congress https://opendev.org/openstack/congress
enable_plugin murano https://opendev.org/openstack/murano
enable_plugin neutron https://opendev.org/openstack/neutron
# To deploy congress as multi-process (api, pe, datasources)
CONGRESS_MULTIPROCESS_DEPLOYMENT=True
CONGRESS_EXPOSE_ENCRYPTION_KEY_FOR_TEST=True
LIBS_FROM_GIT=python-congressclient
ENABLE_CONGRESS_Z3=True
USE_Z3_RELEASE=4.7.1
TEMPEST_PLUGINS='/opt/stack/new/congress-tempest-plugin /opt/stack/new/murano-tempest-plugin /opt/stack/new/heat-tempest-plugin'
EOF
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'
- shell:
cmd: |
set -e
set -x
export DEVSTACK_GATE_USE_PYTHON3=True
# swift is not ready for python3 yet
# ENABLED_SERVICES=s-proxy,s-object,s-container,s-account,
ENABLED_SERVICES+=neutron-qos
export PYTHONUNBUFFERED=true
export DEVSTACK_GATE_TEMPEST=1
export DEVSTACK_GATE_TEMPEST_REGEX="congress_tempest_plugin"
export DEVSTACK_GATE_NEUTRON=1
export PROJECTS="openstack/congress $PROJECTS"
export PROJECTS="openstack/congress-dashboard $PROJECTS"
export PROJECTS="openstack/python-congressclient $PROJECTS"
export PROJECTS="openstack/congress-tempest-plugin $PROJECTS"
export PROJECTS="openstack/murano $PROJECTS"
export PROJECTS="openstack/murano-dashboard $PROJECTS"
export PROJECTS="openstack/python-muranoclient $PROJECTS"
export PROJECTS="openstack/murano-tempest-plugin $PROJECTS"
export PROJECTS="openstack/heat-tempest-plugin $PROJECTS"
export ENABLED_SERVICES
export BRANCH_OVERRIDE=default
if [ "$BRANCH_OVERRIDE" != "default" ] ; then
export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
fi
cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
./safe-devstack-vm-gate-wrap.sh
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'


@ -1,80 +0,0 @@
- hosts: primary
tasks:
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=**/*nose_results.html
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=**/*testr_results.html.gz
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=/.testrepository/tmp*
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=**/*testrepository.subunit.gz
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}/tox'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=/.tox/*/log/*
- --include=*/
- --exclude=*
- --prune-empty-dirs
- name: Copy files from {{ ansible_user_dir }}/workspace/ on node
synchronize:
src: '{{ ansible_user_dir }}/workspace/'
dest: '{{ zuul.executor.log_root }}'
mode: pull
copy_links: true
verify_host: true
rsync_opts:
- --include=/logs/**
- --include=*/
- --exclude=*
- --prune-empty-dirs


@ -1,91 +0,0 @@
- hosts: all
name: Congress pe-replicated base
tasks:
- name: Ensure legacy workspace directory
file:
path: '{{ ansible_user_dir }}/workspace'
state: directory
- shell:
cmd: |
set -e
set -x
cat > clonemap.yaml << EOF
clonemap:
- name: openstack/devstack-gate
dest: devstack-gate
EOF
/usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
https://opendev.org \
openstack/devstack-gate
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'
- shell:
cmd: |
set -e
set -x
cat << 'EOF' >>"/tmp/dg-local.conf"
[[local|localrc]]
enable_plugin heat https://opendev.org/openstack/heat
enable_plugin congress https://opendev.org/openstack/congress
enable_plugin murano https://opendev.org/openstack/murano
enable_plugin aodh https://opendev.org/openstack/aodh
enable_plugin mistral https://opendev.org/openstack/mistral
enable_plugin neutron https://opendev.org/openstack/neutron
CONGRESS_REPLICATED=True
# To deploy congress as multi-process (api, pe, datasources)
CONGRESS_MULTIPROCESS_DEPLOYMENT=True
CONGRESS_EXPOSE_ENCRYPTION_KEY_FOR_TEST=True
LIBS_FROM_GIT=python-congressclient
TEMPEST_PLUGINS='/opt/stack/new/congress-tempest-plugin /opt/stack/new/telemetry-tempest-plugin /opt/stack/new/murano-tempest-plugin /opt/stack/new/heat-tempest-plugin /opt/stack/new/mistral-tempest-plugin'
EOF
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'
- shell:
cmd: |
set -e
set -x
ENABLED_SERVICES=s-proxy,s-object,s-container,s-account,
ENABLED_SERVICES+=neutron-qos
export PYTHONUNBUFFERED=true
export DEVSTACK_GATE_TEMPEST=1
export DEVSTACK_GATE_USE_PYTHON3=1
export DEVSTACK_GATE_TEMPEST_REGEX="congress_tempest_plugin"
export DEVSTACK_GATE_NEUTRON=1
export PROJECTS="openstack/congress $PROJECTS"
export PROJECTS="openstack/congress-dashboard $PROJECTS"
export PROJECTS="openstack/python-congressclient $PROJECTS"
export PROJECTS="openstack/congress-tempest-plugin $PROJECTS"
export PROJECTS="openstack/murano $PROJECTS"
export PROJECTS="openstack/mistral $PROJECTS"
export PROJECTS="openstack/python-mistralclient $PROJECTS"
export PROJECTS="openstack/mistral-tempest-plugin $PROJECTS"
export PROJECTS="openstack/murano-dashboard $PROJECTS"
export PROJECTS="openstack/python-muranoclient $PROJECTS"
export PROJECTS="openstack/aodh $PROJECTS"
export PROJECTS="openstack/python-aodhclient $PROJECTS"
export PROJECTS="openstack/telemetry-tempest-plugin $PROJECTS"
export PROJECTS="openstack/murano-tempest-plugin $PROJECTS"
export PROJECTS="openstack/heat-tempest-plugin $PROJECTS"
export ENABLED_SERVICES
export BRANCH_OVERRIDE=default
if [ "$BRANCH_OVERRIDE" != "default" ] ; then
export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
fi
if [ "{{ database }}" == "postgresql" ] ; then
export DEVSTACK_GATE_POSTGRES=1
fi
cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
./safe-devstack-vm-gate-wrap.sh
executable: /bin/bash
chdir: '{{ ansible_user_dir }}/workspace'
environment: '{{ zuul | zuul_legacy_vars }}'

View File

@ -1,6 +0,0 @@
---
upgrade:
- |
Python 2.7 support has been dropped. The last release of
congress-tempest-plugin to support Python 2.7 is OpenStack Train. The
minimum version of Python now supported by congress-tempest-plugin is
Python 3.5.

268
releasenotes/source/conf.py
View File

@ -1,268 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'openstackdocstheme',
'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
copyright = u'2017, OpenStack Developers'
# openstackdocstheme options
repository_name = 'openstack/congress-tempest-plugin'
bug_project = 'congress'
bug_tag = ''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# The full version, including alpha/beta/rc tags.
release = ''
# The short X.Y version.
version = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'congress_tempest_pluginReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'congress_tempest_pluginReleaseNotes.tex',
u'congress_tempest_plugin Release Notes Documentation',
u'OpenStack Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'congress_tempest_pluginrereleasenotes',
u'congress_tempest_plugin Release Notes Documentation',
[u'OpenStack Foundation'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'congress_tempest_plugin ReleaseNotes',
u'congress_tempest_plugin Release Notes Documentation',
u'OpenStack Foundation', 'congress_tempest_pluginReleaseNotes',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
locale_dirs = ['locale/']
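
A note on how this configuration is consumed: conf.py enables reno.sphinxext, which supplies the release-notes directive used in unreleased.rst further down this diff, and the HTML build itself is driven by the sphinx-build call in the releasenotes tox environment at the end of this commit. Purely as an illustrative sketch (this snippet is not part of the retired tree), the same build can be invoked through Sphinx's Python entry point:

# Illustrative sketch, not from this repository: the programmatic
# equivalent of the `sphinx-build -a -E -W ...` command in the
# releasenotes tox env shown later in this diff.
from sphinx.cmd.build import build_main

build_main(['-a', '-E', '-W',
            '-d', 'releasenotes/build/doctrees',
            '-b', 'html',
            'releasenotes/source', 'releasenotes/build/html'])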

8
releasenotes/source/index.rst
View File

@ -1,8 +0,0 @@
============================================
congress_tempest_plugin Release Notes
============================================
.. toctree::
   :maxdepth: 1

   unreleased

5
releasenotes/source/unreleased.rst
View File

@ -1,5 +0,0 @@
==============================
Current Series Release Notes
==============================
.. release-notes::

13
requirements.txt
View File

@ -1,13 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr!=2.1.0,>=2.0.0 # Apache-2.0
oslo.config>=5.2.0 # Apache-2.0
oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
oslo.log>=3.36.0 # Apache-2.0
six>=1.10.0 # MIT
tempest>=17.1.0 # Apache-2.0
tenacity>=4.4.0 # Apache-2.0
urllib3>=1.21.1 # MIT

50
setup.cfg
View File

@ -1,50 +0,0 @@
[metadata]
name = congress-tempest-plugin
summary = Tempest Plugin for Congress project
description-file =
README.rst
author = OpenStack
author-email = openstack-discuss@lists.openstack.org
home-page = https://opendev.org/openstack/congress-tempest-plugin
classifier =
Environment :: OpenStack
Intended Audience :: Information Technology
Intended Audience :: System Administrators
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
[files]
packages =
congress_tempest_plugin
[build_sphinx]
all-files = 1
warning-is-error = 1
source-dir = doc/source
build-dir = doc/build
[upload_sphinx]
upload-dir = doc/build/html
[compile_catalog]
directory = congress_tempest_plugin/locale
domain = congress_tempest_plugin
[update_catalog]
domain = congress_tempest_plugin
output_dir = congress_tempest_plugin/locale
input_file = congress_tempest_plugin/locale/congress_tempest_plugin.pot
[extract_messages]
keywords = _ gettext ngettext l_ lazy_gettext
mapping_file = babel.cfg
output_file = congress_tempest_plugin/locale/congress_tempest_plugin.pot
[entry_points]
tempest.test_plugins =
congress_tests = congress_tempest_plugin.plugin:CongressTempestPlugin
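
The [entry_points] stanza above is what wires the package into tempest: tempest discovers plugins through the tempest.test_plugins namespace and instantiates the referenced class. The actual congress_tempest_plugin/plugin.py module is not reproduced in this excerpt; the following is only a sketch of what such an entry point conventionally resolves to, with the class body assumed from tempest's standard TempestPlugin interface rather than copied from the retired tree:

# Sketch only -- assumed shape of congress_tempest_plugin/plugin.py,
# based on tempest's standard plugin interface, not the retired code.
import os

from tempest.test_discover import plugins


class CongressTempestPlugin(plugins.TempestPlugin):
    def load_tests(self):
        # Tell tempest where this plugin's test tree lives.
        base_path = os.path.split(os.path.dirname(
            os.path.abspath(__file__)))[0]
        test_dir = "congress_tempest_plugin/tests"
        full_test_dir = os.path.join(base_path, test_dir)
        return full_test_dir, base_path

    def register_opts(self, conf):
        # Plugin-specific config options (e.g. service availability
        # flags) would be registered on the tempest config object here.
        pass

    def get_opt_lists(self):
        return []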

29
setup.py
View File

@ -1,29 +0,0 @@
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup(
setup_requires=['pbr>=2.0.0'],
pbr=True)
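
setup.py intentionally carries no metadata of its own: pbr reads everything from setup.cfg and derives the version from git tags at build time. As a hedged illustration only (not code in this tree), the pbr-managed version of an installed package can be queried at runtime:

# Illustrative sketch, not from this repository: reading the version
# that pbr recorded at build time (requires the package be installed).
from pbr.version import VersionInfo

print(VersionInfo('congress-tempest-plugin').release_string())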

12
test-requirements.txt
View File

@ -1,12 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
hacking>=1.1.0,<1.2.0 # Apache-2.0
coverage!=4.4,>=4.0 # Apache-2.0
sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2 # BSD
openstackdocstheme>=1.20.0 # Apache-2.0
# releasenotes
reno>=2.5.0 # Apache-2.0

53
tox.ini
View File

@ -1,53 +0,0 @@
[tox]
minversion = 3.1.1
envlist = py37,pep8
skipsdist = True
ignore_basepython_conflict = True
[testenv]
basepython = python3
usedevelop = True
install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt} {opts} {packages}
setenv =
VIRTUAL_ENV={envdir}
PYTHONWARNINGS=default::DeprecationWarning
OS_STDOUT_CAPTURE=1
OS_STDERR_CAPTURE=1
OS_TEST_TIMEOUT=60
deps = -r{toxinidir}/test-requirements.txt
commands = stestr run {posargs}
[testenv:pep8]
commands = flake8 {posargs}
[testenv:venv]
commands = {posargs}
[testenv:cover]
setenv =
VIRTUAL_ENV={envdir}
PYTHON=coverage run --source congress_tempest_plugin --parallel-mode
commands =
stestr run {posargs}
coverage combine
coverage html -d cover
coverage xml -o cover/coverage.xml
[testenv:docs]
commands = python setup.py build_sphinx
[testenv:releasenotes]
commands =
sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
[testenv:debug]
commands = oslo_debug_helper {posargs}
[flake8]
# E123, E125 skipped as they are invalid PEP-8.
# E731 skipped to allow assigning lambda expressions
show-source = True
ignore = E123,E125,E731
builtins = _
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build