Change to use tempest plugin framework

The Tempest plugin framework gives projects more flexibility:
with a plugin, the tests no longer need to live in the Tempest
tree.
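
For reference, Tempest discovers plugins through a setuptools entry
point in the tempest.test_plugins namespace, so a change like this
also needs a stanza in setup.cfg along these lines (the alias name is
an assumption; the class path matches plugin.py below):

    [entry_points]
    tempest.test_plugins =
        congress_tests = congress_tempest_tests.plugin:CongressTempestPlugin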

Change-Id: I8681140d02926b4b3e6af84b8f03b9385e62cc72
Zhenzan Zhou 2015-09-16 14:14:07 +08:00
commit 955fa96f1f
23 changed files with 2246 additions and 0 deletions

__init__.py (new file)

config.py (new file)

@@ -0,0 +1,30 @@
# Copyright 2015 Intel Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from tempest import config # noqa
congressha_group = cfg.OptGroup(name="congressha", title="Congress HA Options")
CongressHAGroup = [
cfg.StrOpt("replica_type",
default="policyha",
help="service type used to create a replica congress server."),
cfg.IntOpt("replica_port",
default=4001,
help="The listening port for a replica congress server. "),
]
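
These options land on Tempest's global CONF object once the plugin's
register_opts hook (see plugin.py below) has run; a minimal sketch of
how a test reads them:

    from tempest import config

    CONF = config.CONF

    # Available after CongressTempestPlugin.register_opts(); used by
    # the HA scenario tests further down.
    replica_type = CONF.congressha.replica_type   # default: "policyha"
    replica_port = CONF.congressha.replica_port   # default: 4001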

plugin.py (new file)

@@ -0,0 +1,39 @@
# Copyright 2015 Intel
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from tempest import config
from tempest.test_discover import plugins
from congress_tempest_tests import config as config_congress
class CongressTempestPlugin(plugins.TempestPlugin):
def load_tests(self):
base_path = os.path.split(os.path.dirname(
os.path.abspath(__file__)))[0]
test_dir = "congress_tempest_tests/tests"
full_test_dir = os.path.join(base_path, test_dir)
return full_test_dir, base_path
def register_opts(self, conf):
config.register_opt_group(conf, config_congress.congressha_group,
config_congress.CongressHAGroup)
def get_opt_lists(self):
return [(config_congress.congressha_group.name,
config_congress.CongressHAGroup)]
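
Roughly how Tempest drives these three hooks, sketched as a plain call
sequence (illustrative only, not Tempest's actual internals):

    from tempest import config

    plugin = CongressTempestPlugin()

    # Where to discover tests and the import root to use while doing so.
    test_dir, top_level = plugin.load_tests()

    # Adds the [congressha] group from config.py to the global CONF.
    plugin.register_opts(config.CONF)

    # Consumed when generating a sample configuration file.
    opt_lists = plugin.get_opt_lists()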

services/__init__.py (new file)

services/policy/policy_client.py (new file)

@@ -0,0 +1,173 @@
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from tempest.common import service_client
class PolicyClient(service_client.ServiceClient):
policy = '/v1/policies'
policy_path = '/v1/policies/%s'
policy_rules = '/v1/policies/%s/rules'
policy_rules_path = '/v1/policies/%s/rules/%s'
policy_tables = '/v1/policies/%s/tables'
policy_table_path = '/v1/policies/%s/tables/%s'
policy_rows = '/v1/policies/%s/tables/%s/rows'
policy_rows_trace = '/v1/policies/%s/tables/%s/rows?trace=True'
policies = '/v1/policies'
policy_action = '/v1/policies/%s?%s'
datasources = '/v1/data-sources'
datasource_path = '/v1/data-sources/%s'
datasource_tables = '/v1/data-sources/%s/tables'
datasource_table_path = '/v1/data-sources/%s/tables/%s'
datasource_status = '/v1/data-sources/%s/status'
datasource_schema = '/v1/data-sources/%s/schema'
datasource_table_schema = '/v1/data-sources/%s/tables/%s/spec'
datasource_rows = '/v1/data-sources/%s/tables/%s/rows'
driver = '/v1/system/drivers'
driver_path = '/v1/system/drivers/%s'
def _resp_helper(self, resp, body):
body = json.loads(body)
return service_client.ResponseBody(resp, body)
def create_policy(self, body):
body = json.dumps(body)
resp, body = self.post(
self.policy, body=body)
return self._resp_helper(resp, body)
def delete_policy(self, policy):
resp, body = self.delete(
self.policy_path % policy)
return self._resp_helper(resp, body)
def show_policy(self, policy):
resp, body = self.get(
self.policy_path % policy)
return self._resp_helper(resp, body)
def create_policy_rule(self, policy_name, body=None):
body = json.dumps(body)
resp, body = self.post(
self.policy_rules % policy_name, body=body)
return self._resp_helper(resp, body)
def delete_policy_rule(self, policy_name, rule_id):
resp, body = self.delete(
self.policy_rules_path % (policy_name, rule_id))
return self._resp_helper(resp, body)
def show_policy_rule(self, policy_name, rule_id):
resp, body = self.get(
self.policy_rules_path % (policy_name, rule_id))
return self._resp_helper(resp, body)
def list_policy_rows(self, policy_name, table, trace=None):
if trace:
query = self.policy_rows_trace
else:
query = self.policy_rows
resp, body = self.get(query % (policy_name, table))
return self._resp_helper(resp, body)
def list_policy_rules(self, policy_name):
resp, body = self.get(self.policy_rules % (policy_name))
return self._resp_helper(resp, body)
def list_policy(self):
resp, body = self.get(self.policies)
return self._resp_helper(resp, body)
def list_policy_tables(self, policy_name):
resp, body = self.get(self.policy_tables % (policy_name))
return self._resp_helper(resp, body)
def execute_policy_action(self, policy_name, action, trace, delta, body):
body = json.dumps(body)
uri = "?action=%s&trace=%s&delta=%s" % (action, trace, delta)
resp, body = self.post(
(self.policy_path % policy_name) + str(uri), body=body)
return self._resp_helper(resp, body)
def show_policy_table(self, policy_name, table_id):
resp, body = self.get(self.policy_table_path % (policy_name, table_id))
return self._resp_helper(resp, body)
def list_datasources(self):
resp, body = self.get(self.datasources)
return self._resp_helper(resp, body)
def list_datasource_tables(self, datasource_name):
resp, body = self.get(self.datasource_tables % (datasource_name))
return self._resp_helper(resp, body)
def list_datasource_rows(self, datasource_name, table_name):
resp, body = self.get(self.datasource_rows %
(datasource_name, table_name))
return self._resp_helper(resp, body)
def list_datasource_status(self, datasource_name):
resp, body = self.get(self.datasource_status % datasource_name)
return self._resp_helper(resp, body)
def show_datasource_schema(self, datasource_name):
resp, body = self.get(self.datasource_schema % datasource_name)
return self._resp_helper(resp, body)
def show_datasource_table_schema(self, datasource_name, table_name):
resp, body = self.get(self.datasource_table_schema %
(datasource_name, table_name))
return self._resp_helper(resp, body)
def show_datasource_table(self, datasource_name, table_id):
resp, body = self.get(self.datasource_table_path %
(datasource_name, table_id))
return self._resp_helper(resp, body)
def create_datasource(self, body=None):
body = json.dumps(body)
resp, body = self.post(
self.datasources, body=body)
return self._resp_helper(resp, body)
def delete_datasource(self, datasource):
resp, body = self.delete(
self.datasource_path % datasource)
return self._resp_helper(resp, body)
def execute_datasource_action(self, service_name, action, body):
body = json.dumps(body)
uri = "?action=%s" % (action)
resp, body = self.post(
(self.datasource_path % service_name) + str(uri), body=body)
return self._resp_helper(resp, body)
def list_drivers(self):
resp, body = self.get(self.driver)
return self._resp_helper(resp, body)
def show_driver(self, driver):
resp, body = self.get(self.driver_path % (driver))
return self._resp_helper(resp, body)
def request_refresh(self, driver, body=None):
body = json.dumps(body)
resp, body = self.post(self.datasource_path %
(driver) + "?action=request-refresh",
body=body)
return self._resp_helper(resp, body)
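
A short usage sketch of this client (assumes an auth provider has
already been built, as manager_congress.py does below; the policy name
and rule are illustrative):

    client = PolicyClient(auth_prov, "policy", CONF.identity.region)

    # Create a policy, add a rule, then read the rules back.
    policy = client.create_policy({"name": "demo_policy"})
    rule = client.create_policy_rule(
        policy['name'],
        {"rule": "allowed_flavors(flavor) :- nova:flavors(flavor_id, "
                 "flavor, vcpus, ram, disk, ephemeral, rxtx_factor)"})
    rules = client.list_policy_rules(policy['name'])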

tests/README.rst (new file)

@@ -0,0 +1,17 @@
====================
Tempest Integration
====================
This directory contains Tempest tests that cover the Congress project.

To list all Congress Tempest test cases, go to the Tempest directory and run::

    $ testr list-tests congress

To run only these tests in Tempest, go to the Tempest directory and run::

    $ ./run_tempest.sh -N -- congress

To run a single test case, go to the Tempest directory and run it by name, e.g.::

    $ ./run_tempest.sh -N -- congress_tempest_tests.tests.scenario.test_congress_basic_ops.TestPolicyBasicOps.test_policy_basic_op

tests/__init__.py (new file)

tests/api/__init__.py (new file)

@@ -0,0 +1,81 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest_lib import decorators
from tempest import clients # noqa
from tempest import config # noqa
from tempest import exceptions # noqa
from tempest import test # noqa
from congress_tempest_tests.tests.scenario import manager_congress # noqa
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestCeilometerDriver(manager_congress.ScenarioPolicyBase):
@classmethod
def check_preconditions(cls):
super(TestCeilometerDriver, cls).check_preconditions()
    def setUp(self):
        super(TestCeilometerDriver, self).setUp()
        if not CONF.service_available.ceilometer:
            msg = ("%s skipped as ceilometer is not available" %
                   self.__class__.__name__)
            raise self.skipException(msg)
        self.os = clients.Manager(
            self.admin_manager.auth_provider.credentials)
        self.telemetry_client = self.os.telemetry_client
        self.datasource_id = manager_congress.get_datasource_id(
            self.admin_manager.congress_client, 'ceilometer')
@decorators.skip_because(bug='1486246')
@test.attr(type='smoke')
def test_ceilometer_meters_table(self):
meter_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
self.datasource_id, 'meters')['columns'])
meter_id_col = next(i for i, c in enumerate(meter_schema)
if c['name'] == 'meter_id')
def _check_data_table_ceilometer_meters():
# Fetch data from ceilometer each time, because this test may start
# before ceilometer has all the users.
meters = self.telemetry_client.list_meters()
meter_map = {}
for meter in meters:
meter_map[meter['meter_id']] = meter
results = (
self.admin_manager.congress_client.list_datasource_rows(
self.datasource_id, 'meters'))
for row in results['results']:
try:
meter_row = meter_map[row['data'][meter_id_col]]
except KeyError:
return False
for index in range(len(meter_schema)):
if (str(row['data'][index]) !=
str(meter_row[meter_schema[index]['name']])):
return False
return True
if not test.call_until_true(func=_check_data_table_ceilometer_meters,
duration=100, sleep_for=5):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")

@@ -0,0 +1,83 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest_lib import decorators
from tempest import clients # noqa
from tempest import config # noqa
from tempest import exceptions # noqa
from tempest import test # noqa
from congress_tempest_tests.tests.scenario import manager_congress # noqa
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestCinderDriver(manager_congress.ScenarioPolicyBase):
@classmethod
def check_preconditions(cls):
super(TestCinderDriver, cls).check_preconditions()
        if not (CONF.network.tenant_networks_reachable or
                CONF.network.public_network_id):
            msg = ('Either tenant_networks_reachable must be "true", or '
                   'public_network_id must be defined.')
            cls.enabled = False
            raise cls.skipException(msg)

    def setUp(self):
        super(TestCinderDriver, self).setUp()
        self.os = clients.Manager(
            self.admin_manager.auth_provider.credentials)
        self.cinder = self.os.volumes_client
        self.datasource_id = manager_congress.get_datasource_id(
            self.admin_manager.congress_client, 'cinder')
@decorators.skip_because(bug='1486246')
@test.attr(type='smoke')
def test_cinder_volumes_table(self):
volume_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
self.datasource_id, 'volumes')['columns'])
volume_id_col = next(i for i, c in enumerate(volume_schema)
if c['name'] == 'id')
def _check_data_table_cinder_volumes():
# Fetch data from cinder each time, because this test may start
# before cinder has all the users.
volumes = self.cinder.list_volumes()
volumes_map = {}
for volume in volumes:
volumes_map[volume['id']] = volume
results = (
self.admin_manager.congress_client.list_datasource_rows(
self.datasource_id, 'volumes'))
for row in results['results']:
try:
volume_row = volumes_map[row['data'][volume_id_col]]
except KeyError:
return False
for index in range(len(volume_schema)):
if (str(row['data'][index]) !=
str(volume_row[volume_schema[index]['name']])):
return False
return True
if not test.call_until_true(func=_check_data_table_cinder_volumes,
duration=100, sleep_for=5):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")

@@ -0,0 +1,127 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest_lib import decorators
from tempest import clients # noqa
from tempest import config # noqa
from tempest import exceptions # noqa
from tempest import test # noqa
from congress_tempest_tests.tests.scenario import manager_congress # noqa
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestGlanceV2Driver(manager_congress.ScenarioPolicyBase):
@classmethod
def check_preconditions(cls):
super(TestGlanceV2Driver, cls).check_preconditions()
if not (CONF.network.tenant_networks_reachable
or CONF.network.public_network_id):
msg = ('Either tenant_networks_reachable must be "true", or '
'public_network_id must be defined.')
cls.enabled = False
raise cls.skipException(msg)
    def setUp(self):
        super(TestGlanceV2Driver, self).setUp()
        if not CONF.service_available.glance:
            skip_msg = ("%s skipped as glance is not available"
                        % self.__class__.__name__)
            raise self.skipException(skip_msg)
        self.os = clients.Manager()
        self.glancev2 = self.os.image_client_v2
        self.datasource_id = manager_congress.get_datasource_id(
            self.admin_manager.congress_client, 'glancev2')
@decorators.skip_because(bug='1486246')
@test.attr(type='smoke')
@test.services('image')
def test_glancev2_images_table(self):
image_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
self.datasource_id, 'images')['columns'])
image_id_col = next(i for i, c in enumerate(image_schema)
if c['name'] == 'id')
def _check_data_table_glancev2_images():
# Fetch data from glance each time, because this test may start
# before glance has all the users.
images = self.glancev2.list_images()
image_map = {}
for image in images:
image_map[image['id']] = image
results = (
self.admin_manager.congress_client.list_datasource_rows(
self.datasource_id, 'images'))
for row in results['results']:
try:
image_row = image_map[row['data'][image_id_col]]
except KeyError:
return False
for index in range(len(image_schema)):
# glancev2 doesn't return kernel_id/ramdisk_id if
# it isn't present...
if ((image_schema[index]['name'] == 'kernel_id' and
'kernel_id' not in row['data']) or
(image_schema[index]['name'] == 'ramdisk_id' and
'ramdisk_id' not in row['data'])):
continue
# FIXME(arosen): congress-server should retain the type
# but doesn't today.
if (str(row['data'][index]) !=
str(image_row[image_schema[index]['name']])):
return False
return True
if not test.call_until_true(func=_check_data_table_glancev2_images,
duration=100, sleep_for=4):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.skip_because(bug='1486246')
@test.attr(type='smoke')
@test.services('image')
def test_glancev2_tags_table(self):
def _check_data_table_glance_images():
# Fetch data from glance each time, because this test may start
# before glance has all the users.
images = self.glancev2.list_images()
image_tag_map = {}
for image in images:
image_tag_map[image['id']] = image['tags']
results = (
self.admin_manager.congress_client.list_datasource_rows(
self.datasource_id, 'tags'))
for row in results['results']:
image_id, tag = row['data'][0], row['data'][1]
glance_image_tags = image_tag_map.get(image_id)
if not glance_image_tags:
# congress had image that glance doesn't know about.
return False
if tag not in glance_image_tags:
# congress had a tag that wasn't on the image.
return False
return True
if not test.call_until_true(func=_check_data_table_glance_images,
duration=100, sleep_for=5):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")

@@ -0,0 +1,161 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest_lib import decorators
from tempest import clients # noqa
from tempest import config # noqa
from tempest import exceptions # noqa
from tempest import test # noqa
from congress_tempest_tests.tests.scenario import manager_congress # noqa
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestKeystoneV2Driver(manager_congress.ScenarioPolicyBase):
@classmethod
def check_preconditions(cls):
super(TestKeystoneV2Driver, cls).check_preconditions()
        if not (CONF.network.tenant_networks_reachable or
                CONF.network.public_network_id):
            msg = ('Either tenant_networks_reachable must be "true", or '
                   'public_network_id must be defined.')
            cls.enabled = False
            raise cls.skipException(msg)

    def setUp(self):
        super(TestKeystoneV2Driver, self).setUp()
        self.os = clients.Manager(
            self.admin_manager.auth_provider.credentials)
        self.keystone = self.os.identity_client
        self.datasource_id = manager_congress.get_datasource_id(
            self.admin_manager.congress_client, 'keystone')
@decorators.skip_because(bug='1486246')
@test.attr(type='smoke')
def test_keystone_users_table(self):
user_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
self.datasource_id, 'users')['columns'])
user_id_col = next(i for i, c in enumerate(user_schema)
if c['name'] == 'id')
def _check_data_table_keystone_users():
# Fetch data from keystone each time, because this test may start
# before keystone has all the users.
users = self.keystone.get_users()
user_map = {}
for user in users:
user_map[user['id']] = user
results = (
self.admin_manager.congress_client.list_datasource_rows(
self.datasource_id, 'users'))
for row in results['results']:
try:
user_row = user_map[row['data'][user_id_col]]
except KeyError:
return False
for index in range(len(user_schema)):
if ((user_schema[index]['name'] == 'tenantId' and
'tenantId' not in user_row) or
(user_schema[index]['name'] == 'email' and
'email' not in user_row)):
# Keystone does not return the tenantId or email column
# if not present.
pass
elif (str(row['data'][index]) !=
str(user_row[user_schema[index]['name']])):
return False
return True
if not test.call_until_true(func=_check_data_table_keystone_users,
duration=100, sleep_for=4):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.skip_because(bug='1486246')
@test.attr(type='smoke')
def test_keystone_roles_table(self):
role_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
self.datasource_id, 'roles')['columns'])
role_id_col = next(i for i, c in enumerate(role_schema)
if c['name'] == 'id')
def _check_data_table_keystone_roles():
# Fetch data from keystone each time, because this test may start
# before keystone has all the users.
roles = self.keystone.list_roles()
roles_map = {}
for role in roles:
roles_map[role['id']] = role
results = (
self.admin_manager.congress_client.list_datasource_rows(
self.datasource_id, 'roles'))
for row in results['results']:
try:
role_row = roles_map[row['data'][role_id_col]]
except KeyError:
return False
for index in range(len(role_schema)):
if (str(row['data'][index]) !=
str(role_row[role_schema[index]['name']])):
return False
return True
if not test.call_until_true(func=_check_data_table_keystone_roles,
duration=100, sleep_for=4):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.skip_because(bug='1486246')
@test.attr(type='smoke')
def test_keystone_tenants_table(self):
tenant_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
self.datasource_id, 'tenants')['columns'])
tenant_id_col = next(i for i, c in enumerate(tenant_schema)
if c['name'] == 'id')
def _check_data_table_keystone_tenants():
# Fetch data from keystone each time, because this test may start
# before keystone has all the users.
tenants = self.keystone.list_tenants()
tenants_map = {}
for tenant in tenants:
tenants_map[tenant['id']] = tenant
results = (
self.admin_manager.congress_client.list_datasource_rows(
self.datasource_id, 'tenants'))
for row in results['results']:
try:
tenant_row = tenants_map[row['data'][tenant_id_col]]
except KeyError:
return False
for index in range(len(tenant_schema)):
if (str(row['data'][index]) !=
str(tenant_row[tenant_schema[index]['name']])):
return False
return True
if not test.call_until_true(func=_check_data_table_keystone_tenants,
duration=100, sleep_for=5):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")

@@ -0,0 +1,189 @@
# Copyright (c) 2015 Hewlett-Packard. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import string
from tempest import config
from tempest import test
from tempest_lib import decorators
from congress_tempest_tests.tests.scenario import manager_congress # noqa
CONF = config.CONF
class TestMuranoDriver(manager_congress.ScenarioPolicyBase):
@classmethod
def check_preconditions(cls):
super(TestMuranoDriver, cls).check_preconditions()
if not (CONF.network.tenant_networks_reachable
or CONF.network.public_network_id):
msg = ('Either tenant_networks_reachable must be "true", or '
'public_network_id must be defined.')
cls.enabled = False
raise cls.skipException(msg)
def setUp(self):
super(TestMuranoDriver, self).setUp()
self.congress_client = (
self.admin_manager.congress_client)
@decorators.skip_because(bug='1486246')
@test.attr(type='smoke')
@test.services('compute')
def test_murano_predeployment(self):
def _delete_policy_rules(policy_name):
result = self.congress_client.list_policy_rules(
policy_name)['results']
for rule in result:
self.congress_client.delete_policy_rule(
policy_name,
rule['id'])
def _create_random_policy():
policy_name = "murano_%s" % ''.join(random.choice(string.lowercase)
for x in range(10))
body = {"name": policy_name}
resp = self.congress_client.create_policy(body)
self.addCleanup(_delete_policy_rules, resp['name'])
return resp['name']
def _create_datasource():
body = {"config": {"username": CONF.identity.admin_username,
"tenant_name": CONF.identity.admin_tenant_name,
"password": CONF.identity.admin_password,
"auth_url": CONF.identity.uri},
"driver": "murano",
"name": "murano"}
datasource = self.congress_client.create_datasource(body)['id']
self.addCleanup(self.congress_client.delete_datasource, datasource)
def _create_rule(policy_name, rule):
self.congress_client.create_policy_rule(policy_name, rule)
def _simulate_policy(policy_name, query):
resp = self.congress_client.execute_policy_action(
policy_name,
"simulate",
False,
False,
query)
return resp['result']
rule1 = {
"rule": "allowed_flavors(flavor) :- nova:flavors(flavor_id,"
"flavor, vcpus, ram, disk, ephemeral, rxtx_factor),"
"equal(flavor, \"m1.medium\")"
}
rule2 = {
"rule": "allowed_flavors(flavor) :- nova:flavors(flavor_id,"
"flavor, vcpus, ram, disk, ephemeral, rxtx_factor),"
"equal(flavor, \"m1.small\")"
}
rule3 = {
"rule": "allowed_flavors(flavor) :- nova:flavors(flavor_id,"
"flavor, vcpus, ram, disk, ephemeral, rxtx_factor),"
"equal(flavor, \"m1.tiny\")"
}
rule4 = {
"rule": "murano_pending_envs(env_id) :- "
"murano:objects(env_id, tenant_id, \"io.murano.Environment\"),"
"murano:states(env_id, env_state),"
"equal(env_state, \"pending\")"
}
rule5 = {
"rule": "murano_instances(env_id, instance_id) :- "
"murano:objects(env_id, tenant_id, \"io.murano.Environment\"),"
"murano:objects(service_id, env_id, service_type),"
"murano:parent_types(service_id, \"io.murano.Object\"),"
"murano:parent_types(service_id, \"io.murano.Application\"),"
"murano:parent_types(service_id, service_type),"
"murano:objects(instance_id, service_id, instance_type),"
"murano:parent_types(instance_id,"
"\"io.murano.resources.Instance\"),"
"murano:parent_types(instance_id, \"io.murano.Object\"),"
"murano:parent_types(instance_id, instance_type)"
}
rule6 = {
"rule": "murano_instance_flavors(instance_id, flavor) :- "
"murano:properties(instance_id, \"flavor\", flavor)"
}
rule7 = {
"rule": "predeploy_error(env_id) :- "
"murano_pending_envs(env_id),"
"murano_instances(env_id, instance_id),"
"murano_instance_flavors(instance_id, flavor),"
"not allowed_flavors(flavor)"
}
sim_query1 = {
"query": "predeploy_error(env_id)",
"action_policy": "action",
"sequence": "murano:objects+(\"env_uuid\", \"tenant_uuid\","
"\"io.murano.Environment\") murano:states+(\"env_uuid\", "
"\"pending\") murano:objects+(\"service_uuid\", \"env_uuid\", "
"\"service_type\") murano:parent_types+(\"service_uuid\", "
"\"io.murano.Object\") murano:parent_types+(\"service_uuid\", "
"\"io.murano.Application\") murano:parent_types+(\"service_uuid\","
"\"service_type\") murano:objects+(\"instance_uuid\", "
"\"service_uuid\", \"service_type\") murano:objects+(\""
"instance_uuid\", \"service_uuid\", \"instance_type\") "
"murano:parent_types+(\"instance_uuid\", "
"\"io.murano.resources.Instance\") murano:parent_types+(\""
"instance_uuid\", \"io.murano.Object\") murano:parent_types+(\""
"instance_uuid\", \"instance_type\") murano:properties+(\""
"instance_uuid\", \"flavor\", \"m1.small\")"
}
sim_query2 = {
"query": "predeploy_error(env_id)",
"action_policy": "action",
"sequence": "murano:objects+(\"env_uuid\", \"tenant_uuid\","
"\"io.murano.Environment\") murano:states+(\"env_uuid\", "
"\"pending\") murano:objects+(\"service_uuid\", \"env_uuid\", "
"\"service_type\") murano:parent_types+(\"service_uuid\", "
"\"io.murano.Object\") murano:parent_types+(\"service_uuid\", "
"\"io.murano.Application\") murano:parent_types+(\"service_uuid\","
"\"service_type\") murano:objects+(\"instance_uuid\", "
"\"service_uuid\", \"service_type\") murano:objects+(\""
"instance_uuid\", \"service_uuid\", \"instance_type\") "
"murano:parent_types+(\"instance_uuid\", "
"\"io.murano.resources.Instance\") murano:parent_types+(\""
"instance_uuid\", \"io.murano.Object\") murano:parent_types+(\""
"instance_uuid\", \"instance_type\") murano:properties+(\""
"instance_uuid\", \"flavor\", \"m1.large\")"
}
_create_datasource()
policy_name = _create_random_policy()
_create_rule(policy_name, rule1)
_create_rule(policy_name, rule2)
_create_rule(policy_name, rule3)
_create_rule(policy_name, rule4)
_create_rule(policy_name, rule5)
_create_rule(policy_name, rule6)
_create_rule(policy_name, rule7)
result = _simulate_policy(policy_name, sim_query1)
self.assertEqual([], result)
result = _simulate_policy(policy_name, sim_query2)
self.assertEqual('predeploy_error("env_uuid")', result[0])
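
Under the hood each simulation is a single REST call built by
PolicyClient.execute_policy_action (defined earlier in this change);
sketched:

    # Issues: POST /v1/policies/<policy_name>?action=simulate&trace=False&delta=False
    # with the sim_query dict JSON-encoded as the body.
    resp = client.execute_policy_action(
        policy_name, "simulate", False, False, sim_query1)
    assert resp['result'] == []   # no predeploy_error derived for m1.small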

@@ -0,0 +1,386 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
from tempest_lib import decorators
from tempest import clients # noqa
from tempest import config # noqa
from tempest import exceptions # noqa
from tempest import test # noqa
from congress_tempest_tests.tests.scenario import manager_congress # noqa
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestNeutronV2Driver(manager_congress.ScenarioPolicyBase):
@classmethod
def check_preconditions(cls):
super(TestNeutronV2Driver, cls).check_preconditions()
if not (CONF.network.tenant_networks_reachable
or CONF.network.public_network_id):
msg = ('Either tenant_networks_reachable must be "true", or '
'public_network_id must be defined.')
cls.enabled = False
raise cls.skipException(msg)
    def setUp(self):
        super(TestNeutronV2Driver, self).setUp()
        if not CONF.service_available.neutron:
            skip_msg = ("%s skipped as neutron is not available"
                        % self.__class__.__name__)
            raise self.skipException(skip_msg)
        self.os = clients.Manager(
            self.admin_manager.auth_provider.credentials)
        self.neutron_client = self.os.network_client
        self.datasource_id = manager_congress.get_datasource_id(
            self.admin_manager.congress_client, 'neutronv2')
@decorators.skip_because(bug='1486246')
@test.attr(type='smoke')
@test.services('network')
def test_neutronv2_networks_table(self):
def _check_data():
networks = self.neutron_client.list_networks()
network_map = {}
for network in networks['networks']:
network_map[network['id']] = network
client = self.admin_manager.congress_client
client.request_refresh(self.datasource_id)
time.sleep(1)
network_schema = (client.show_datasource_table_schema(
self.datasource_id, 'networks')['columns'])
results = (client.list_datasource_rows(
self.datasource_id, 'networks'))
for row in results['results']:
network_row = network_map[row['data'][0]]
for index in range(len(network_schema)):
if (str(row['data'][index]) !=
str(network_row[network_schema[index]['name']])):
return False
return True
if not test.call_until_true(func=_check_data,
duration=200, sleep_for=10):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.skip_because(bug='1486246')
@test.attr(type='smoke')
@test.services('network')
def test_neutronv2_ports_tables(self):
port_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
self.datasource_id, 'ports')['columns'])
port_sec_binding_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
self.datasource_id, 'security_group_port_bindings')['columns'])
fixed_ips_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
self.datasource_id, 'fixed_ips')['columns'])
def _check_data():
ports_from_neutron = self.neutron_client.list_ports()
port_map = {}
for port in ports_from_neutron['ports']:
port_map[port['id']] = port
client = self.admin_manager.congress_client
client.request_refresh(self.datasource_id)
time.sleep(1)
ports = (client.list_datasource_rows(self.datasource_id, 'ports'))
security_group_port_bindings = (
client.list_datasource_rows(
self.datasource_id, 'security_group_port_bindings'))
fixed_ips = (
client.list_datasource_rows(self.datasource_id, 'fixed_ips'))
# Validate ports table
for row in ports['results']:
port_row = port_map[row['data'][0]]
for index in range(len(port_schema)):
if (str(row['data'][index]) !=
str(port_row[port_schema[index]['name']])):
return False
# validate security_group_port_bindings table
for row in security_group_port_bindings['results']:
port_row = port_map[row['data'][0]]
for index in range(len(port_sec_binding_schema)):
row_index = port_sec_binding_schema[index]['name']
# Translate port_id -> id
if row_index == 'port_id':
if (str(row['data'][index]) !=
str(port_row['id'])):
return False
elif row_index == 'security_group_id':
if (str(row['data'][index]) not in
port_row['security_groups']):
return False
# validate fixed_ips
for row in fixed_ips['results']:
port_row = port_map[row['data'][0]]
for index in range(len(fixed_ips_schema)):
row_index = fixed_ips_schema[index]['name']
if row_index in ['subnet_id', 'ip_address']:
if not port_row['fixed_ips']:
continue
for fixed_ip in port_row['fixed_ips']:
if row['data'][index] == fixed_ip[row_index]:
break
else:
# no subnet_id/ip_address match found
return False
return True
if not test.call_until_true(func=_check_data,
duration=200, sleep_for=10):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.skip_because(bug='1486246')
@test.attr(type='smoke')
@test.services('network')
def test_neutronv2_subnets_tables(self):
subnet_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
self.datasource_id, 'subnets')['columns'])
host_routes_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
self.datasource_id, 'host_routes')['columns'])
dns_nameservers_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
self.datasource_id, 'dns_nameservers')['columns'])
allocation_pools_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
self.datasource_id, 'allocation_pools')['columns'])
def _check_data():
subnets_from_neutron = self.neutron_client.list_subnets()
subnet_map = {}
for subnet in subnets_from_neutron['subnets']:
subnet_map[subnet['id']] = subnet
client = self.admin_manager.congress_client
client.request_refresh(self.datasource_id)
time.sleep(1)
subnets = (
client.list_datasource_rows(self.datasource_id, 'subnets'))
host_routes = (
client.list_datasource_rows(self.datasource_id, 'host_routes'))
dns_nameservers = (
client.list_datasource_rows(
self.datasource_id, 'dns_nameservers'))
allocation_pools = (
client.list_datasource_rows(
self.datasource_id, 'allocation_pools'))
# Validate subnets table
for row in subnets['results']:
subnet_row = subnet_map[row['data'][0]]
for index in range(len(subnet_schema)):
if (str(row['data'][index]) !=
str(subnet_row[subnet_schema[index]['name']])):
return False
# validate dns_nameservers
for row in dns_nameservers['results']:
subnet_row = subnet_map[row['data'][0]]
for index in range(len(dns_nameservers_schema)):
row_index = dns_nameservers_schema[index]['name']
if row_index in ['dns_nameserver']:
if (row['data'][index]
not in subnet_row['dns_nameservers']):
return False
# validate host_routes
for row in host_routes['results']:
subnet_row = subnet_map[row['data'][0]]
for index in range(len(host_routes_schema)):
row_index = host_routes_schema[index]['name']
if row_index in ['destination', 'nexthop']:
if not subnet_row['host_routes']:
continue
for host_route in subnet_row['host_routes']:
if row['data'][index] == host_route[row_index]:
break
else:
# no destination/nexthop match found
return False
# validate allocation_pools
for row in allocation_pools['results']:
subnet_row = subnet_map[row['data'][0]]
for index in range(len(allocation_pools_schema)):
row_index = allocation_pools_schema[index]['name']
if row_index in ['start', 'end']:
if not subnet_row['allocation_pools']:
continue
for allocation_pool in subnet_row['allocation_pools']:
if (row['data'][index] ==
allocation_pool[row_index]):
break
else:
# no destination/nexthop match found
return False
return True
if not test.call_until_true(func=_check_data,
duration=200, sleep_for=10):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.skip_because(bug='1486246')
@test.attr(type='smoke')
@test.services('network')
def test_neutronv2_routers_tables(self):
router_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
self.datasource_id, 'routers')['columns'])
ext_gw_info_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
self.datasource_id, 'external_gateway_infos')['columns'])
def _check_data():
routers_from_neutron = self.neutron_client.list_routers()
router_map = {}
for router in routers_from_neutron['routers']:
router_map[router['id']] = router
client = self.admin_manager.congress_client
client.request_refresh(self.datasource_id)
time.sleep(1)
routers = (
client.list_datasource_rows(self.datasource_id, 'routers'))
ext_gw_info = (
client.list_datasource_rows(
self.datasource_id, 'external_gateway_infos'))
# Validate routers table
for row in routers['results']:
router_row = router_map[row['data'][0]]
for index in range(len(router_schema)):
if (str(row['data'][index]) !=
str(router_row[router_schema[index]['name']])):
return False
# validate external_gateway_infos
for row in ext_gw_info['results']:
router_ext_gw_info = (
router_map[row['data'][0]]['external_gateway_info'])
# populate router_id
router_ext_gw_info['router_id'] = row['data'][0]
for index in range(len(ext_gw_info_schema)):
val = router_ext_gw_info[ext_gw_info_schema[index]['name']]
if (str(row['data'][index]) != str(val)):
return False
return True
if not test.call_until_true(func=_check_data,
duration=200, sleep_for=10):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.skip_because(bug='1486246')
@test.attr(type='smoke')
@test.services('network')
def test_neutronv2_security_groups_table(self):
sg_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
self.datasource_id, 'security_groups')['columns'])
def _check_data():
client = self.neutron_client
security_groups_neutron = client.list_security_groups()
security_groups_map = {}
for security_group in security_groups_neutron['security_groups']:
security_groups_map[security_group['id']] = security_group
client = self.admin_manager.congress_client
client.request_refresh(self.datasource_id)
time.sleep(1)
security_groups = (
client.list_datasource_rows(
self.datasource_id, 'security_groups'))
# Validate security_group table
for row in security_groups['results']:
sg_row = security_groups_map[row['data'][0]]
for index in range(len(sg_schema)):
if (str(row['data'][index]) !=
str(sg_row[sg_schema[index]['name']])):
return False
return True
if not test.call_until_true(func=_check_data,
duration=200, sleep_for=10):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.skip_because(bug='1486246')
@test.attr(type='smoke')
@test.services('network')
def test_neutronv2_security_group_rules_table(self):
sgrs_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
self.datasource_id, 'security_group_rules')['columns'])
def _check_data():
client = self.neutron_client
security_groups_neutron = client.list_security_groups()
sgrs_map = {} # security_group_rules
for sg in security_groups_neutron['security_groups']:
for sgr in sg['security_group_rules']:
sgrs_map[sgr['id']] = sgr
client = self.admin_manager.congress_client
client.request_refresh(self.datasource_id)
time.sleep(1)
security_group_rules = (
client.list_datasource_rows(
self.datasource_id, 'security_group_rules'))
# Validate security_group_rules table
for row in security_group_rules['results']:
sg_rule_row = sgrs_map[row['data'][1]]
for index in range(len(sgrs_schema)):
if (str(row['data'][index]) !=
str(sg_rule_row[sgrs_schema[index]['name']])):
return False
return True
if not test.call_until_true(func=_check_data,
duration=200, sleep_for=10):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")

@@ -0,0 +1,130 @@
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest_lib import decorators
from tempest import config # noqa
from tempest import exceptions # noqa
from tempest import test # noqa
from congress_tempest_tests.tests.scenario import manager_congress # noqa
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestNovaDriver(manager_congress.ScenarioPolicyBase):
@classmethod
def check_preconditions(cls):
super(TestNovaDriver, cls).check_preconditions()
if not (CONF.network.tenant_networks_reachable
or CONF.network.public_network_id):
msg = ('Either tenant_networks_reachable must be "true", or '
'public_network_id must be defined.')
cls.enabled = False
raise cls.skipException(msg)
def setUp(self):
super(TestNovaDriver, self).setUp()
self.keypairs = {}
self.servers = []
self.datasource_id = manager_congress.get_datasource_id(
self.admin_manager.congress_client, 'nova')
@decorators.skip_because(bug='1486246')
@test.attr(type='smoke')
@test.services('compute', 'network')
def test_nova_datasource_driver_servers(self):
self._setup_network_and_servers()
server_schema = (
self.admin_manager.congress_client.show_datasource_table_schema(
self.datasource_id, 'servers')['columns'])
# Convert some of the column names.
def convert_col(col):
if col == 'host_id':
return 'hostId'
elif col == 'image_id':
return 'image'
elif col == 'flavor_id':
return 'flavor'
else:
return col
keys = [convert_col(c['name']) for c in server_schema]
def _check_data_table_nova_servers():
results = (
self.admin_manager.congress_client.list_datasource_rows(
self.datasource_id, 'servers'))
for row in results['results']:
match = True
for index in range(len(keys)):
if keys[index] in ['image', 'flavor']:
val = self.servers[0][keys[index]]['id']
else:
val = self.servers[0][keys[index]]
if row['data'][index] != val:
match = False
break
if match:
return True
return False
if not test.call_until_true(func=_check_data_table_nova_servers,
duration=100, sleep_for=5):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.skip_because(bug='1486246')
@test.attr(type='smoke')
@test.services('compute', 'network')
def test_nova_datasource_driver_flavors(self):
def _check_data_table_nova_flavors():
# Fetch data from nova each time, because this test may start
# before nova has all the users.
flavors = self.flavors_client.list_flavors(detail=True)
flavor_id_map = {}
for flavor in flavors:
flavor_id_map[flavor['id']] = flavor
results = (
self.admin_manager.congress_client.list_datasource_rows(
self.datasource_id, 'flavors'))
# TODO(alexsyip): Not sure what the following OS-FLV-EXT-DATA:
# prefix is for.
keys = ['id', 'name', 'vcpus', 'ram', 'disk',
'OS-FLV-EXT-DATA:ephemeral', 'rxtx_factor']
for row in results['results']:
match = True
try:
flavor_row = flavor_id_map[row['data'][0]]
except KeyError:
return False
for index in range(len(keys)):
if row['data'][index] != flavor_row[keys[index]]:
match = False
break
if match:
return True
return False
if not test.call_until_true(func=_check_data_table_nova_flavors,
duration=100, sleep_for=5):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")

@@ -0,0 +1,299 @@
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import subprocess
import tempfile
from oslo_log import log as logging
from tempest.common import cred_provider
from tempest import config
from tempest import exceptions
from tempest import manager as tempestmanager
from tempest import test
from tempest_lib import decorators
from tempest_lib import exceptions as restexc
from congress_tempest_tests.services.policy import policy_client
from congress_tempest_tests.tests.scenario import manager_congress # noqa
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestHA(manager_congress.ScenarioPolicyBase):
def setUp(self):
super(TestHA, self).setUp()
self.keypairs = {}
self.servers = []
self.replicas = {}
def _prepare_replica(self, port_num):
replica_url = "http://127.0.0.1:%d" % port_num
ksclient = self.admin_manager.identity_client
resp = ksclient.create_service('congressha',
CONF.congressha.replica_type,
description='policy ha service')
self.replica_service_id = resp['id']
resp = ksclient.create_endpoint(self.replica_service_id,
CONF.identity.region,
publicurl=replica_url,
adminurl=replica_url,
internalurl=replica_url)
self.replica_endpoint_id = resp['id']
def _cleanup_replica(self):
ksclient = self.admin_manager.identity_client
ksclient.delete_endpoint(self.replica_endpoint_id)
ksclient.delete_service(self.replica_service_id)
def start_replica(self, port_num):
self._prepare_replica(port_num)
f = tempfile.NamedTemporaryFile(mode='w', suffix='.conf',
prefix='congress%d-' % port_num,
dir='/tmp', delete=False)
conf_file = f.name
        with open('/etc/congress/congress.conf') as template:
            conf = template.read()
conf = conf.replace('# bind_port = 1789',
'bind_port = %d\n' % port_num)
conf = conf.replace('# datasource_sync_period = 60',
'datasource_sync_period = 5')
f.write(conf)
f.close()
args = ['/usr/bin/python',
'bin/congress-server',
'--config-file',
conf_file]
out = tempfile.NamedTemporaryFile(mode='w', suffix='.out',
prefix='congress%d-' % port_num,
dir='/tmp', delete=False)
err = tempfile.NamedTemporaryFile(mode='w', suffix='.err',
prefix='congress%d-' % port_num,
dir='/tmp', delete=False)
p = subprocess.Popen(args, stdout=out, stderr=err,
cwd='/opt/stack/congress')
assert port_num not in self.replicas
self.replicas[port_num] = (p, conf_file)
def stop_replica(self, port_num):
proc, conf_file = self.replicas[port_num]
# Using proc.terminate() will block at proc.wait(), no idea why yet
proc.kill()
proc.wait()
os.unlink(conf_file)
self.replicas[port_num] = (None, conf_file)
self._cleanup_replica()
def create_client(self, client_type):
creds = cred_provider.get_configured_credentials('identity_admin')
auth_prov = tempestmanager.get_auth_provider(creds)
return policy_client.PolicyClient(
auth_prov, client_type,
CONF.identity.region)
    def datasource_exists(self, client, datasource_id):
        try:
            LOG.debug("datasource_exists begin")
            body = client.list_datasource_status(datasource_id)
            LOG.debug("list_datasource_status: %s", str(body))
        except restexc.NotFound:
            LOG.debug("not found")
            return False
        except restexc.Unauthorized:
            LOG.debug("connection refused")
            return False
        except socket.error:
            LOG.debug("Replica server not ready")
            return False
        return True

    def datasource_missing(self, client, datasource_id):
        try:
            LOG.debug("datasource_missing begin")
            body = client.list_datasource_status(datasource_id)
            LOG.debug("list_datasource_status: %s", str(body))
        except restexc.NotFound:
            LOG.debug("not found")
            return True
        except restexc.Unauthorized:
            LOG.debug("connection refused")
            return False
        except socket.error:
            LOG.debug("Replica server not ready")
            return False
        return False
def find_fake(self, client):
datasources = client.list_datasources()
for r in datasources['results']:
if r['name'] == 'fake':
LOG.debug('existing fake driver: %s', str(r['id']))
return r['id']
return None
def create_fake(self, client):
# Create fake datasource if it does not exist. Returns the
# fake datasource id.
fake_id = self.find_fake(client)
if fake_id:
return fake_id
item = {'id': None,
'name': 'fake',
'driver': 'fake_datasource',
'config': '{"username":"fakeu", "tenant_name": "faket",' +
'"password": "fakep",' +
'"auth_url": "http://127.0.0.1:5000/v2"}',
'description': 'bar',
'enabled': True}
ret = client.create_datasource(item)
LOG.debug('created fake driver: %s', str(ret['id']))
return ret['id']
@decorators.skip_because(bug='1486246')
@test.attr(type='smoke')
def test_datasource_db_sync_add(self):
# Verify that a replica adds a datasource when a datasource
# appears in the database.
client1 = self.admin_manager.congress_client
# delete fake if it exists.
old_fake_id = self.find_fake(client1)
if old_fake_id:
client1.delete_datasource(old_fake_id)
# Verify that primary server has no fake datasource
if not test.call_until_true(
func=lambda: self.datasource_missing(client1, old_fake_id),
duration=60, sleep_for=1):
raise exceptions.TimeoutException(
"primary should not have fake, but does")
need_to_delete_fake = False
try:
# Create a new fake datasource
fake_id = self.create_fake(client1)
need_to_delete_fake = True
# Verify that primary server has fake datasource
if not test.call_until_true(
func=lambda: self.datasource_exists(client1, fake_id),
duration=60, sleep_for=1):
raise exceptions.TimeoutException(
"primary should have fake, but does not")
# start replica
self.start_replica(CONF.congressha.replica_port)
# Create session for second server.
client2 = self.create_client(CONF.congressha.replica_type)
# Verify that second server has fake datasource
if not test.call_until_true(
func=lambda: self.datasource_exists(client2, fake_id),
duration=60, sleep_for=1):
raise exceptions.TimeoutException(
"replica should have fake, but does not")
# Remove fake from primary server instance.
LOG.debug("removing fake datasource %s", str(fake_id))
client1.delete_datasource(fake_id)
need_to_delete_fake = False
# Confirm that fake is gone from primary server instance.
if not test.call_until_true(
func=lambda: self.datasource_missing(client1, fake_id),
duration=60, sleep_for=1):
self.stop_replica(CONF.congressha.replica_port)
raise exceptions.TimeoutException(
"primary instance still has fake")
LOG.debug("removed fake datasource from primary instance")
# Confirm that second service instance removes fake.
if not test.call_until_true(
func=lambda: self.datasource_missing(client2, fake_id),
duration=60, sleep_for=1):
raise exceptions.TimeoutException(
"replica should remove fake, but still has it")
finally:
self.stop_replica(CONF.congressha.replica_port)
if need_to_delete_fake:
self.admin_manager.congress_client.delete_datasource(fake_id)
@decorators.skip_because(bug='1486246')
@test.attr(type='smoke')
def test_datasource_db_sync_remove(self):
# Verify that a replica removes a datasource when a datasource
# disappears from the database.
client1 = self.admin_manager.congress_client
fake_id = self.create_fake(client1)
need_to_delete_fake = True
try:
self.start_replica(CONF.congressha.replica_port)
# Verify that primary server has fake datasource
if not test.call_until_true(
func=lambda: self.datasource_exists(client1, fake_id),
duration=60, sleep_for=1):
raise exceptions.TimeoutException(
"primary should have fake, but does not")
# Create session for second server.
client2 = self.create_client(CONF.congressha.replica_type)
# Verify that second server has fake datasource
if not test.call_until_true(
func=lambda: self.datasource_exists(client2, fake_id),
duration=60, sleep_for=1):
raise exceptions.TimeoutException(
"replica should have fake, but does not")
# Remove fake from primary server instance.
LOG.debug("removing fake datasource %s", str(fake_id))
client1.delete_datasource(fake_id)
need_to_delete_fake = False
# Confirm that fake is gone from primary server instance.
if not test.call_until_true(
func=lambda: self.datasource_missing(client1, fake_id),
duration=60, sleep_for=1):
self.stop_replica(CONF.congressha.replica_port)
raise exceptions.TimeoutException(
"primary instance still has fake")
LOG.debug("removed fake datasource from primary instance")
# Confirm that second service instance removes fake.
if not test.call_until_true(
func=lambda: self.datasource_missing(client2, fake_id),
duration=60, sleep_for=1):
raise exceptions.TimeoutException(
"replica should remove fake, but still has it")
finally:
self.stop_replica(CONF.congressha.replica_port)
if need_to_delete_fake:
self.admin_manager.congress_client.delete_datasource(fake_id)

tests/scenario/helper.py (new file, executable)

@@ -0,0 +1,26 @@
# Copyright (c) 2015 Hewlett Packard. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import retrying
@retrying.retry(stop_max_attempt_number=20, wait_fixed=1000)
def retry_check_function_return_value(f, expected_value, error_msg=None):
"""Check if function f returns expected value."""
if not error_msg:
error_msg = 'Expected value "%s" not found' % expected_value
r = f()
if r != expected_value:
raise Exception(error_msg)
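
Example use (hypothetical table name and count; list_policy_rows comes
from the PolicyClient added in this change):

    # Retry up to 20 times, one second apart, until the 'error' table
    # reaches three rows.
    retry_check_function_return_value(
        lambda: len(client.list_policy_rows(
            'classification', 'error')['results']),
        3,
        error_msg='error table never reached 3 rows')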

tests/scenario/manager_congress.py (new file)

@@ -0,0 +1,267 @@
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import re
from oslo_log import log as logging
from tempest.common import cred_provider
from tempest import config # noqa
from tempest import exceptions # noqa
from tempest import manager as tempestmanager
from tempest.scenario import manager # noqa
from tempest.services.network import resources as net_resources # noqa
from tempest import test # noqa
from tempest_lib.common.utils import data_utils
from congress_tempest_tests.services.policy import policy_client
CONF = config.CONF
LOG = logging.getLogger(__name__)
Floating_IP_tuple = collections.namedtuple('Floating_IP_tuple',
['floating_ip', 'server'])
def get_datasource_id(client, name):
datasources = client.list_datasources()
for datasource in datasources['results']:
if datasource['name'] == name:
return datasource['id']
raise Exception("Datasource %s not found." % name)
# Note: these tests all use neutron today, so we build on the network
# scenario test machinery.
class ScenarioPolicyBase(manager.NetworkScenarioTest):
@classmethod
def setUpClass(cls):
super(ScenarioPolicyBase, cls).setUpClass()
# auth provider for admin credentials
creds = cred_provider.get_configured_credentials('identity_admin')
auth_prov = tempestmanager.get_auth_provider(creds)
cls.admin_manager.congress_client = policy_client.PolicyClient(
auth_prov, "policy", CONF.identity.region)
def _setup_network_and_servers(self):
self.security_group = (self._create_security_group
(tenant_id=self.tenant_id))
self.network, self.subnet, self.router = self.create_networks()
self.check_networks()
name = data_utils.rand_name('server-smoke')
server = self._create_server(name, self.network)
self._check_tenant_network_connectivity()
floating_ip = self.create_floating_ip(server)
self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
def check_networks(self):
"""Check for newly created network/subnet/router.
Checks that we see the newly created network/subnet/router via
checking the result of list_[networks,routers,subnets].
"""
seen_nets = self._list_networks()
seen_names = [n['name'] for n in seen_nets]
seen_ids = [n['id'] for n in seen_nets]
self.assertIn(self.network.name, seen_names)
self.assertIn(self.network.id, seen_ids)
if self.subnet:
seen_subnets = self._list_subnets()
seen_net_ids = [n['network_id'] for n in seen_subnets]
seen_subnet_ids = [n['id'] for n in seen_subnets]
self.assertIn(self.network.id, seen_net_ids)
self.assertIn(self.subnet.id, seen_subnet_ids)
if self.router:
seen_routers = self._list_routers()
seen_router_ids = [n['id'] for n in seen_routers]
seen_router_names = [n['name'] for n in seen_routers]
self.assertIn(self.router.name,
seen_router_names)
self.assertIn(self.router.id,
seen_router_ids)
def _create_server(self, name, network):
keypair = self.create_keypair()
self.keypairs[keypair['name']] = keypair
security_groups = [self.security_group]
create_kwargs = {
'networks': [
{'uuid': network.id},
],
'key_name': keypair['name'],
'security_groups': security_groups,
}
server = self.create_server(name=name, create_kwargs=create_kwargs)
self.servers.append(server)
return server
def _get_server_key(self, server):
return self.keypairs[server['key_name']]['private_key']
def _check_tenant_network_connectivity(self):
ssh_login = CONF.compute.image_ssh_user
for server in self.servers:
# call the common method in the parent class
super(ScenarioPolicyBase, self)._check_tenant_network_connectivity(
server, ssh_login, self._get_server_key(server),
servers_for_debug=self.servers)
def _create_and_associate_floating_ips(self, server):
public_network_id = CONF.network.public_network_id
floating_ip = self._create_floating_ip(server, public_network_id)
self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
def _check_public_network_connectivity(self, should_connect=True,
msg=None):
ssh_login = CONF.compute.image_ssh_user
floating_ip, server = self.floating_ip_tuple
ip_address = floating_ip.floating_ip_address
private_key = None
if should_connect:
private_key = self._get_server_key(server)
# call the common method in the parent class
super(ScenarioPolicyBase, self)._check_public_network_connectivity(
ip_address, ssh_login, private_key, should_connect, msg,
self.servers)
def _disassociate_floating_ips(self):
floating_ip, server = self.floating_ip_tuple
self._disassociate_floating_ip(floating_ip)
self.floating_ip_tuple = Floating_IP_tuple(
floating_ip, None)
def _reassociate_floating_ips(self):
floating_ip, server = self.floating_ip_tuple
name = data_utils.rand_name('new_server-smoke-')
# create a new server for the floating ip
server = self._create_server(name, self.network)
self._associate_floating_ip(floating_ip, server)
self.floating_ip_tuple = Floating_IP_tuple(
floating_ip, server)
def _create_new_network(self):
self.new_net = self._create_network(tenant_id=self.tenant_id)
self.new_subnet = self._create_subnet(
network=self.new_net,
gateway_ip=None)
def _hotplug_server(self):
old_floating_ip, server = self.floating_ip_tuple
ip_address = old_floating_ip.floating_ip_address
private_key = self._get_server_key(server)
ssh_client = self.get_remote_client(ip_address,
private_key=private_key)
old_nic_list = self._get_server_nics(ssh_client)
# get a port from a list of one item
port_list = self._list_ports(device_id=server['id'])
self.assertEqual(1, len(port_list))
old_port = port_list[0]
_, interface = self.interface_client.create_interface(
server=server['id'],
network_id=self.new_net.id)
self.addCleanup(self.network_client.wait_for_resource_deletion,
'port',
interface['port_id'])
self.addCleanup(self.delete_wrapper,
self.interface_client.delete_interface,
server['id'], interface['port_id'])
def check_ports():
self.new_port_list = [port for port in
self._list_ports(device_id=server['id'])
if port != old_port]
return len(self.new_port_list) == 1
if not test.call_until_true(check_ports, CONF.network.build_timeout,
CONF.network.build_interval):
raise exceptions.TimeoutException("No new port attached to the "
"server in time (%s sec) !"
% CONF.network.build_timeout)
new_port = net_resources.DeletablePort(client=self.network_client,
**self.new_port_list[0])
def check_new_nic():
new_nic_list = self._get_server_nics(ssh_client)
self.diff_list = [n for n in new_nic_list if n not in old_nic_list]
return len(self.diff_list) == 1
if not test.call_until_true(check_new_nic, CONF.network.build_timeout,
CONF.network.build_interval):
raise exceptions.TimeoutException("Interface not visible on the "
"guest after %s sec"
% CONF.network.build_timeout)
num, new_nic = self.diff_list[0]
ssh_client.assign_static_ip(nic=new_nic,
addr=new_port.fixed_ips[0]['ip_address'])
ssh_client.turn_nic_on(nic=new_nic)
def _get_server_nics(self, ssh_client):
reg = re.compile(r'(?P<num>\d+): (?P<nic_name>\w+):')
ipatxt = ssh_client.get_ip_list()
return reg.findall(ipatxt)
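    # For reference, the regex above extracts (index, name) pairs from
    # "ip address"-style output, e.g.:
    #
    #     >>> reg.findall("1: lo: <LOOPBACK,UP>\n2: eth0: <BROADCAST,UP>")
    #     [('1', 'lo'), ('2', 'eth0')]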
def _check_network_internal_connectivity(self, network):
"""via ssh check VM internal connectivity:
- ping internal gateway and DHCP port, implying in-tenant connectivity
pinging both, because L3 and DHCP agents might be on different nodes.
"""
floating_ip, server = self.floating_ip_tuple
# get internal ports' ips:
# get all network ports in the new network
internal_ips = (p['fixed_ips'][0]['ip_address'] for p in
self._list_ports(tenant_id=server['tenant_id'],
network_id=network.id)
if p['device_owner'].startswith('network'))
self._check_server_connectivity(floating_ip, internal_ips)
def _check_network_external_connectivity(self):
"""ping public network default gateway to imply external conn."""
if not CONF.network.public_network_id:
msg = 'public network not defined.'
LOG.info(msg)
return
subnet = self._list_subnets(
network_id=CONF.network.public_network_id)
self.assertEqual(1, len(subnet), "Found %d subnets" % len(subnet))
external_ips = [subnet[0]['gateway_ip']]
self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
external_ips)
def _check_server_connectivity(self, floating_ip, address_list):
ip_address = floating_ip.floating_ip_address
private_key = self._get_server_key(self.floating_ip_tuple.server)
ssh_source = self._ssh_to_server(ip_address, private_key)
for remote_ip in address_list:
try:
self.assertTrue(self._check_remote_connectivity(ssh_source,
remote_ip),
"Timed out waiting for %s to become "
"reachable" % remote_ip)
except Exception:
LOG.exception("Unable to access {dest} via ssh to "
"floating-ip {src}".format(dest=remote_ip,
src=floating_ip))
raise

238
tests/scenario/test_congress_basic_ops.py Normal file
View File

@ -0,0 +1,238 @@
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest_lib import decorators
from tempest import config # noqa
from tempest import exceptions # noqa
from tempest import test # noqa
from congress_tempest_tests.tests.scenario import helper # noqa
from congress_tempest_tests.tests.scenario import manager_congress # noqa
import random
import string
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestPolicyBasicOps(manager_congress.ScenarioPolicyBase):
@classmethod
def check_preconditions(cls):
super(TestPolicyBasicOps, cls).check_preconditions()
if not (CONF.network.tenant_networks_reachable
or CONF.network.public_network_id):
msg = ('Either tenant_networks_reachable must be "true", or '
'public_network_id must be defined.')
cls.enabled = False
raise cls.skipException(msg)
def setUp(self):
super(TestPolicyBasicOps, self).setUp()
self.keypairs = {}
self.servers = []
def _create_random_policy(self):
policy_name = "nova_%s" % ''.join(random.choice(string.lowercase)
for x in range(10))
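        # policy_name is a random but valid policy identifier,
        # e.g. "nova_qzjdkwpmxa".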
body = {"name": policy_name}
resp = self.admin_manager.congress_client.create_policy(body)
self.addCleanup(self.admin_manager.congress_client.delete_policy,
resp['id'])
return resp['name']
def _create_policy_rule(self, policy_name, rule, rule_name=None,
comment=None):
body = {'rule': rule}
if rule_name:
body['name'] = rule_name
if comment:
body['comment'] = comment
client = self.admin_manager.congress_client
response = client.create_policy_rule(policy_name, body)
if response:
self.addCleanup(client.delete_policy_rule, policy_name,
response['id'])
return response
else:
raise Exception('Failed to create policy rule (%s, %s)'
% (policy_name, rule))
def _create_test_server(self, name=None):
image_ref = CONF.compute.image_ref
flavor_ref = CONF.compute.flavor_ref
keypair = self.create_keypair()
security_group = self._create_security_group()
security_groups = [{'name': security_group['name']}]
create_kwargs = {'key_name': keypair['name'],
'security_groups': security_groups}
instance = self.create_server(name=name,
image=image_ref,
flavor=flavor_ref,
create_kwargs=create_kwargs)
return instance
@decorators.skip_because(bug='1486246')
@test.attr(type='smoke')
@test.services('compute', 'network')
def test_execution_action(self):
metadata = {'testkey1': 'value3'}
server = self._create_test_server()
congress_client = self.admin_manager.congress_client
servers_client = self.admin_manager.servers_client
policy = self._create_random_policy()
service = 'nova'
action = 'servers.set_meta'
action_args = {'args': {'positional': [],
'named': {'server': server['id'],
'metadata': metadata}}}
body = action_args
# execute via datasource api
body.update({'name': action})
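        # body now holds (sketch of the assembled payload):
        #   {'name': 'servers.set_meta',
        #    'args': {'positional': [],
        #             'named': {'server': <server id>,
        #                       'metadata': {'testkey1': 'value3'}}}}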
congress_client.execute_datasource_action(service, "execute", body)
return_meta = servers_client.get_server_metadata_item(server["id"],
"testkey1")
self.assertEqual(metadata, return_meta,
"Failed to execute action via datasource API")
# execute via policy api
body.update({'name': service + ':' + action})
congress_client.execute_policy_action(policy, "execute", False,
False, body)
return_meta = servers_client.get_server_metadata_item(server["id"],
"testkey1")
self.assertEqual(metadata, return_meta,
"Failed to execute action via policy API")
@decorators.skip_because(bug='1486246')
@test.attr(type='smoke')
@test.services('compute', 'network')
def test_policy_basic_op(self):
self._setup_network_and_servers()
body = {"rule": "port_security_group(id, security_group_name) "
":-neutronv2:ports(id, tenant_id, name, network_id,"
"mac_address, admin_state_up, status, device_id, "
"device_owner),"
"neutronv2:security_group_port_bindings(id, "
"security_group_id), neutronv2:security_groups("
"security_group_id, tenant_id1, security_group_name,"
"description)"}
results = self.admin_manager.congress_client.create_policy_rule(
'classification', body)
rule_id = results['id']
self.addCleanup(
self.admin_manager.congress_client.delete_policy_rule,
'classification', rule_id)
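        # The rule above is a three-way join over neutronv2 tables:
        # ports supplies each port id, security_group_port_bindings links
        # that id to a security_group_id, and security_groups maps the
        # group id to its name, so port_security_group(id, name) lists
        # every port with the names of its attached security groups.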
        # Find the ports on this server
ports = self._list_ports(device_id=self.servers[0]['id'])
        def check_data():
            results = self.admin_manager.congress_client.list_policy_rows(
                'classification', 'port_security_group')
            for row in results['results']:
                if (row['data'][0] == ports[0]['id'] and
                        row['data'][1] ==
                        self.servers[0]['security_groups'][0]['name']):
                    return True
            return False
if not test.call_until_true(func=check_data,
duration=100, sleep_for=5):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
@decorators.skip_because(bug='1486246')
@test.attr(type='smoke')
@test.services('compute', 'network')
def test_reactive_enforcement(self):
servers_client = self.admin_manager.servers_client
server_name = 'server_under_test'
server = self._create_test_server(name=server_name)
policy_name = self._create_random_policy()
meta_key = 'meta_test_key1'
meta_val = 'value1'
meta_data = {meta_key: meta_val}
rules = [
'execute[nova:servers_set_meta(id, "%s", "%s")] :- '
'test_servers(id)' % (meta_key, meta_val),
'test_servers(id) :- '
'nova:servers(id, name, host_id, status, '
'tenant_id, user_id, image_id, flavor_id),'
'equal(name, "%s")' % server_name]
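        # Together these rules implement reactive enforcement: the second
        # rule derives test_servers(id) for any nova server whose name
        # matches, and the first reacts to each such row by executing
        # nova:servers_set_meta on it, so creating the server is enough
        # to make Congress set the metadata the test polls for below.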
for rule in rules:
self._create_policy_rule(policy_name, rule)
f = lambda: servers_client.get_server_metadata_item(server['id'],
meta_key)
helper.retry_check_function_return_value(f, meta_data)
class TestCongressDataSources(manager_congress.ScenarioPolicyBase):
@classmethod
def check_preconditions(cls):
super(TestCongressDataSources, cls).check_preconditions()
if not (CONF.network.tenant_networks_reachable
or CONF.network.public_network_id):
msg = ('Either tenant_networks_reachable must be "true", or '
'public_network_id must be defined.')
cls.enabled = False
raise cls.skipException(msg)
def test_all_loaded_datasources_are_initialized(self):
datasources = self.admin_manager.congress_client.list_datasources()
def _check_all_datasources_are_initialized():
for datasource in datasources['results']:
results = (
self.admin_manager.congress_client.list_datasource_status(
datasource['id']))
if results['initialized'] != 'True':
return False
return True
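            # NOTE: the check above compares against the string 'True',
            # which suggests the status API serializes its fields as
            # strings rather than booleans.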
if not test.call_until_true(
func=_check_all_datasources_are_initialized,
duration=100, sleep_for=5):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")
def test_all_datasources_have_tables(self):
datasources = self.admin_manager.congress_client.list_datasources()
def check_data():
for datasource in datasources['results']:
results = (
self.admin_manager.congress_client.list_datasource_tables(
datasource['id']))
                # NOTE(arosen): if there are no results here we return
                # False, since a driver that exposes no tables indicates
                # something is wrong with it.
if not results['results']:
return False
return True
if not test.call_until_true(func=check_data,
duration=100, sleep_for=5):
raise exceptions.TimeoutException("Data did not converge in time "
"or failure in server")