Migrate networking-sfc tests to neutron-tempest-plugin
As discussed in the neutron_ci meeting [1], the QA team would like to move the tempest tests
for the stadium projects out of their own repos and into repos dedicated to tempest plugins.
This is the first part of a two-stage move: the tempest tests are copied into the
neutron-tempest-plugin repo [2] rather than spawning new, separate repos. Some UUIDs were
updated to fix the idempotent ids.

[1] http://eavesdrop.openstack.org/meetings/neutron_ci/2019/neutron_ci.2019-03-12-16.01.log.html#l-94
[2] https://etherpad.openstack.org/p/neutron_stadium_move_to_tempest_plugin_repo

Change-Id: I80ff2daac44bd3a4ee179c7a6cf1d62a8fd2004c
parent: cbec36256a
commit: 1a987ecb4d
.zuul.yaml (27 lines changed)
@@ -690,6 +690,30 @@
    vars:
      branch_override: stable/stein

- job:
    name: neutron-tempest-plugin-sfc
    parent: neutron-tempest-plugin
    timeout: 10800
    required-projects:
      - openstack/devstack-gate
      - openstack/networking-sfc
      - openstack/neutron
      - openstack/neutron-tempest-plugin
      - openstack/tempest
    vars:
      tempest_test_regex: ^neutron_tempest_plugin\.sfc
      tox_envlist: all-plugin
      devstack_plugins:
        networking-sfc: https://opendev.org/openstack/networking-sfc
        neutron-tempest-plugin: https://opendev.org/openstack/neutron-tempest-plugin
      network_api_extensions_sfc:
        - flow_classifier
        - sfc
      devstack_localrc:
        NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_sfc) | join(',') }}"
    files:
      - ^neutron_tempest_plugin/sfc/.*$

- project-template:
    name: neutron-tempest-plugin-jobs
    check:
@@ -750,3 +774,6 @@
      - check-requirements
      - tempest-plugin-jobs
      - release-notes-jobs-python3
    check:
      jobs:
        - neutron-tempest-plugin-sfc
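The job selects the migrated tests purely by module path: tempest_test_regex anchors on the new neutron_tempest_plugin.sfc namespace. A minimal sketch of that selection logic follows (illustrative only, not part of the commit; the second test ID is hypothetical):

# Illustrative check that the job's tempest_test_regex matches the migrated
# namespace and nothing else. The second ID below is hypothetical.
import re

regex = re.compile(r'^neutron_tempest_plugin\.sfc')
assert regex.match(
    'neutron_tempest_plugin.sfc.tests.api.test_sfc_extensions')
assert not regex.match('neutron_tempest_plugin.api.test_networks')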
neutron_tempest_plugin/sfc/__init__.py (new empty file)
neutron_tempest_plugin/sfc/services/__init__.py (new empty file)
neutron_tempest_plugin/sfc/services/flowclassifier_client.py (new file, 53 lines)
@@ -0,0 +1,53 @@
# Copyright 2016 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tempest.lib import exceptions as lib_exc
from tempest.lib.services.network import base


class FlowClassifierClient(base.BaseNetworkClient):

    def create_flowclassifier(self, **kwargs):
        uri = '/sfc/flow_classifiers'
        post_data = {'flow_classifier': kwargs}
        return self.create_resource(uri, post_data)

    def update_flowclassifier(self, flowclassifier_id, **kwargs):
        uri = '/sfc/flow_classifiers/%s' % flowclassifier_id
        post_data = {'flow_classifier': kwargs}
        return self.update_resource(uri, post_data)

    def show_flowclassifier(self, flowclassifier_id, **fields):
        uri = '/sfc/flow_classifiers/%s' % flowclassifier_id
        return self.show_resource(uri, **fields)

    def delete_flowclassifier(self, flowclassifier_id):
        uri = '/sfc/flow_classifiers/%s' % flowclassifier_id
        return self.delete_resource(uri)

    def list_flowclassifiers(self, **filters):
        uri = '/sfc/flow_classifiers'
        return self.list_resources(uri, **filters)

    def is_resource_deleted(self, id):
        try:
            self.show_flowclassifier(id)
        except lib_exc.NotFound:
            return True
        return False

    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'flow_classifier'
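The is_resource_deleted()/resource_type pair exists so the delete-waiting helper inherited from tempest's RestClient can poll this client. A rough usage sketch, assuming an already constructed FlowClassifierClient such as the one the test mixin later in this change sets up:

# Rough sketch; `client` is an already constructed FlowClassifierClient.
def delete_flowclassifier_and_wait(client, fc_id):
    client.delete_flowclassifier(fc_id)
    # Inherited from tempest's RestClient: keeps calling
    # is_resource_deleted(fc_id) until it returns True or the configured
    # build timeout expires.
    client.wait_for_resource_deletion(fc_id)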
neutron_tempest_plugin/sfc/services/sfc_client.py (new file, 165 lines)
@@ -0,0 +1,165 @@
# Copyright 2016 Futurewei. All rights reserved.
# Copyright 2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tempest.lib import exceptions as lib_exc
from tempest.lib.services.network import base


class PortChainClient(base.BaseNetworkClient):

    def create_port_chain(self, **kwargs):
        uri = '/sfc/port_chains'
        post_data = {'port_chain': kwargs}
        return self.create_resource(uri, post_data)

    def update_port_chain(self, pc_id, **kwargs):
        uri = '/sfc/port_chains/%s' % pc_id
        post_data = {'port_chain': kwargs}
        return self.update_resource(uri, post_data)

    def show_port_chain(self, pc_id, **fields):
        uri = '/sfc/port_chains/%s' % pc_id
        return self.show_resource(uri, **fields)

    def delete_port_chain(self, pc_id):
        uri = '/sfc/port_chains/%s' % pc_id
        return self.delete_resource(uri)

    def list_port_chains(self, **filters):
        uri = '/sfc/port_chains'
        return self.list_resources(uri, **filters)

    def is_resource_deleted(self, id):
        try:
            self.show_port_chain(id)
        except lib_exc.NotFound:
            return True
        return False

    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'sfc'


class PortPairGroupClient(base.BaseNetworkClient):

    def create_port_pair_group(self, **kwargs):
        uri = '/sfc/port_pair_groups'
        post_data = {'port_pair_group': kwargs}
        return self.create_resource(uri, post_data)

    def update_port_pair_group(self, pg_id, **kwargs):
        uri = '/sfc/port_pair_groups/%s' % pg_id
        post_data = {'port_pair_group': kwargs}
        return self.update_resource(uri, post_data)

    def show_port_pair_group(self, pg_id, **fields):
        uri = '/sfc/port_pair_groups/%s' % pg_id
        return self.show_resource(uri, **fields)

    def delete_port_pair_group(self, pg_id):
        uri = '/sfc/port_pair_groups/%s' % pg_id
        return self.delete_resource(uri)

    def list_port_pair_groups(self, **filters):
        uri = '/sfc/port_pair_groups'
        return self.list_resources(uri, **filters)

    def is_resource_deleted(self, id):
        try:
            self.show_port_pair_group(id)
        except lib_exc.NotFound:
            return True
        return False

    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'sfc'


class PortPairClient(base.BaseNetworkClient):

    def create_port_pair(self, **kwargs):
        uri = '/sfc/port_pairs'
        post_data = {'port_pair': kwargs}
        return self.create_resource(uri, post_data)

    def update_port_pair(self, pp_id, **kwargs):
        uri = '/sfc/port_pairs/%s' % pp_id
        post_data = {'port_pair': kwargs}
        return self.update_resource(uri, post_data)

    def show_port_pair(self, pp_id, **fields):
        uri = '/sfc/port_pairs/%s' % pp_id
        return self.show_resource(uri, **fields)

    def delete_port_pair(self, pp_id):
        uri = '/sfc/port_pairs/%s' % pp_id
        return self.delete_resource(uri)

    def list_port_pairs(self, **filters):
        uri = '/sfc/port_pairs'
        return self.list_resources(uri, **filters)

    def is_resource_deleted(self, id):
        try:
            self.show_port_pair(id)
        except lib_exc.NotFound:
            return True
        return False

    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'sfc'


class ServiceGraphClient(base.BaseNetworkClient):

    def create_service_graph(self, **kwargs):
        uri = '/sfc/service_graphs'
        post_data = {'service_graph': kwargs}
        return self.create_resource(uri, post_data)

    def update_service_graph(self, pp_id, **kwargs):
        uri = '/sfc/service_graphs/%s' % pp_id
        post_data = {'service_graph': kwargs}
        return self.update_resource(uri, post_data)

    def show_service_graph(self, pp_id, **fields):
        uri = '/sfc/service_graphs/%s' % pp_id
        return self.show_resource(uri, **fields)

    def delete_service_graph(self, pp_id):
        uri = '/sfc/service_graphs/%s' % pp_id
        return self.delete_resource(uri)

    def list_service_graphs(self, **filters):
        uri = '/sfc/service_graphs'
        return self.list_resources(uri, **filters)

    def is_resource_deleted(self, id):
        try:
            self.show_service_graph(id)
        except lib_exc.NotFound:
            return True
        return False

    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'sfc'
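The four clients map onto the SFC API's resource hierarchy: port pairs are grouped into port pair groups, which are referenced by port chains (optionally together with flow classifiers), and service graphs link port chains. A rough ordering sketch, assuming already constructed clients (named after the attributes the test base classes below use) and an existing Neutron port with ID port_id, both of which are assumptions and not shown in this diff:

# Rough sketch of the resource ordering these clients expose.
pp = portpair_client.create_port_pair(
    ingress=port_id, egress=port_id)['port_pair']
ppg = portpairgroup_client.create_port_pair_group(
    port_pairs=[pp['id']])['port_pair_group']
pc = portchain_client.create_port_chain(
    port_pair_groups=[ppg['id']],
    flow_classifiers=[])['port_chain']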
neutron_tempest_plugin/sfc/tests/__init__.py (new empty file)
neutron_tempest_plugin/sfc/tests/api/__init__.py (new empty file)
neutron_tempest_plugin/sfc/tests/api/base.py (new file, 194 lines)
@@ -0,0 +1,194 @@
# Copyright 2016 Futurewei. All rights reserved.
# Copyright 2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import socket

import netaddr
from tempest.api.network import base
from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions as lib_exc

from neutron_tempest_plugin.sfc.tests import flowclassifier_client
from neutron_tempest_plugin.sfc.tests import sfc_client


class BaseFlowClassifierTest(
    flowclassifier_client.FlowClassifierClientMixin,
    base.BaseAdminNetworkTest
):
    @classmethod
    def resource_setup(cls):
        super(BaseFlowClassifierTest, cls).resource_setup()
        if not utils.is_extension_enabled('flow_classifier', 'network'):
            msg = "FlowClassifier Extension not enabled."
            raise cls.skipException(msg)
        cls.network = cls.create_network()
        cls.subnet = cls.create_subnet(cls.network)
        cls.host_id = socket.gethostname()

    @classmethod
    def resource_cleanup(cls):
        if not utils.is_extension_enabled('flow_classifier', 'network'):
            msg = "FlowClassifier Extension not enabled."
            raise cls.skipException(msg)
        super(BaseFlowClassifierTest, cls).resource_cleanup()

    @classmethod
    def _create_port(cls, network, **kwargs):
        body = cls.admin_ports_client.create_port(
            network_id=network['id'],
            **kwargs)
        port = body['port']
        return port

    def _try_create_flowclassifier(self, **kwargs):
        if 'logical_source_port' not in kwargs:
            port_kwargs = {"binding:host_id": self.host_id}
            port = self._create_port(network=self.network, **port_kwargs)
            self.addCleanup(self._try_delete_port, port['id'])
            kwargs['logical_source_port'] = port['id']
            if 'source_ip_prefix' not in kwargs:
                port_ip_prefix = str(netaddr.IPNetwork(
                    port['fixed_ips'][0]['ip_address']))
                kwargs['source_ip_prefix'] = port_ip_prefix
        fc = self.create_flowclassifier(**kwargs)
        self.addCleanup(self._try_delete_flowclassifier, fc['id'])
        return fc

    def _try_delete_port(self, port_id):
        try:
            self.admin_ports_client.delete_port(port_id)
        except lib_exc.NotFound:
            pass
        body = self.admin_ports_client.list_ports()
        ports_list = body['ports']
        self.assertNotIn(port_id, [n['id'] for n in ports_list])

    def _try_delete_flowclassifier(self, fc_id):
        # delete flowclassifier, if it exists
        try:
            self.flowclassifier_client.delete_flowclassifier(fc_id)
        # if flowclassifier is not found, this means it was deleted
        except lib_exc.NotFound:
            pass
        body = self.flowclassifier_client.list_flowclassifiers()
        fc_list = body['flow_classifiers']
        self.assertNotIn(fc_id, [n['id'] for n in fc_list])


class BaseSfcTest(
    sfc_client.SfcClientMixin, BaseFlowClassifierTest
):
    @classmethod
    def resource_setup(cls):
        super(BaseSfcTest, cls).resource_setup()
        if not utils.is_extension_enabled('sfc', 'network'):
            msg = "Sfc Extension not enabled."
            raise cls.skipException(msg)

    @classmethod
    def resource_cleanup(cls):
        if not utils.is_extension_enabled('sfc', 'network'):
            msg = "Sfc Extension not enabled."
            raise cls.skipException(msg)
        super(BaseSfcTest, cls).resource_cleanup()

    def _try_create_port_pair(self, **kwargs):
        if 'ingress' not in kwargs or 'egress' not in kwargs:
            router = self.admin_routers_client.create_router(
                name=data_utils.rand_name('router-'))['router']
            self.addCleanup(
                self.admin_routers_client.delete_router, router['id'])
            port_kwargs = {"binding:host_id": self.host_id}
            port = self._create_port(
                network=self.network, **port_kwargs)
            self.addCleanup(self._try_delete_port, port['id'])
            self.admin_routers_client.add_router_interface(
                router['id'], port_id=port['id'])
            self.addCleanup(self.admin_routers_client.remove_router_interface,
                            router['id'],
                            port_id=port['id'])
            if 'ingress' not in kwargs:
                kwargs['ingress'] = port['id']
            if 'egress' not in kwargs:
                kwargs['egress'] = port['id']
        pp = self.create_port_pair(**kwargs)
        self.addCleanup(self._try_delete_port_pair, pp['id'])
        return pp

    def _try_delete_port_pair(self, pp_id):
        # delete port pair, if it exists
        try:
            self.portpair_client.delete_port_pair(pp_id)
        # if port pair is not found, this means it was deleted
        except lib_exc.NotFound:
            pass
        body = self.portpair_client.list_port_pairs()
        pp_list = body['port_pairs']
        self.assertNotIn(pp_id, [n['id'] for n in pp_list])

    def _try_create_port_pair_group(self, **kwargs):
        pg = self.create_port_pair_group(
            **kwargs)
        self.addCleanup(self._try_delete_port_pair_group, pg['id'])
        # self.pgs.append(pg)
        return pg

    def _try_delete_port_pair_group(self, pg_id):
        # delete port pair group, if it exists
        try:
            self.portpairgroup_client.delete_port_pair_group(pg_id)
        # if port pair group is not found, this means it was deleted
        except lib_exc.NotFound:
            pass
        body = self.portpairgroup_client.list_port_pair_groups()
        pg_list = body['port_pair_groups']
        self.assertNotIn(pg_id, [n['id'] for n in pg_list])

    def _try_create_port_chain(self, **kwargs):
        pc = self.create_port_chain(
            **kwargs)
        self.addCleanup(self._try_delete_port_chain, pc['id'])
        # self.pcs.append(pc)
        return pc

    def _try_delete_port_chain(self, pc_id):
        # delete port chain, if it exists
        try:
            self.portchain_client.delete_port_chain(pc_id)
        # if port chain is not found, this means it was deleted
        except lib_exc.NotFound:
            pass
        body = self.portchain_client.list_port_chains()
        pc_list = body['port_chains']
        self.assertNotIn(pc_id, [n['id'] for n in pc_list])

    def _try_create_service_graph(self, **kwargs):
        graph = self.create_service_graph(
            **kwargs)
        self.addCleanup(self._try_delete_service_graph, graph['id'])
        return graph

    def _try_delete_service_graph(self, graph_id):
        # delete Service Graph, if it exists
        try:
            self.sfcgraph_client.delete_service_graph(graph_id)
        # if Service Graph is not found, this means it was deleted
        except lib_exc.NotFound:
            pass
        body = self.sfcgraph_client.list_service_graphs()
        graph_list = body['service_graphs']
        self.assertNotIn(graph_id, [n['id'] for n in graph_list])
@@ -0,0 +1,91 @@
# Copyright 2016 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tempest.lib.common.utils import data_utils
from tempest.lib import decorators

from neutron_tempest_plugin.sfc.tests.api import base


class FlowClassifierExtensionTestJSON(base.BaseFlowClassifierTest):
    """Tests the following operations in the Neutron API:

        List flowclassifiers
        Create flowclassifier
        Update flowclassifier
        Delete flowclassifier
        Show flowclassifier
    """

    @decorators.idempotent_id('1b84cf01-9c09-4ce7-bc72-b15e39076468')
    def test_list_flowclassifier(self):
        # List flow classifiers
        fc = self._try_create_flowclassifier()
        fcs = self.flowclassifier_client.list_flowclassifiers()
        self.assertIn((
            fc['id'],
            fc['name'],
            fc['source_ip_prefix'],
            fc['logical_source_port']
        ), [(
            m['id'],
            m['name'],
            m['source_ip_prefix'],
            m['logical_source_port'],
        ) for m in fcs['flow_classifiers']])

    @decorators.idempotent_id('b2ed2a37-fc64-4be5-819b-9cf2a13db70b')
    def test_list_flowclassifier_with_logical_destination_port(self):
        # List flow classifiers with logical_destination_port
        fc = self._try_create_flowclassifier()
        fcs = self.flowclassifier_client.list_flowclassifiers()
        self.assertIn((
            fc['id'],
            fc['name'],
            fc['source_ip_prefix'],
            fc['destination_ip_prefix'],
            fc['logical_source_port'],
            fc['logical_destination_port']
        ), [(
            m['id'],
            m['name'],
            m['source_ip_prefix'],
            m['destination_ip_prefix'],
            m['logical_source_port'],
            m['logical_destination_port']
        ) for m in fcs['flow_classifiers']])

    @decorators.idempotent_id('563564f7-7077-4f5e-8cdc-51f37ae5a2b9')
    def test_update_flowclassifier(self):
        # Create flow classifier
        name1 = data_utils.rand_name('test')
        fc = self._try_create_flowclassifier(
            name=name1
        )
        fc_id = fc['id']

        # Update flow classifier
        name2 = data_utils.rand_name('test')
        body = self.flowclassifier_client.update_flowclassifier(
            fc_id, name=name2)
        self.assertEqual(body['flow_classifier']['name'], name2)

    @decorators.idempotent_id('3ff8c08e-26ff-4034-ae48-810ed213a998')
    def test_show_flowclassifier(self):
        # show a created flow classifier
        created = self._try_create_flowclassifier()
        fc = self.flowclassifier_client.show_flowclassifier(
            created['id'])
        for key, value in fc['flow_classifier'].items():
            self.assertEqual(created[key], value)
neutron_tempest_plugin/sfc/tests/api/test_sfc_extensions.py (new file, 413 lines)
@@ -0,0 +1,413 @@
# Copyright 2016 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr

from tempest.lib.common.utils import data_utils
from tempest.lib import decorators

from neutron_tempest_plugin.sfc.tests.api import base


class SfcExtensionTestJSON(base.BaseSfcTest):
    """Tests the following operations in the Neutron API:

        List port chains
        Create port chain
        Update port chain
        Delete port chain
        Show port chain
        List port pair groups
        Create port pair group
        Update port pair group
        Delete port pair group
        Show port pair groups
        List port pairs
        Create port pair
        Update port pair
        Delete port pair
        Show port pair
        List Service Graphs
        Create Service Graph
        Update Service Graph
        Delete Service Graph
        Show Service Graphs
    """
    @decorators.idempotent_id('1a6067bf-b967-42a7-8b62-158a9ec185b4')
    def test_create_port_pair_different_ingress_egress(self):
        ingress_network = self.create_network()
        self.addCleanup(self.networks_client.delete_network,
                        ingress_network['id'])
        cidr = netaddr.IPNetwork('192.168.1.0/24')
        allocation_pools = {'allocation_pools': [{'start': str(cidr[2]),
                                                  'end': str(cidr[-2])}]}
        ingress_subnet = self.create_subnet(ingress_network, cidr=cidr,
                                            mask_bits=cidr.prefixlen,
                                            **allocation_pools)
        self.addCleanup(self.subnets_client.delete_subnet,
                        ingress_subnet['id'])
        egress_network = self.create_network()
        self.addCleanup(self.networks_client.delete_network,
                        egress_network['id'])
        cidr = netaddr.IPNetwork('192.168.2.0/24')
        allocation_pools = {'allocation_pools': [{'start': str(cidr[2]),
                                                  'end': str(cidr[-2])}]}
        egress_subnet = self.create_subnet(egress_network, cidr=cidr,
                                           mask_bits=cidr.prefixlen,
                                           **allocation_pools)
        self.addCleanup(self.subnets_client.delete_subnet,
                        egress_subnet['id'])
        router = self.admin_routers_client.create_router(
            name=data_utils.rand_name('router-'))['router']
        self.addCleanup(self.admin_routers_client.delete_router, router['id'])
        port_kwargs = {"binding:host_id": self.host_id}
        ingress = self._create_port(
            network=ingress_network, **port_kwargs)
        self.addCleanup(self._try_delete_port, ingress['id'])
        self.admin_routers_client.add_router_interface(
            router['id'], port_id=ingress['id'])
        self.addCleanup(self.admin_routers_client.remove_router_interface,
                        router['id'],
                        port_id=ingress['id'])
        egress = self._create_port(
            network=egress_network, **port_kwargs)
        self.addCleanup(self._try_delete_port, egress['id'])
        self.admin_routers_client.add_router_interface(
            router['id'], port_id=egress['id'])
        self.addCleanup(self.admin_routers_client.remove_router_interface,
                        router['id'],
                        port_id=egress['id'])
        pp = self._try_create_port_pair(
            ingress=ingress['id'],
            egress=egress['id'])
        pps = self.portpair_client.list_port_pairs()
        self.assertIn((
            pp['id'],
            pp['name'],
            pp['ingress'],
            pp['egress']
        ), [(
            m['id'],
            m['name'],
            m['ingress'],
            m['egress'],
        ) for m in pps['port_pairs']])

    @decorators.idempotent_id('264cc4b8-aa17-4cea-88bf-26400e9751d9')
    def test_list_port_pair(self):
        # List port pairs
        pp = self._try_create_port_pair()
        pps = self.portpair_client.list_port_pairs()
        self.assertIn((
            pp['id'],
            pp['name'],
            pp['ingress'],
            pp['egress']
        ), [(
            m['id'],
            m['name'],
            m['ingress'],
            m['egress'],
        ) for m in pps['port_pairs']])

    @decorators.idempotent_id('83018ad7-3666-4396-bf3a-288a2b6a0e7c')
    def test_show_port_pair(self):
        # show a created port pair
        created = self._try_create_port_pair()
        pp = self.portpair_client.show_port_pair(
            created['id'])
        for key, value in pp['port_pair'].items():
            self.assertEqual(created[key], value)

    @decorators.idempotent_id('69d21fa4-bdd5-4142-b1cc-6578037f605a')
    def test_update_port_pair(self):
        # Create port pair
        name1 = data_utils.rand_name('test')
        pp = self._try_create_port_pair(
            name=name1
        )
        pp_id = pp['id']

        # Update port pair
        name2 = data_utils.rand_name('test')
        body = self.portpair_client.update_port_pair(
            pp_id, name=name2)
        self.assertEqual(body['port_pair']['name'], name2)

    @decorators.idempotent_id('4fff9a4a-a98a-42bd-b3f4-483b93e6f297')
    def test_create_port_pair_group_empty_port_pairs(self):
        pg = self._try_create_port_pair_group(
            port_pairs=[])
        pgs = self.portpairgroup_client.list_port_pair_groups()
        self.assertIn((
            pg['id'],
            pg['name'],
            set(pg['port_pairs']),
        ), [(
            m['id'],
            m['name'],
            set(m['port_pairs'])
        ) for m in pgs['port_pair_groups']])

    @decorators.idempotent_id('1a1c98a0-ff54-4647-a798-011e902825fa')
    def test_create_port_pair_group_multi_port_pairs(self):
        pp1 = self._try_create_port_pair()
        pp2 = self._try_create_port_pair()
        pg = self._try_create_port_pair_group(
            port_pairs=[pp1['id'], pp2['id']])
        pgs = self.portpairgroup_client.list_port_pair_groups()
        self.assertIn((
            pg['id'],
            pg['name'],
            set(pg['port_pairs']),
        ), [(
            m['id'],
            m['name'],
            set(m['port_pairs'])
        ) for m in pgs['port_pair_groups']])

    @decorators.idempotent_id('e7d432c4-a7b4-444b-88cc-f420c5c1c29e')
    def test_list_port_pair_group(self):
        # List port pair groups
        pp = self._try_create_port_pair()
        pg = self._try_create_port_pair_group(port_pairs=[pp['id']])
        pgs = self.portpairgroup_client.list_port_pair_groups()
        self.assertIn((
            pg['id'],
            pg['name'],
            pg['port_pairs'],
        ), [(
            m['id'],
            m['name'],
            m['port_pairs']
        ) for m in pgs['port_pair_groups']])

    @decorators.idempotent_id('f12faa84-8dcb-4fbb-b03a-9ab05040a350')
    def test_show_port_pair_group(self):
        # show a created port pair group
        pp = self._try_create_port_pair()
        created = self._try_create_port_pair_group(port_pairs=[pp['id']])
        pg = self.portpairgroup_client.show_port_pair_group(
            created['id'])
        for key, value in pg['port_pair_group'].items():
            self.assertEqual(created[key], value)

    @decorators.idempotent_id('8991c2ef-71ba-4033-9037-5c8bf52a0c88')
    def test_update_port_pair_group(self):
        # Create port pair group
        pp = self._try_create_port_pair()
        name1 = data_utils.rand_name('test')
        pg = self._try_create_port_pair_group(
            name=name1, port_pairs=[pp['id']]
        )
        pg_id = pg['id']

        # Update port pair group
        name2 = data_utils.rand_name('test')
        body = self.portpairgroup_client.update_port_pair_group(
            pg_id, name=name2)
        self.assertEqual(body['port_pair_group']['name'], name2)

    @decorators.idempotent_id('d93d7ec3-f12e-4fad-b82b-759d358ff044')
    def test_create_port_chain_empty_flow_classifiers(self):
        # Create port chains
        pp = self._try_create_port_pair()
        pg = self._try_create_port_pair_group(port_pairs=[pp['id']])
        pc = self._try_create_port_chain(
            port_pair_groups=[pg['id']],
            flow_classifiers=[])
        pcs = self.portchain_client.list_port_chains()
        self.assertIn((
            pc['id'],
            pc['name'],
            pc['port_pair_groups'],
            pc['flow_classifiers']
        ), [(
            m['id'],
            m['name'],
            m['port_pair_groups'],
            m['flow_classifiers']
        ) for m in pcs['port_chains']])

    @decorators.idempotent_id('0c5ac396-6027-4bd1-af21-79fda6df9b77')
    def test_create_port_chain_multi_flowclassifiers(self):
        # Create port chains
        pp = self._try_create_port_pair()
        pg = self._try_create_port_pair_group(port_pairs=[pp['id']])
        fc1 = self._try_create_flowclassifier()
        fc2 = self._try_create_flowclassifier()
        pc = self._try_create_port_chain(
            port_pair_groups=[pg['id']],
            flow_classifiers=[fc1['id'], fc2['id']])
        pcs = self.portchain_client.list_port_chains()
        self.assertIn((
            pc['id'],
            pc['name'],
            set(pc['flow_classifiers'])
        ), [(
            m['id'],
            m['name'],
            set(m['flow_classifiers'])
        ) for m in pcs['port_chains']])

    @decorators.idempotent_id('81f0faba-49ae-435a-8454-566c1e0a929e')
    def test_create_port_chain_flowclassifiers_symmetric(self):
        # Create symmetric port chain
        router = self.admin_routers_client.create_router(
            name=data_utils.rand_name('router-'))['router']
        self.addCleanup(
            self.admin_routers_client.delete_router, router['id'])
        port_kwargs = {"binding:host_id": self.host_id}
        dst_port = self._create_port(
            network=self.network, **port_kwargs)
        self.addCleanup(self._try_delete_port, dst_port['id'])
        self.admin_routers_client.add_router_interface(
            router['id'], port_id=dst_port['id'])
        self.addCleanup(self.admin_routers_client.remove_router_interface,
                        router['id'],
                        port_id=dst_port['id'])
        pp = self._try_create_port_pair()
        pg = self._try_create_port_pair_group(port_pairs=[pp['id']])
        fc = self._try_create_flowclassifier(
            logical_destination_port=dst_port['id'])
        pc = self._try_create_port_chain(
            port_pair_groups=[pg['id']],
            flow_classifiers=[fc['id']],
            chain_parameters={'symmetric': True})
        pcs = self.portchain_client.list_port_chains()
        self.assertIn((
            pc['id'],
            pc['name'],
            pc['chain_parameters'],
            set(pc['flow_classifiers'])
        ), [(
            m['id'],
            m['name'],
            m['chain_parameters'],
            set(m['flow_classifiers'])
        ) for m in pcs['port_chains']])

    @decorators.idempotent_id('3f82c78f-e119-449f-bf6c-a964db45be3a')
    def test_create_port_chain_multi_port_pair_groups(self):
        # Create port chain
        pp1 = self._try_create_port_pair()
        pg1 = self._try_create_port_pair_group(port_pairs=[pp1['id']])
        pp2 = self._try_create_port_pair()
        pg2 = self._try_create_port_pair_group(port_pairs=[pp2['id']])
        fc = self._try_create_flowclassifier()
        pc = self._try_create_port_chain(
            port_pair_groups=[pg1['id'], pg2['id']],
            flow_classifiers=[fc['id']])
        pcs = self.portchain_client.list_port_chains()
        self.assertIn((
            pc['id'],
            pc['name'],
            pc['port_pair_groups'],
        ), [(
            m['id'],
            m['name'],
            m['port_pair_groups']
        ) for m in pcs['port_chains']])

    @decorators.idempotent_id('144629ec-7538-4595-93ea-89e28ba50724')
    def test_create_port_chain_port_pair_group_symmetric(self):
        # Create symmetric port chain with port_pair_group
        router = self.admin_routers_client.create_router(
            name=data_utils.rand_name('router-'))['router']
        self.addCleanup(
            self.admin_routers_client.delete_router, router['id'])
        port_kwargs = {"binding:host_id": self.host_id}
        dst_port = self._create_port(
            network=self.network, **port_kwargs)
        self.addCleanup(self._try_delete_port, dst_port['id'])
        self.admin_routers_client.add_router_interface(
            router['id'], port_id=dst_port['id'])
        self.addCleanup(self.admin_routers_client.remove_router_interface,
                        router['id'],
                        port_id=dst_port['id'])
        pp = self._try_create_port_pair()
        pg = self._try_create_port_pair_group(port_pairs=[pp['id']])
        fc = self._try_create_flowclassifier(
            logical_destination_port=dst_port['id'])
        pc = self._try_create_port_chain(
            port_pair_groups=[pg['id']],
            flow_classifiers=[fc['id']],
            chain_parameters={'symmetric': True})
        pcs = self.portchain_client.list_port_chains()
        self.assertIn((
            pc['id'],
            pc['name'],
            pc['port_pair_groups'],
            pc['chain_parameters']
        ), [(
            m['id'],
            m['name'],
            m['port_pair_groups'],
            m['chain_parameters']
        ) for m in pcs['port_chains']])

    @decorators.idempotent_id('83cfceba-f9d9-41e2-b27f-f919d8ff83a9')
    def test_list_port_chain(self):
        # List port chains
        pp = self._try_create_port_pair()
        pg = self._try_create_port_pair_group(port_pairs=[pp['id']])
        fc = self._try_create_flowclassifier()
        pc = self._try_create_port_chain(
            port_pair_groups=[pg['id']],
            flow_classifiers=[fc['id']])
        pcs = self.portchain_client.list_port_chains()
        self.assertIn((
            pc['id'],
            pc['name'],
            pc['port_pair_groups'],
            set(pc['flow_classifiers'])
        ), [(
            m['id'],
            m['name'],
            m['port_pair_groups'],
            set(m['flow_classifiers'])
        ) for m in pcs['port_chains']])

    @decorators.idempotent_id('0433ca11-dbc9-448d-8433-0df252e3d0cd')
    def test_show_port_chain(self):
        # show a created port chain
        pp = self._try_create_port_pair()
        pg = self._try_create_port_pair_group(port_pairs=[pp['id']])
        fc = self._try_create_flowclassifier()
        created = self._try_create_port_chain(
            port_pair_groups=[pg['id']],
            flow_classifiers=[fc['id']])
        pc = self.portchain_client.show_port_chain(
            created['id'])
        for key, value in pc['port_chain'].items():
            self.assertEqual(created[key], value)

    @decorators.idempotent_id('4ad641d3-823f-4b25-9438-68970593253d')
    def test_update_port_chain(self):
        # Create port chain
        pp = self._try_create_port_pair()
        pg = self._try_create_port_pair_group(port_pairs=[pp['id']])
        fc = self._try_create_flowclassifier()
        name1 = data_utils.rand_name('test')
        pc = self._try_create_port_chain(
            name=name1, port_pair_groups=[pg['id']],
            flow_classifiers=[fc['id']]
        )
        pc_id = pc['id']

        # Update port chain
        name2 = data_utils.rand_name('test')
        body = self.portchain_client.update_port_chain(
            pc_id, name=name2)
        self.assertEqual(body['port_chain']['name'], name2)
neutron_tempest_plugin/sfc/tests/flowclassifier_client.py (new file, 45 lines)
@@ -0,0 +1,45 @@
# Copyright 2016 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tempest import config

from neutron_tempest_plugin.sfc.services import flowclassifier_client

CONF = config.CONF


class FlowClassifierClientMixin(object):

    @classmethod
    def resource_setup(cls):
        super(FlowClassifierClientMixin, cls).resource_setup()
        manager = cls.os_admin
        cls.flowclassifier_client = (
            flowclassifier_client.FlowClassifierClient(
                manager.auth_provider,
                CONF.network.catalog_type,
                CONF.network.region or CONF.identity.region,
                endpoint_type=CONF.network.endpoint_type,
                build_interval=CONF.network.build_interval,
                build_timeout=CONF.network.build_timeout,
                **manager.default_params
            )
        )

    @classmethod
    def create_flowclassifier(cls, **kwargs):
        body = cls.flowclassifier_client.create_flowclassifier(
            **kwargs)
        fc = body['flow_classifier']
        return fc
neutron_tempest_plugin/sfc/tests/scenario/base.py (new file, 66 lines)
@@ -0,0 +1,66 @@
# Copyright 2016 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging

from tempest import config
from tempest.lib import exceptions as lib_exc

from neutron_tempest_plugin.sfc.tests import flowclassifier_client
from neutron_tempest_plugin.sfc.tests.scenario import manager
from neutron_tempest_plugin.sfc.tests import sfc_client

CONF = config.CONF
LOG = logging.getLogger(__name__)


class SfcScenarioTest(
    flowclassifier_client.FlowClassifierClientMixin,
    sfc_client.SfcClientMixin,
    manager.NetworkScenarioTest
):
    def _check_connectivity(
        self, source_ip, destination_ip, routes=None,
        username=None, private_key=None
    ):
        msg = "ip address %r is reachable" % source_ip
        ok = self.ping_ip_address(source_ip, should_succeed=True)
        self.assertTrue(ok, msg=msg)
        client = self.get_remote_client(
            source_ip, username=username, private_key=private_key)
        cmd = 'traceroute -n -I %s' % destination_ip
        LOG.debug('exec command on %s: %s', source_ip, cmd)
        try:
            result = client.exec_command(cmd)
            LOG.debug(
                'traceroute from %s to %s:\n%s',
                source_ip, destination_ip, result)
            lines = result.split('\n')
            lines = [line for line in lines if line]
            lines = lines[1:-1]
            if len(lines) != len(routes):
                LOG.error('length mismatch:\n%s\nvs\n%s', lines, routes)
            self.assertEqual(len(lines), len(routes))
            for line, route_list in zip(lines, routes):
                found = any([route in line for route in route_list])
                if not found:
                    LOG.error('did not find any route %s in %s',
                              route_list, line)
                self.assertTrue(found)
        except lib_exc.SSHExecCommandFailed as e:
            LOG.exception(e)
            raise
        except lib_exc.SSHTimeout as e:
            LOG.exception(e)
            raise
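_check_connectivity() compares each traceroute hop line against a per-hop list of acceptable addresses, so routes is a list of lists aligned with the expected hops. A hypothetical call from a test deriving from SfcScenarioTest (every IP address below is a placeholder):

# Hypothetical invocation; all addresses are placeholders, and self.keypair
# is assumed to have been created by the test beforehand.
self._check_connectivity(
    source_ip='10.0.0.5',
    destination_ip='10.0.0.9',
    routes=[
        ['10.0.0.11', '10.0.0.12'],  # hop 1: either service function port
        ['10.0.0.9'],                # hop 2: the destination itself
    ],
    username=CONF.validation.image_ssh_user,
    private_key=self.keypair['private_key'])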
neutron_tempest_plugin/sfc/tests/scenario/manager.py (new file, 875 lines)
@@ -0,0 +1,875 @@
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import subprocess

import netaddr
from oslo_log import log
from oslo_utils import netutils

from tempest.common import compute
from tempest.common.utils.linux import remote_client
from tempest.common.utils import net_utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions as lib_exc
import tempest.test

CONF = config.CONF

LOG = log.getLogger(__name__)


class ScenarioTest(tempest.test.BaseTestCase):
    """Base class for scenario tests. Uses tempest own clients. """

    credentials = ['primary']

    @classmethod
    def setup_clients(cls):
        super(ScenarioTest, cls).setup_clients()
        # Clients (in alphabetical order)
        cls.keypairs_client = cls.os_primary.keypairs_client
        cls.servers_client = cls.os_primary.servers_client
        # Neutron network client
        cls.networks_client = cls.os_primary.networks_client
        cls.ports_client = cls.os_primary.ports_client
        cls.routers_client = cls.os_primary.routers_client
        cls.subnets_client = cls.os_primary.subnets_client
        cls.floating_ips_client = cls.os_primary.floating_ips_client
        cls.security_groups_client = cls.os_primary.security_groups_client
        cls.security_group_rules_client = (
            cls.os_primary.security_group_rules_client)

    # ## Test functions library
    #
    # The create_[resource] functions only return body and discard the
    # resp part which is not used in scenario tests

    def _create_port(self, network_id, client=None, namestart='port-quotatest',
                     **kwargs):
        if not client:
            client = self.ports_client
        name = data_utils.rand_name(namestart)
        result = client.create_port(
            name=name,
            network_id=network_id,
            **kwargs)
        self.assertIsNotNone(result, 'Unable to allocate port')
        port = result['port']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        client.delete_port, port['id'])
        return port

    def create_keypair(self, client=None):
        if not client:
            client = self.keypairs_client
        name = data_utils.rand_name(self.__class__.__name__)
        # We don't need to create a keypair by pubkey in scenario
        body = client.create_keypair(name=name)
        self.addCleanup(client.delete_keypair, name)
        return body['keypair']

    def create_server(self, name=None, image_id=None, flavor=None,
                      validatable=False, wait_until='ACTIVE',
                      clients=None, **kwargs):
        """Wrapper utility that returns a test server.

        This wrapper utility calls the common create test server and
        returns a test server. The purpose of this wrapper is to minimize
        the impact on the code of the tests already using this
        function.
        """

        # NOTE(jlanoux): As a first step, ssh checks in the scenario
        # tests need to be run regardless of the run_validation and
        # validatable parameters and thus until the ssh validation job
        # becomes voting in CI. The test resources management and IP
        # association are taken care of in the scenario tests.
        # Therefore, the validatable parameter is set to false in all
        # those tests. In this way create_server just return a standard
        # server and the scenario tests always perform ssh checks.

        # Needed for the cross_tenant_traffic test:
        if clients is None:
            clients = self.os_primary

        if name is None:
            name = data_utils.rand_name(self.__class__.__name__ + "-server")

        vnic_type = CONF.network.port_vnic_type

        # If vnic_type is configured create port for
        # every network
        if vnic_type:
            ports = []

            create_port_body = {'binding:vnic_type': vnic_type,
                                'namestart': 'port-smoke'}
            if kwargs:
                # Convert security group names to security group ids
                # to pass to create_port
                if 'security_groups' in kwargs:
                    security_groups = \
                        clients.security_groups_client.list_security_groups(
                        ).get('security_groups')
                    sec_dict = dict([(s['name'], s['id'])
                                     for s in security_groups])

                    sec_groups_names = [s['name'] for s in kwargs.pop(
                        'security_groups')]
                    security_groups_ids = [sec_dict[s]
                                           for s in sec_groups_names]

                    if security_groups_ids:
                        create_port_body[
                            'security_groups'] = security_groups_ids
                networks = kwargs.pop('networks', [])
            else:
                networks = []

            # If there are no networks passed to us we look up
            # for the project's private networks and create a port.
            # The same behaviour as we would expect when passing
            # the call to the clients with no networks
            if not networks:
                networks = clients.networks_client.list_networks(
                    **{'router:external': False, 'fields': 'id'})['networks']

            # It's net['uuid'] if networks come from kwargs
            # and net['id'] if they come from
            # clients.networks_client.list_networks
            for net in networks:
                net_id = net.get('uuid', net.get('id'))
                if 'port' not in net:
                    port = self._create_port(network_id=net_id,
                                             client=clients.ports_client,
                                             **create_port_body)
                    ports.append({'port': port['id']})
                else:
                    ports.append({'port': net['port']})
            if ports:
                kwargs['networks'] = ports
            self.ports = ports

        tenant_network = self.get_tenant_network()

        body, _ = compute.create_test_server(
            clients,
            tenant_network=tenant_network,
            wait_until=wait_until,
            name=name, flavor=flavor,
            image_id=image_id, **kwargs)

        self.addCleanup(waiters.wait_for_server_termination,
                        clients.servers_client, body['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        clients.servers_client.delete_server, body['id'])
        server = clients.servers_client.show_server(body['id'])['server']
        return server

    def get_remote_client(self, ip_address, username=None, private_key=None,
                          server=None):
        """Get a SSH client to a remote server

        @param ip_address the server floating or fixed IP address to use
                          for ssh validation
        @param username name of the Linux account on the remote server
        @param private_key the SSH private key to use
        @param server: server dict, used for debugging purposes
        @return a RemoteClient object
        """

        if username is None:
            username = CONF.validation.image_ssh_user
        # Set this with 'keypair' or others to log in with keypair or
        # username/password.
        if CONF.validation.auth_method == 'keypair':
            password = None
            if private_key is None:
                private_key = self.keypair['private_key']
        else:
            password = CONF.validation.image_ssh_password
            private_key = None
        linux_client = remote_client.RemoteClient(
            ip_address, username, pkey=private_key, password=password,
            server=server, servers_client=self.servers_client)
        linux_client.validate_authentication()
        return linux_client

    def _log_console_output(self, servers=None, client=None):
        if not CONF.compute_feature_enabled.console_output:
            LOG.debug('Console output not supported, cannot log')
            return
        client = client or self.servers_client
        if not servers:
            servers = client.list_servers()
            servers = servers['servers']
        for server in servers:
            try:
                console_output = client.get_console_output(
                    server['id'])['output']
                LOG.debug('Console output for %s\nbody=\n%s',
                          server['id'], console_output)
            except lib_exc.NotFound:
                LOG.debug("Server %s disappeared(deleted) while looking "
                          "for the console log", server['id'])

    def _log_net_info(self, exc):
        # network debug is called as part of ssh init
        if not isinstance(exc, lib_exc.SSHTimeout):
            LOG.debug('Network information on a devstack host')

    def ping_ip_address(self, ip_address, should_succeed=True,
                        ping_timeout=None, mtu=None):
        timeout = ping_timeout or CONF.validation.ping_timeout
        cmd = ['ping', '-c1', '-w1']

        if mtu:
            cmd += [
                # don't fragment
                '-M', 'do',
                # ping receives just the size of ICMP payload
                '-s', str(net_utils.get_ping_payload_size(mtu, 4))
            ]
        cmd.append(ip_address)

        def ping():
            proc = subprocess.Popen(cmd,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            proc.communicate()

            return (proc.returncode == 0) == should_succeed

        caller = test_utils.find_test_caller()
        LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
                  ' expected result is %(should_succeed)s', {
                      'caller': caller, 'ip': ip_address, 'timeout': timeout,
                      'should_succeed':
                      'reachable' if should_succeed else 'unreachable'
                  })
        result = test_utils.call_until_true(ping, timeout, 1)
        LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
                  'ping result is %(result)s', {
                      'caller': caller, 'ip': ip_address, 'timeout': timeout,
                      'result': 'expected' if result else 'unexpected'
                  })
        return result

    def check_vm_connectivity(self, ip_address,
                              username=None,
                              private_key=None,
                              should_connect=True,
                              mtu=None):
        """Check server connectivity

        :param ip_address: server to test against
        :param username: server's ssh username
        :param private_key: server's ssh private key to be used
        :param should_connect: True/False indicates positive/negative test
            positive - attempt ping and ssh
            negative - attempt ping and fail if succeed
        :param mtu: network MTU to use for connectivity validation

        :raises: AssertError if the result of the connectivity check does
            not match the value of the should_connect param
        """
        if should_connect:
            msg = "Timed out waiting for %s to become reachable" % ip_address
        else:
            msg = "ip address %s is reachable" % ip_address
        self.assertTrue(self.ping_ip_address(ip_address,
                                             should_succeed=should_connect,
                                             mtu=mtu),
                        msg=msg)
        if should_connect:
            # no need to check ssh for negative connectivity
            self.get_remote_client(ip_address, username, private_key)

    def check_public_network_connectivity(self, ip_address, username,
                                          private_key, should_connect=True,
                                          msg=None, servers=None, mtu=None):
        # The target login is assumed to have been configured for
        # key-based authentication by cloud-init.
        LOG.debug('checking network connections to IP %s with user: %s',
                  ip_address, username)
        try:
            self.check_vm_connectivity(ip_address,
                                       username,
                                       private_key,
                                       should_connect=should_connect,
                                       mtu=mtu)
        except Exception:
            ex_msg = 'Public network connectivity check failed'
            if msg:
                ex_msg += ": " + msg
            LOG.exception(ex_msg)
            self._log_console_output(servers)
            raise


class NetworkScenarioTest(ScenarioTest):
    """Base class for network scenario tests.

    This class provide helpers for network scenario tests, using the neutron
    API. Helpers from ancestor which use the nova network API are overridden
    with the neutron API.

    This Class also enforces using Neutron instead of novanetwork.
    Subclassed tests will be skipped if Neutron is not enabled

    """
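In SFC scenario tests the ScenarioTest helpers above are typically combined roughly as in the following fragment. This is an illustrative sketch only, not code from this commit: key_name and networks are passed through create_server's **kwargs to tempest's create_test_server, and the fixed-IP lookup assumes a single NIC on the given network.

# Illustrative fragment for a class deriving from SfcScenarioTest.
def _boot_server_on_network(self, network):
    self.keypair = self.create_keypair()
    server = self.create_server(
        key_name=self.keypair['name'],
        networks=[{'uuid': network['id']}])
    # Assumes a single NIC; server addresses are keyed by network name.
    fixed_ip = server['addresses'][network['name']][0]['addr']
    # Ping first, then SSH in (check_vm_connectivity does both when
    # should_connect is True).
    self.check_vm_connectivity(
        fixed_ip,
        username=CONF.validation.image_ssh_user,
        private_key=self.keypair['private_key'])
    return server, fixed_ip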