Initial copy of api tests from tempest
This change is the result of running tools/copy_api_tests_from_tempest.sh. Change-Id: Ica02dbe1ed26f1bc9526ea9682756ebc5877cf4a
This commit is contained in:
parent
deb4631fa6
commit
b096d9f85d
neutron/tests/tempest
__init__.py
api
__init__.py
auth.pynetwork
__init__.py
admin
__init__.pytest_agent_management.pytest_dhcp_agent_scheduler.pytest_external_network_extension.pytest_external_networks_negative.pytest_floating_ips_admin_actions.pytest_l3_agent_scheduler.pytest_lbaas_agent_scheduler.pytest_load_balancer_admin_actions.pytest_quotas.pytest_routers_dvr.py
base.pybase_routers.pybase_security_groups.pytest_allowed_address_pair.pytest_dhcp_ipv6.pytest_extensions.pytest_extra_dhcp_options.pytest_floating_ips.pytest_floating_ips_negative.pytest_fwaas_extensions.pytest_load_balancer.pytest_metering_extensions.pytest_networks.pytest_networks_negative.pytest_ports.pytest_routers.pytest_routers_negative.pytest_security_groups.pytest_security_groups_negative.pytest_service_type_management.pytest_vpnaas_extensions.pycommon
__init__.pyaccounts.pycommands.pycred_provider.pycredentials.pycustom_matchers.py
config.pyexceptions.pymanager.pygenerator
glance_http.pyisolated_creds.pynegative_rest_client.pyservice_client.pyssh.pytempest_fixtures.pyutils
waiters.pyservices
test.py
0
neutron/tests/tempest/__init__.py
Normal file
0
neutron/tests/tempest/__init__.py
Normal file
0
neutron/tests/tempest/api/__init__.py
Normal file
0
neutron/tests/tempest/api/__init__.py
Normal file
0
neutron/tests/tempest/api/network/__init__.py
Normal file
0
neutron/tests/tempest/api/network/__init__.py
Normal file
0
neutron/tests/tempest/api/network/admin/__init__.py
Normal file
0
neutron/tests/tempest/api/network/admin/__init__.py
Normal file
@ -0,0 +1,89 @@
|
||||
# Copyright 2013 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest.common import tempest_fixtures as fixtures
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
|
||||
class AgentManagementTestJSON(base.BaseAdminNetworkTest):
    """Admin API tests for listing, showing and updating network agents.

    The whole class is skipped unless the 'agent' network extension is
    enabled in the cloud under test.
    """

    @classmethod
    def resource_setup(cls):
        # Cache a single agent record; the individual tests compare
        # against and mutate this one agent.
        super(AgentManagementTestJSON, cls).resource_setup()
        if not test.is_extension_enabled('agent', 'network'):
            msg = "agent extension not enabled."
            raise cls.skipException(msg)
        body = cls.admin_client.list_agents()
        agents = body['agents']
        cls.agent = agents[0]

    @test.attr(type='smoke')
    @test.idempotent_id('9c80f04d-11f3-44a4-8738-ed2f879b0ff4')
    def test_list_agent(self):
        """The cached agent must appear in the admin agent listing."""
        body = self.admin_client.list_agents()
        agents = body['agents']
        # Heartbeats must be excluded from comparison: they change between
        # the listing taken at setup time and the one taken here.
        self.agent.pop('heartbeat_timestamp', None)
        self.agent.pop('configurations', None)
        for agent in agents:
            agent.pop('heartbeat_timestamp', None)
            agent.pop('configurations', None)
        self.assertIn(self.agent, agents)

    @test.attr(type=['smoke'])
    @test.idempotent_id('e335be47-b9a1-46fd-be30-0874c0b751e6')
    def test_list_agents_non_admin(self):
        """A non-admin client must not see any agents."""
        body = self.client.list_agents()
        self.assertEqual(len(body["agents"]), 0)

    @test.attr(type='smoke')
    @test.idempotent_id('869bc8e8-0fda-4a30-9b71-f8a7cf58ca9f')
    def test_show_agent(self):
        """show_agent must return the record of the requested agent."""
        body = self.admin_client.show_agent(self.agent['id'])
        agent = body['agent']
        self.assertEqual(agent['id'], self.agent['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('371dfc5b-55b9-4cb5-ac82-c40eadaac941')
    def test_update_agent_status(self):
        origin_status = self.agent['admin_state_up']
        # Try to update the 'admin_state_up' to the original
        # one to avoid the negative effect.
        agent_status = {'admin_state_up': origin_status}
        body = self.admin_client.update_agent(agent_id=self.agent['id'],
                                              agent_info=agent_status)
        updated_status = body['agent']['admin_state_up']
        self.assertEqual(origin_status, updated_status)

    @test.attr(type='smoke')
    @test.idempotent_id('68a94a14-1243-46e6-83bf-157627e31556')
    def test_update_agent_description(self):
        # Serialize with any other test that touches the agent description.
        self.useFixture(fixtures.LockFixture('agent_description'))
        description = 'description for update agent.'
        agent_description = {'description': description}
        body = self.admin_client.update_agent(agent_id=self.agent['id'],
                                              agent_info=agent_description)
        self.addCleanup(self._restore_agent)
        updated_description = body['agent']['description']
        self.assertEqual(updated_description, description)

    def _restore_agent(self):
        """
        Restore the agent description after update test.
        """
        # An agent created without a description reports None; write back
        # an empty string in that case.
        description = self.agent['description'] or ''
        origin_agent = {'description': description}
        self.admin_client.update_agent(agent_id=self.agent['id'],
                                       agent_info=origin_agent)
|
@ -0,0 +1,97 @@
|
||||
# Copyright 2013 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
|
||||
class DHCPAgentSchedulersTestJSON(base.BaseAdminNetworkTest):
    """Admin API tests for the 'dhcp_agent_scheduler' network extension."""

    @classmethod
    def resource_setup(cls):
        super(DHCPAgentSchedulersTestJSON, cls).resource_setup()
        if not test.is_extension_enabled('dhcp_agent_scheduler', 'network'):
            msg = "dhcp_agent_scheduler extension not enabled."
            raise cls.skipException(msg)
        # Create a network and make sure it will be hosted by a
        # dhcp agent: this is done by creating a regular port
        cls.network = cls.create_network()
        cls.subnet = cls.create_subnet(cls.network)
        cls.cidr = cls.subnet['cidr']
        cls.port = cls.create_port(cls.network)

    @test.attr(type='smoke')
    @test.idempotent_id('5032b1fe-eb42-4a64-8f3b-6e189d8b5c7d')
    def test_list_dhcp_agent_hosting_network(self):
        # The call succeeding is the assertion; content is not checked here.
        self.admin_client.list_dhcp_agent_hosting_network(
            self.network['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('30c48f98-e45d-4ffb-841c-b8aad57c7587')
    def test_list_networks_hosted_by_one_dhcp(self):
        body = self.admin_client.list_dhcp_agent_hosting_network(
            self.network['id'])
        agents = body['agents']
        self.assertIsNotNone(agents)
        agent = agents[0]
        self.assertTrue(self._check_network_in_dhcp_agent(
            self.network['id'], agent))

    def _check_network_in_dhcp_agent(self, network_id, agent):
        """Return True if `agent` currently hosts network `network_id`."""
        network_ids = []
        body = self.admin_client.list_networks_hosted_by_one_dhcp_agent(
            agent['id'])
        networks = body['networks']
        for network in networks:
            network_ids.append(network['id'])
        return network_id in network_ids

    @test.attr(type='smoke')
    @test.idempotent_id('a0856713-6549-470c-a656-e97c8df9a14d')
    def test_add_remove_network_from_dhcp_agent(self):
        # The agent is now bound to the network, we can free the port
        self.client.delete_port(self.port['id'])
        self.ports.remove(self.port)
        # Placeholder with a None agent_type so the assertEqual below fails
        # with a clear message when no DHCP agent is found in the listing.
        agent = dict()
        agent['agent_type'] = None
        body = self.admin_client.list_agents()
        agents = body['agents']
        for a in agents:
            if a['agent_type'] == 'DHCP agent':
                agent = a
                break
        self.assertEqual(agent['agent_type'], 'DHCP agent', 'Could not find '
                         'DHCP agent in agent list though dhcp_agent_scheduler'
                         ' is enabled.')
        network = self.create_network()
        network_id = network['id']
        # Exercise both orderings of add/remove depending on whether the
        # scheduler already bound the fresh network to this agent.
        if self._check_network_in_dhcp_agent(network_id, agent):
            self._remove_network_from_dhcp_agent(network_id, agent)
            self._add_dhcp_agent_to_network(network_id, agent)
        else:
            self._add_dhcp_agent_to_network(network_id, agent)
            self._remove_network_from_dhcp_agent(network_id, agent)

    def _remove_network_from_dhcp_agent(self, network_id, agent):
        # Remove and verify the binding is gone.
        self.admin_client.remove_network_from_dhcp_agent(
            agent_id=agent['id'],
            network_id=network_id)
        self.assertFalse(self._check_network_in_dhcp_agent(
            network_id, agent))

    def _add_dhcp_agent_to_network(self, network_id, agent):
        # Add and verify the binding exists.
        self.admin_client.add_dhcp_agent_to_network(agent['id'],
                                                    network_id)
        self.assertTrue(self._check_network_in_dhcp_agent(
            network_id, agent))
|
@ -0,0 +1,126 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
|
||||
class ExternalNetworksTestJSON(base.BaseAdminNetworkTest):
    """Admin API tests for the 'router:external' network attribute."""

    @classmethod
    def resource_setup(cls):
        super(ExternalNetworksTestJSON, cls).resource_setup()
        # A regular (non-external) network used as a control in the tests.
        cls.network = cls.create_network()

    def _create_network(self, external=True):
        """Create a network as admin, optionally marked router:external.

        The network is scheduled for deletion at test cleanup.
        """
        post_body = {'name': data_utils.rand_name('network-')}
        if external:
            post_body['router:external'] = external
        body = self.admin_client.create_network(**post_body)
        network = body['network']
        self.addCleanup(self.admin_client.delete_network, network['id'])
        return network

    @test.idempotent_id('462be770-b310-4df9-9c42-773217e4c8b1')
    def test_create_external_network(self):
        # Create a network as an admin user specifying the
        # external network extension attribute
        ext_network = self._create_network()
        # Verifies router:external parameter
        self.assertIsNotNone(ext_network['id'])
        self.assertTrue(ext_network['router:external'])

    @test.idempotent_id('4db5417a-e11c-474d-a361-af00ebef57c5')
    def test_update_external_network(self):
        # Update a network as an admin user specifying the
        # external network extension attribute
        network = self._create_network(external=False)
        self.assertFalse(network.get('router:external', False))
        update_body = {'router:external': True}
        body = self.admin_client.update_network(network['id'],
                                                **update_body)
        updated_network = body['network']
        # Verify that router:external parameter was updated
        self.assertTrue(updated_network['router:external'])

    @test.idempotent_id('39be4c9b-a57e-4ff9-b7c7-b218e209dfcc')
    def test_list_external_networks(self):
        # Create external_net
        external_network = self._create_network()
        # List networks as a normal user and confirm the external
        # network extension attribute is returned for those networks
        # that were created as external
        body = self.client.list_networks()
        networks_list = [net['id'] for net in body['networks']]
        self.assertIn(external_network['id'], networks_list)
        self.assertIn(self.network['id'], networks_list)
        for net in body['networks']:
            if net['id'] == self.network['id']:
                self.assertFalse(net['router:external'])
            elif net['id'] == external_network['id']:
                self.assertTrue(net['router:external'])

    @test.idempotent_id('2ac50ab2-7ebd-4e27-b3ce-a9e399faaea2')
    def test_show_external_networks_attribute(self):
        # Create external_net
        external_network = self._create_network()
        # Show an external network as a normal user and confirm the
        # external network extension attribute is returned.
        body = self.client.show_network(external_network['id'])
        show_ext_net = body['network']
        self.assertEqual(external_network['name'], show_ext_net['name'])
        self.assertEqual(external_network['id'], show_ext_net['id'])
        self.assertTrue(show_ext_net['router:external'])
        body = self.client.show_network(self.network['id'])
        show_net = body['network']
        # Verify with show that router:external is False for network
        self.assertEqual(self.network['name'], show_net['name'])
        self.assertEqual(self.network['id'], show_net['id'])
        self.assertFalse(show_net['router:external'])

    @test.idempotent_id('82068503-2cf2-4ed4-b3be-ecb89432e4bb')
    def test_delete_external_networks_with_floating_ip(self):
        """Verifies external network can be deleted while still holding
        (unassociated) floating IPs

        """
        # Set cls.client to admin to use base.create_subnet()
        client = self.admin_client
        body = client.create_network(**{'router:external': True})
        external_network = body['network']
        self.addCleanup(self._try_delete_resource,
                        client.delete_network,
                        external_network['id'])
        subnet = self.create_subnet(external_network, client=client,
                                    enable_dhcp=False)
        body = client.create_floatingip(
            floating_network_id=external_network['id'])
        created_floating_ip = body['floatingip']
        self.addCleanup(self._try_delete_resource,
                        client.delete_floatingip,
                        created_floating_ip['id'])
        floatingip_list = client.list_floatingips(
            network=external_network['id'])
        self.assertIn(created_floating_ip['id'],
                      (f['id'] for f in floatingip_list['floatingips']))
        client.delete_network(external_network['id'])
        # Verifies floating ip is deleted
        floatingip_list = client.list_floatingips()
        self.assertNotIn(created_floating_ip['id'],
                         (f['id'] for f in floatingip_list['floatingips']))
        # Verifies subnet is deleted
        subnet_list = client.list_subnets()
        # Fix: iterate the 'subnets' list inside the response body.
        # Iterating the response dict itself yields its string keys, so
        # s['id'] would raise TypeError instead of checking the subnets.
        self.assertNotIn(subnet['id'],
                         (s['id'] for s in subnet_list['subnets']))
        # Removes subnet from the cleanup list
        self.subnets.remove(subnet)
|
@ -0,0 +1,54 @@
|
||||
# Copyright 2014 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class ExternalNetworksAdminNegativeTestJSON(base.BaseAdminNetworkTest):
    """Negative admin tests around external-network IP allocation."""

    @test.attr(type=['negative'])
    @test.idempotent_id('d402ae6c-0be0-4d8e-833b-a738895d98d0')
    def test_create_port_with_precreated_floatingip_as_fixed_ip(self):
        """
        External networks can be used to create both floating-ip as well
        as instance-ip. So, creating an instance-ip with a value of a
        pre-created floating-ip should be denied.
        """

        # Allocate a floating IP on the public network as admin.
        admin = self.admin_client
        floatingip = admin.create_floatingip(
            floating_network_id=CONF.network.public_network_id)['floatingip']
        self.addCleanup(self._try_delete_resource,
                        admin.delete_floatingip,
                        floatingip['id'])
        fip_address = floatingip['floating_ip_address']
        self.assertIsNotNone(fip_address)

        # Reuse that exact address as the port's fixed IP; the port create
        # (which allocates an instance-ip internally) must conflict.
        self.assertRaises(lib_exc.Conflict,
                          admin.create_port,
                          network_id=CONF.network.public_network_id,
                          fixed_ips=[{'ip_address': fip_address}])
|
@ -0,0 +1,110 @@
|
||||
# Copyright 2014 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.api.contrib import clients
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class FloatingIPAdminTestJSON(base.BaseAdminNetworkTest):
    """Floating IP visibility and cross-tenant creation via the admin API."""

    # NOTE(review): presumably forces fresh isolated credentials for this
    # class so the alt tenant below is distinct — confirm in the base class.
    force_tenant_isolation = True

    @classmethod
    def resource_setup(cls):
        super(FloatingIPAdminTestJSON, cls).resource_setup()
        cls.ext_net_id = CONF.network.public_network_id
        cls.floating_ip = cls.create_floatingip(cls.ext_net_id)
        # Second (alt) tenant client, used to create IPs the primary
        # tenant must not be able to see.
        cls.alt_manager = clients.Manager(cls.isolated_creds.get_alt_creds())
        cls.alt_client = cls.alt_manager.network_client
        cls.network = cls.create_network()
        cls.subnet = cls.create_subnet(cls.network)
        cls.router = cls.create_router(data_utils.rand_name('router-'),
                                       external_network_id=cls.ext_net_id)
        cls.create_router_interface(cls.router['id'], cls.subnet['id'])
        cls.port = cls.create_port(cls.network)

    @test.attr(type='smoke')
    @test.idempotent_id('64f2100b-5471-4ded-b46c-ddeeeb4f231b')
    def test_list_floating_ips_from_admin_and_nonadmin(self):
        # Create floating ip from admin user
        floating_ip_admin = self.admin_client.create_floatingip(
            floating_network_id=self.ext_net_id)
        self.addCleanup(self.admin_client.delete_floatingip,
                        floating_ip_admin['floatingip']['id'])
        # Create floating ip from alt user
        body = self.alt_client.create_floatingip(
            floating_network_id=self.ext_net_id)
        floating_ip_alt = body['floatingip']
        self.addCleanup(self.alt_client.delete_floatingip,
                        floating_ip_alt['id'])
        # List floating ips from admin
        body = self.admin_client.list_floatingips()
        floating_ip_ids_admin = [f['id'] for f in body['floatingips']]
        # Check that admin sees all floating ips
        self.assertIn(self.floating_ip['id'], floating_ip_ids_admin)
        self.assertIn(floating_ip_admin['floatingip']['id'],
                      floating_ip_ids_admin)
        self.assertIn(floating_ip_alt['id'], floating_ip_ids_admin)
        # List floating ips from nonadmin
        body = self.client.list_floatingips()
        floating_ip_ids = [f['id'] for f in body['floatingips']]
        # Check that nonadmin user doesn't see floating ip created from admin
        # and floating ip that is created in another tenant (alt user)
        self.assertIn(self.floating_ip['id'], floating_ip_ids)
        self.assertNotIn(floating_ip_admin['floatingip']['id'],
                         floating_ip_ids)
        self.assertNotIn(floating_ip_alt['id'], floating_ip_ids)

    @test.attr(type='smoke')
    @test.idempotent_id('32727cc3-abe2-4485-a16e-48f2d54c14f2')
    def test_create_list_show_floating_ip_with_tenant_id_by_admin(self):
        # Creates a floating IP
        body = self.admin_client.create_floatingip(
            floating_network_id=self.ext_net_id,
            tenant_id=self.network['tenant_id'],
            port_id=self.port['id'])
        created_floating_ip = body['floatingip']
        # The IP was created in the primary tenant (tenant_id above), so
        # the non-admin client handles the cleanup.
        self.addCleanup(self.client.delete_floatingip,
                        created_floating_ip['id'])
        self.assertIsNotNone(created_floating_ip['id'])
        self.assertIsNotNone(created_floating_ip['tenant_id'])
        self.assertIsNotNone(created_floating_ip['floating_ip_address'])
        self.assertEqual(created_floating_ip['port_id'], self.port['id'])
        self.assertEqual(created_floating_ip['floating_network_id'],
                         self.ext_net_id)
        port = self.port['fixed_ips']
        self.assertEqual(created_floating_ip['fixed_ip_address'],
                         port[0]['ip_address'])
        # Verifies the details of a floating_ip
        floating_ip = self.admin_client.show_floatingip(
            created_floating_ip['id'])
        shown_floating_ip = floating_ip['floatingip']
        self.assertEqual(shown_floating_ip['id'], created_floating_ip['id'])
        self.assertEqual(shown_floating_ip['floating_network_id'],
                         self.ext_net_id)
        self.assertEqual(shown_floating_ip['tenant_id'],
                         self.network['tenant_id'])
        self.assertEqual(shown_floating_ip['floating_ip_address'],
                         created_floating_ip['floating_ip_address'])
        self.assertEqual(shown_floating_ip['port_id'], self.port['id'])
        # Verify the floating ip exists in the list of all floating_ips
        floating_ips = self.admin_client.list_floatingips()
        floatingip_id_list = [f['id'] for f in floating_ips['floatingips']]
        self.assertIn(created_floating_ip['id'], floatingip_id_list)
|
@ -0,0 +1,79 @@
|
||||
# Copyright 2013 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
|
||||
class L3AgentSchedulerTestJSON(base.BaseAdminNetworkTest):

    """
    Tests the following operations in the Neutron API using the REST client
    for Neutron:

    List routers that the given L3 agent is hosting.
    List L3 agents hosting the given router.
    Add and Remove Router to L3 agent

    v2.0 of the Neutron API is assumed.

    The l3_agent_scheduler extension is required for these tests.
    """

    @classmethod
    def resource_setup(cls):
        super(L3AgentSchedulerTestJSON, cls).resource_setup()
        if not test.is_extension_enabled('l3_agent_scheduler', 'network'):
            msg = "L3 Agent Scheduler Extension not enabled."
            raise cls.skipException(msg)
        # Trying to get agent details for L3 Agent
        body = cls.admin_client.list_agents()
        agents = body['agents']
        for agent in agents:
            if agent['agent_type'] == 'L3 agent':
                cls.agent = agent
                break
        else:
            # for/else: reached only when no L3 agent was found above.
            msg = "L3 Agent not found"
            raise cls.skipException(msg)

    @test.attr(type='smoke')
    @test.idempotent_id('b7ce6e89-e837-4ded-9b78-9ed3c9c6a45a')
    def test_list_routers_on_l3_agent(self):
        # The call succeeding is the assertion; content is not checked.
        self.admin_client.list_routers_on_l3_agent(self.agent['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('9464e5e7-8625-49c3-8fd1-89c52be59d66')
    def test_add_list_remove_router_on_l3_agent(self):
        l3_agent_ids = list()
        name = data_utils.rand_name('router1-')
        router = self.client.create_router(name)
        self.addCleanup(self.client.delete_router, router['router']['id'])
        self.admin_client.add_router_to_l3_agent(
            self.agent['id'],
            router['router']['id'])
        body = self.admin_client.list_l3_agents_hosting_router(
            router['router']['id'])
        for agent in body['agents']:
            l3_agent_ids.append(agent['id'])
            self.assertIn('agent_type', agent)
            self.assertEqual('L3 agent', agent['agent_type'])
        self.assertIn(self.agent['id'], l3_agent_ids)
        del l3_agent_ids[:]
        body = self.admin_client.remove_router_from_l3_agent(
            self.agent['id'],
            router['router']['id'])
        # NOTE(afazekas): The deletion not asserted, because neutron
        # is not forbidden to reschedule the router to the same agent
|
@ -0,0 +1,72 @@
|
||||
# Copyright 2013 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
|
||||
class LBaaSAgentSchedulerTestJSON(base.BaseAdminNetworkTest):

    """
    Exercises the LBaaS agent scheduler operations of the Neutron API:

    List pools the given LBaaS agent is hosting.
    Show a LBaaS agent hosting the given pool.

    v2.0 of the Neutron API is assumed, with the relevant api_extensions
    configured in the [network-feature-enabled] section of
    etc/tempest.conf.
    """

    @classmethod
    def resource_setup(cls):
        super(LBaaSAgentSchedulerTestJSON, cls).resource_setup()
        if not test.is_extension_enabled('lbaas_agent_scheduler', 'network'):
            msg = "LBaaS Agent Scheduler Extension not enabled."
            raise cls.skipException(msg)
        cls.network = cls.create_network()
        cls.subnet = cls.create_subnet(cls.network)
        cls.pool = cls.create_pool(
            data_utils.rand_name('pool-'), "ROUND_ROBIN", "HTTP", cls.subnet)

    @test.attr(type='smoke')
    @test.idempotent_id('e5ea8b15-4f44-4350-963c-e0fcb533ee79')
    def test_list_pools_on_lbaas_agent(self):
        # Walk every load-balancer agent and verify that our pool shows up
        # in the hosted-pools listing of at least one of them.
        found = False
        agents = self.admin_client.list_agents(
            agent_type="Loadbalancer agent")['agents']
        for candidate in agents:
            self.assertEqual(candidate['agent_type'], 'Loadbalancer agent',
                             'Load Balancer agent expected')
            hosted = self.admin_client.list_pools_hosted_by_one_lbaas_agent(
                candidate['id'])['pools']
            if any(pool['id'] == self.pool['id'] for pool in hosted):
                found = True
        self.assertTrue(found,
                        'Unable to find Load Balancer agent hosting pool')

    @test.attr(type='smoke')
    @test.idempotent_id('e2745593-fd79-4b98-a262-575fd7865796')
    def test_show_lbaas_agent_hosting_pool(self):
        # The hosting agent reported for our pool must be an LBaaS agent.
        body = self.admin_client.show_lbaas_agent_hosting_pool(
            self.pool['id'])
        self.assertEqual('Loadbalancer agent', body['agent']['agent_type'])
|
@ -0,0 +1,114 @@
|
||||
# Copyright 2014 Mirantis.inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
|
||||
class LoadBalancerAdminTestJSON(base.BaseAdminNetworkTest):

    """
    Test admin actions for load balancer.

    Create VIP for another tenant
    Create health monitor for another tenant
    """

    @classmethod
    def resource_setup(cls):
        super(LoadBalancerAdminTestJSON, cls).resource_setup()
        if not test.is_extension_enabled('lbaas', 'network'):
            msg = "lbaas extension not enabled."
            raise cls.skipException(msg)
        # NOTE(review): set before get_client_manager(); presumably makes
        # the manager use isolated credentials — confirm in base class.
        cls.force_tenant_isolation = True
        manager = cls.get_client_manager()
        cls.client = manager.network_client
        # Primary tenant's id; the "another tenant" the admin creates
        # resources for in the tests below.
        cls.tenant_id = cls.isolated_creds.get_primary_creds().tenant_id
        cls.network = cls.create_network()
        cls.subnet = cls.create_subnet(cls.network)
        cls.pool = cls.create_pool(data_utils.rand_name('pool-'),
                                   "ROUND_ROBIN", "HTTP", cls.subnet)

    @test.attr(type='smoke')
    @test.idempotent_id('6b0a20d8-4fcd-455e-b54f-ec4db5199518')
    def test_create_vip_as_admin_for_another_tenant(self):
        name = data_utils.rand_name('vip-')
        # Admin creates a pool owned by the primary tenant...
        body = self.admin_client.create_pool(
            name=data_utils.rand_name('pool-'),
            lb_method="ROUND_ROBIN",
            protocol="HTTP",
            subnet_id=self.subnet['id'],
            tenant_id=self.tenant_id)
        pool = body['pool']
        self.addCleanup(self.admin_client.delete_pool, pool['id'])
        # ...then a VIP on that pool, also owned by the primary tenant.
        body = self.admin_client.create_vip(name=name,
                                            protocol="HTTP",
                                            protocol_port=80,
                                            subnet_id=self.subnet['id'],
                                            pool_id=pool['id'],
                                            tenant_id=self.tenant_id)
        vip = body['vip']
        self.addCleanup(self.admin_client.delete_vip, vip['id'])
        self.assertIsNotNone(vip['id'])
        self.assertEqual(self.tenant_id, vip['tenant_id'])
        # The owning tenant's non-admin client can show the VIP.
        body = self.client.show_vip(vip['id'])
        show_vip = body['vip']
        self.assertEqual(vip['id'], show_vip['id'])
        self.assertEqual(vip['name'], show_vip['name'])

    @test.attr(type='smoke')
    @test.idempotent_id('74552cfc-ab78-4fb6-825b-f67bca379921')
    def test_create_health_monitor_as_admin_for_another_tenant(self):
        body = (
            self.admin_client.create_health_monitor(delay=4,
                                                    max_retries=3,
                                                    type="TCP",
                                                    timeout=1,
                                                    tenant_id=self.tenant_id))
        health_monitor = body['health_monitor']
        self.addCleanup(self.admin_client.delete_health_monitor,
                        health_monitor['id'])
        self.assertIsNotNone(health_monitor['id'])
        self.assertEqual(self.tenant_id, health_monitor['tenant_id'])
        # The owning tenant's non-admin client can show the monitor.
        body = self.client.show_health_monitor(health_monitor['id'])
        show_health_monitor = body['health_monitor']
        self.assertEqual(health_monitor['id'], show_health_monitor['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('266a192d-3c22-46c4-a8fb-802450301e82')
    def test_create_pool_from_admin_user_other_tenant(self):
        body = self.admin_client.create_pool(
            name=data_utils.rand_name('pool-'),
            lb_method="ROUND_ROBIN",
            protocol="HTTP",
            subnet_id=self.subnet['id'],
            tenant_id=self.tenant_id)
        pool = body['pool']
        self.addCleanup(self.admin_client.delete_pool, pool['id'])
        self.assertIsNotNone(pool['id'])
        self.assertEqual(self.tenant_id, pool['tenant_id'])

    @test.attr(type='smoke')
    @test.idempotent_id('158bb272-b9ed-4cfc-803c-661dac46f783')
    def test_create_member_from_admin_user_other_tenant(self):
        body = self.admin_client.create_member(address="10.0.9.47",
                                               protocol_port=80,
                                               pool_id=self.pool['id'],
                                               tenant_id=self.tenant_id)
        member = body['member']
        self.addCleanup(self.admin_client.delete_member, member['id'])
        self.assertIsNotNone(member['id'])
        self.assertEqual(self.tenant_id, member['tenant_id'])
|
97
neutron/tests/tempest/api/network/admin/test_quotas.py
Normal file
97
neutron/tests/tempest/api/network/admin/test_quotas.py
Normal file
@ -0,0 +1,97 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
|
||||
class QuotasTest(base.BaseAdminNetworkTest):

    """
    Tests the following operations in the Neutron API using the REST client
    for Neutron:

        list quotas for tenants who have non-default quota values
        show quotas for a specified tenant
        update quotas for a specified tenant
        reset quotas to default values for a specified tenant

    v2.0 of the API is assumed.
    It is also assumed that the per-tenant quota extension API is configured
    in /etc/neutron/neutron.conf as follows:

        quota_driver = neutron.db.quota_db.DbQuotaDriver
    """

    @classmethod
    def resource_setup(cls):
        super(QuotasTest, cls).resource_setup()
        if not test.is_extension_enabled('quotas', 'network'):
            msg = "quotas extension not enabled."
            raise cls.skipException(msg)
        cls.identity_admin_client = cls.os_adm.identity_client

    def _check_quotas(self, new_quotas):
        """Update, list, show and reset quotas for a throwaway tenant.

        :param new_quotas: dict of quota name -> desired limit.
        """
        # Add a tenant to conduct the test
        test_tenant = data_utils.rand_name('test_tenant_')
        test_description = data_utils.rand_name('desc_')
        tenant = self.identity_admin_client.create_tenant(
            name=test_tenant,
            description=test_description)
        tenant_id = tenant['id']
        self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)

        # Change quotas for tenant
        quota_set = self.admin_client.update_quotas(tenant_id,
                                                    **new_quotas)
        self.addCleanup(self.admin_client.reset_quotas, tenant_id)
        # NOTE: dict.iteritems() is Python 2 only; items() works on both.
        for key, value in new_quotas.items():
            self.assertEqual(value, quota_set[key])

        # Confirm our tenant is listed among tenants with non default quotas
        non_default_quotas = self.admin_client.list_quotas()
        found = False
        for qs in non_default_quotas['quotas']:
            if qs['tenant_id'] == tenant_id:
                found = True
        self.assertTrue(found)

        # Confirm from API quotas were changed as requested for tenant
        quota_set = self.admin_client.show_quotas(tenant_id)
        quota_set = quota_set['quota']
        for key, value in new_quotas.items():
            self.assertEqual(value, quota_set[key])

        # Reset quotas to default and confirm
        self.admin_client.reset_quotas(tenant_id)
        non_default_quotas = self.admin_client.list_quotas()
        for q in non_default_quotas['quotas']:
            self.assertNotEqual(tenant_id, q['tenant_id'])

    @test.attr(type='gate')
    @test.idempotent_id('2390f766-836d-40ef-9aeb-e810d78207fb')
    def test_quotas(self):
        new_quotas = {'network': 0, 'security_group': 0}
        self._check_quotas(new_quotas)

    @test.idempotent_id('a7add2b1-691e-44d6-875f-697d9685f091')
    @test.requires_ext(extension='lbaas', service='network')
    @test.attr(type='gate')
    def test_lbaas_quotas(self):
        new_quotas = {'vip': 1, 'pool': 2,
                      'member': 3, 'health_monitor': 4}
        self._check_quotas(new_quotas)
|
101
neutron/tests/tempest/api/network/admin/test_routers_dvr.py
Normal file
101
neutron/tests/tempest/api/network/admin/test_routers_dvr.py
Normal file
@ -0,0 +1,101 @@
|
||||
# Copyright 2015 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.tests.tempest.api.network import base_routers as base
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
|
||||
class RoutersTestDVR(base.BaseRouterTest):

    """Admin tests for DVR (Distributed Virtual Routing) routers."""

    @classmethod
    def resource_setup(cls):
        for ext in ['router', 'dvr']:
            if not test.is_extension_enabled(ext, 'network'):
                msg = "%s extension not enabled." % ext
                raise cls.skipException(msg)
        # The check above will pass if api_extensions=all, which does
        # not mean DVR extension itself is present.
        # Instead, we have to check whether DVR is actually present by using
        # admin credentials to create router with distributed=True attribute
        # and checking for BadRequest exception and that the resulting router
        # has a distributed attribute.
        super(RoutersTestDVR, cls).resource_setup()
        probe_name = data_utils.rand_name('pretest-check')
        probe = cls.admin_client.create_router(probe_name)
        if 'distributed' not in probe['router']:
            msg = "'distributed' attribute not found. DVR Possibly not enabled"
            raise cls.skipException(msg)
        cls.admin_client.delete_router(probe['router']['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('08a2a0a8-f1e4-4b34-8e30-e522e836c44e')
    def test_distributed_router_creation(self):
        """Create a DVR router as admin with distributed=True.

        Acceptance: the router is created and its "distributed"
        attribute is True.
        """
        router_name = data_utils.rand_name('router')
        created = self.admin_client.create_router(router_name,
                                                  distributed=True)
        self.addCleanup(self.admin_client.delete_router,
                        created['router']['id'])
        self.assertTrue(created['router']['distributed'])

    @test.attr(type='smoke')
    @test.idempotent_id('8a0a72b4-7290-4677-afeb-b4ffe37bc352')
    def test_centralized_router_creation(self):
        """Create a CVR router as admin with distributed=False.

        Acceptance: the router is created with "distributed" set to
        False, i.e. a Centralized Virtual Router rather than a
        Distributed Virtual Router.
        """
        router_name = data_utils.rand_name('router')
        created = self.admin_client.create_router(router_name,
                                                  distributed=False)
        self.addCleanup(self.admin_client.delete_router,
                        created['router']['id'])
        self.assertFalse(created['router']['distributed'])

    @test.attr(type='smoke')
    @test.idempotent_id('acd43596-c1fb-439d-ada8-31ad48ae3c2e')
    def test_centralized_router_update_to_dvr(self):
        """Create a CVR router, then update it to distributed=True.

        Acceptance: the router starts with "distributed" False and,
        after the update, reports "distributed" True.
        """
        router_name = data_utils.rand_name('router')
        created = self.admin_client.create_router(router_name,
                                                  distributed=False)
        self.addCleanup(self.admin_client.delete_router,
                        created['router']['id'])
        self.assertFalse(created['router']['distributed'])
        updated = self.admin_client.update_router(created['router']['id'],
                                                  distributed=True)
        self.assertTrue(updated['router']['distributed'])
|
449
neutron/tests/tempest/api/network/base.py
Normal file
449
neutron/tests/tempest/api/network/base.py
Normal file
@ -0,0 +1,449 @@
|
||||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import netaddr
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron.tests.api.contrib import clients
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import exceptions
|
||||
from neutron.openstack.common import log as logging
|
||||
import neutron.tests.tempest.test
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BaseNetworkTest(neutron.tests.tempest.test.BaseTestCase):

    """
    Base class for the Neutron tests that use the Tempest Neutron REST client

    Per the Neutron API Guide, API v1.x was removed from the source code tree
    (docs.openstack.org/api/openstack-network/2.0/content/Overview-d1e71.html)
    Therefore, v2.x of the Neutron API is assumed. It is also assumed that the
    following options are defined in the [network] section of etc/tempest.conf:

        tenant_network_cidr with a block of cidr's from which smaller blocks
        can be allocated for tenant networks

        tenant_network_mask_bits with the mask bits to be used to partition
        the block defined by tenant-network_cidr

    Finally, it is assumed that the following option is defined in the
    [service_available] section of etc/tempest.conf

        neutron as True
    """

    force_tenant_isolation = False

    # Default to ipv4.
    _ip_version = 4

    @classmethod
    def resource_setup(cls):
        # Create no network resources for these test.
        cls.set_network_resources()
        super(BaseNetworkTest, cls).resource_setup()
        if not CONF.service_available.neutron:
            raise cls.skipException("Neutron support is required")
        if cls._ip_version == 6 and not CONF.network_feature_enabled.ipv6:
            raise cls.skipException("IPv6 Tests are disabled.")

        os = cls.get_client_manager()

        cls.network_cfg = CONF.network
        cls.client = os.network_client
        # Registries of created resources; resource_cleanup() removes
        # them in reverse-dependency order.
        cls.networks = []
        cls.subnets = []
        cls.ports = []
        cls.routers = []
        cls.pools = []
        cls.vips = []
        cls.members = []
        cls.health_monitors = []
        cls.vpnservices = []
        cls.ikepolicies = []
        cls.floating_ips = []
        cls.metering_labels = []
        cls.metering_label_rules = []
        cls.fw_rules = []
        cls.fw_policies = []
        cls.ipsecpolicies = []
        cls.ethertype = "IPv" + str(cls._ip_version)

    @classmethod
    def resource_cleanup(cls):
        if CONF.service_available.neutron:
            # Clean up ipsec policies
            for ipsecpolicy in cls.ipsecpolicies:
                cls._try_delete_resource(cls.client.delete_ipsecpolicy,
                                         ipsecpolicy['id'])
            # Clean up firewall policies
            for fw_policy in cls.fw_policies:
                cls._try_delete_resource(cls.client.delete_firewall_policy,
                                         fw_policy['id'])
            # Clean up firewall rules
            for fw_rule in cls.fw_rules:
                cls._try_delete_resource(cls.client.delete_firewall_rule,
                                         fw_rule['id'])
            # Clean up ike policies
            for ikepolicy in cls.ikepolicies:
                cls._try_delete_resource(cls.client.delete_ikepolicy,
                                         ikepolicy['id'])
            # Clean up vpn services
            for vpnservice in cls.vpnservices:
                cls._try_delete_resource(cls.client.delete_vpnservice,
                                         vpnservice['id'])
            # Clean up floating IPs
            for floating_ip in cls.floating_ips:
                cls._try_delete_resource(cls.client.delete_floatingip,
                                         floating_ip['id'])
            # Clean up routers
            for router in cls.routers:
                cls._try_delete_resource(cls.delete_router,
                                         router)

            # Clean up health monitors
            for health_monitor in cls.health_monitors:
                cls._try_delete_resource(cls.client.delete_health_monitor,
                                         health_monitor['id'])
            # Clean up members
            for member in cls.members:
                cls._try_delete_resource(cls.client.delete_member,
                                         member['id'])
            # Clean up vips
            for vip in cls.vips:
                cls._try_delete_resource(cls.client.delete_vip,
                                         vip['id'])
            # Clean up pools
            for pool in cls.pools:
                cls._try_delete_resource(cls.client.delete_pool,
                                         pool['id'])
            # Clean up metering label rules
            for metering_label_rule in cls.metering_label_rules:
                cls._try_delete_resource(
                    cls.admin_client.delete_metering_label_rule,
                    metering_label_rule['id'])
            # Clean up metering labels
            for metering_label in cls.metering_labels:
                cls._try_delete_resource(
                    cls.admin_client.delete_metering_label,
                    metering_label['id'])
            # Clean up ports
            for port in cls.ports:
                cls._try_delete_resource(cls.client.delete_port,
                                         port['id'])
            # Clean up subnets
            for subnet in cls.subnets:
                cls._try_delete_resource(cls.client.delete_subnet,
                                         subnet['id'])
            # Clean up networks
            for network in cls.networks:
                cls._try_delete_resource(cls.client.delete_network,
                                         network['id'])
        cls.clear_isolated_creds()
        super(BaseNetworkTest, cls).resource_cleanup()

    @classmethod
    def _try_delete_resource(cls, delete_callable, *args, **kwargs):
        # NOTE: first parameter renamed from the misleading 'self' to
        # 'cls' -- this is a classmethod.
        """Cleanup resources in case of test-failure

        Some resources are explicitly deleted by the test.
        If the test failed to delete a resource, this method will execute
        the appropriate delete methods. Otherwise, the method ignores NotFound
        exceptions thrown for resources that were correctly deleted by the
        test.

        :param delete_callable: delete method
        :param args: arguments for delete method
        :param kwargs: keyword arguments for delete method
        """
        try:
            delete_callable(*args, **kwargs)
        # if resource is not found, this means it was deleted in the test
        except lib_exc.NotFound:
            pass

    @classmethod
    def create_network(cls, network_name=None):
        """Wrapper utility that returns a test network."""
        network_name = network_name or data_utils.rand_name('test-network-')

        body = cls.client.create_network(name=network_name)
        network = body['network']
        cls.networks.append(network)
        return network

    @classmethod
    def create_subnet(cls, network, gateway='', cidr=None, mask_bits=None,
                      ip_version=None, client=None, **kwargs):
        """Wrapper utility that returns a test subnet."""

        # allow tests to use admin client
        if not client:
            client = cls.client

        # The cidr and mask_bits depend on the ip version.
        ip_version = ip_version if ip_version is not None else cls._ip_version
        gateway_not_set = gateway == ''
        if ip_version == 4:
            cidr = cidr or netaddr.IPNetwork(CONF.network.tenant_network_cidr)
            mask_bits = mask_bits or CONF.network.tenant_network_mask_bits
        elif ip_version == 6:
            cidr = (
                cidr or netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr))
            mask_bits = mask_bits or CONF.network.tenant_network_v6_mask_bits
        # Find a cidr that is not in use yet and create a subnet with it
        for subnet_cidr in cidr.subnet(mask_bits):
            if gateway_not_set:
                # Default gateway is the first address in the subnet.
                gateway_ip = str(netaddr.IPAddress(subnet_cidr) + 1)
            else:
                gateway_ip = gateway
            try:
                body = client.create_subnet(
                    network_id=network['id'],
                    cidr=str(subnet_cidr),
                    ip_version=ip_version,
                    gateway_ip=gateway_ip,
                    **kwargs)
                break
            except lib_exc.BadRequest as e:
                is_overlapping_cidr = 'overlaps with another subnet' in str(e)
                if not is_overlapping_cidr:
                    raise
        else:
            message = 'Available CIDR for subnet creation could not be found'
            raise exceptions.BuildErrorException(message)
        subnet = body['subnet']
        cls.subnets.append(subnet)
        return subnet

    @classmethod
    def create_port(cls, network, **kwargs):
        """Wrapper utility that returns a test port."""
        body = cls.client.create_port(network_id=network['id'],
                                      **kwargs)
        port = body['port']
        cls.ports.append(port)
        return port

    @classmethod
    def update_port(cls, port, **kwargs):
        """Wrapper utility that updates a test port."""
        body = cls.client.update_port(port['id'],
                                      **kwargs)
        return body['port']

    @classmethod
    def create_router(cls, router_name=None, admin_state_up=False,
                      external_network_id=None, enable_snat=None):
        """Wrapper utility that returns a test router."""
        ext_gw_info = {}
        if external_network_id:
            ext_gw_info['network_id'] = external_network_id
        # NOTE: compare against None so an explicit enable_snat=False is
        # forwarded instead of silently dropped.
        if enable_snat is not None:
            ext_gw_info['enable_snat'] = enable_snat
        body = cls.client.create_router(
            router_name, external_gateway_info=ext_gw_info,
            admin_state_up=admin_state_up)
        router = body['router']
        cls.routers.append(router)
        return router

    @classmethod
    def create_floatingip(cls, external_network_id):
        """Wrapper utility that returns a test floating IP."""
        body = cls.client.create_floatingip(
            floating_network_id=external_network_id)
        fip = body['floatingip']
        cls.floating_ips.append(fip)
        return fip

    @classmethod
    def create_pool(cls, name, lb_method, protocol, subnet):
        """Wrapper utility that returns a test pool."""
        body = cls.client.create_pool(
            name=name,
            lb_method=lb_method,
            protocol=protocol,
            subnet_id=subnet['id'])
        pool = body['pool']
        cls.pools.append(pool)
        return pool

    @classmethod
    def update_pool(cls, name):
        """Wrapper utility that returns an updated test pool."""
        body = cls.client.update_pool(name=name)
        pool = body['pool']
        return pool

    @classmethod
    def create_vip(cls, name, protocol, protocol_port, subnet, pool):
        """Wrapper utility that returns a test vip."""
        body = cls.client.create_vip(name=name,
                                     protocol=protocol,
                                     protocol_port=protocol_port,
                                     subnet_id=subnet['id'],
                                     pool_id=pool['id'])
        vip = body['vip']
        cls.vips.append(vip)
        return vip

    @classmethod
    def update_vip(cls, name):
        """Wrapper utility that returns an updated test vip."""
        body = cls.client.update_vip(name=name)
        vip = body['vip']
        return vip

    @classmethod
    def create_member(cls, protocol_port, pool, ip_version=None):
        """Wrapper utility that returns a test member."""
        ip_version = ip_version if ip_version is not None else cls._ip_version
        member_address = "fd00::abcd" if ip_version == 6 else "10.0.9.46"
        body = cls.client.create_member(address=member_address,
                                        protocol_port=protocol_port,
                                        pool_id=pool['id'])
        member = body['member']
        cls.members.append(member)
        return member

    @classmethod
    def update_member(cls, admin_state_up):
        """Wrapper utility that returns an updated test member."""
        body = cls.client.update_member(admin_state_up=admin_state_up)
        member = body['member']
        return member

    @classmethod
    def create_health_monitor(cls, delay, max_retries, Type, timeout):
        """Wrapper utility that returns a test health monitor."""
        body = cls.client.create_health_monitor(delay=delay,
                                                max_retries=max_retries,
                                                type=Type,
                                                timeout=timeout)
        health_monitor = body['health_monitor']
        cls.health_monitors.append(health_monitor)
        return health_monitor

    @classmethod
    def update_health_monitor(cls, admin_state_up):
        """Wrapper utility that returns an updated test health monitor."""
        # BUG FIX: previously called cls.client.update_vip(), which would
        # update the wrong resource and KeyError on 'health_monitor'.
        body = cls.client.update_health_monitor(
            admin_state_up=admin_state_up)
        health_monitor = body['health_monitor']
        return health_monitor

    @classmethod
    def create_router_interface(cls, router_id, subnet_id):
        """Wrapper utility that returns a router interface."""
        interface = cls.client.add_router_interface_with_subnet_id(
            router_id, subnet_id)
        return interface

    @classmethod
    def create_vpnservice(cls, subnet_id, router_id):
        """Wrapper utility that returns a test vpn service."""
        body = cls.client.create_vpnservice(
            subnet_id=subnet_id, router_id=router_id, admin_state_up=True,
            name=data_utils.rand_name("vpnservice-"))
        vpnservice = body['vpnservice']
        cls.vpnservices.append(vpnservice)
        return vpnservice

    @classmethod
    def create_ikepolicy(cls, name):
        """Wrapper utility that returns a test ike policy."""
        body = cls.client.create_ikepolicy(name=name)
        ikepolicy = body['ikepolicy']
        cls.ikepolicies.append(ikepolicy)
        return ikepolicy

    @classmethod
    def create_firewall_rule(cls, action, protocol):
        """Wrapper utility that returns a test firewall rule."""
        body = cls.client.create_firewall_rule(
            name=data_utils.rand_name("fw-rule"),
            action=action,
            protocol=protocol)
        fw_rule = body['firewall_rule']
        cls.fw_rules.append(fw_rule)
        return fw_rule

    @classmethod
    def create_firewall_policy(cls):
        """Wrapper utility that returns a test firewall policy."""
        body = cls.client.create_firewall_policy(
            name=data_utils.rand_name("fw-policy"))
        fw_policy = body['firewall_policy']
        cls.fw_policies.append(fw_policy)
        return fw_policy

    @classmethod
    def delete_router(cls, router):
        """Detach all interfaces from a router, then delete it."""
        body = cls.client.list_router_interfaces(router['id'])
        interfaces = body['ports']
        for i in interfaces:
            try:
                cls.client.remove_router_interface_with_subnet_id(
                    router['id'], i['fixed_ips'][0]['subnet_id'])
            except lib_exc.NotFound:
                # Interface already gone; nothing to detach.
                pass
        cls.client.delete_router(router['id'])

    @classmethod
    def create_ipsecpolicy(cls, name):
        """Wrapper utility that returns a test ipsec policy."""
        body = cls.client.create_ipsecpolicy(name=name)
        ipsecpolicy = body['ipsecpolicy']
        cls.ipsecpolicies.append(ipsecpolicy)
        return ipsecpolicy
|
||||
|
||||
|
||||
class BaseAdminNetworkTest(BaseNetworkTest):

    """Base class for Neutron tests that require admin credentials."""

    @classmethod
    def resource_setup(cls):
        super(BaseAdminNetworkTest, cls).resource_setup()

        try:
            admin_creds = cls.isolated_creds.get_admin_creds()
            cls.os_adm = clients.Manager(credentials=admin_creds)
        except NotImplementedError:
            # The credential provider cannot supply admin credentials.
            msg = ("Missing Administrative Network API credentials "
                   "in configuration.")
            raise cls.skipException(msg)
        cls.admin_client = cls.os_adm.network_client

    @classmethod
    def create_metering_label(cls, name, description):
        """Wrapper utility that returns a test metering label."""
        resp = cls.admin_client.create_metering_label(
            description=description,
            name=data_utils.rand_name("metering-label"))
        label = resp['metering_label']
        cls.metering_labels.append(label)
        return label

    @classmethod
    def create_metering_label_rule(cls, remote_ip_prefix, direction,
                                   metering_label_id):
        """Wrapper utility that returns a test metering label rule."""
        resp = cls.admin_client.create_metering_label_rule(
            remote_ip_prefix=remote_ip_prefix, direction=direction,
            metering_label_id=metering_label_id)
        rule = resp['metering_label_rule']
        cls.metering_label_rules.append(rule)
        return rule
|
54
neutron/tests/tempest/api/network/base_routers.py
Normal file
54
neutron/tests/tempest/api/network/base_routers.py
Normal file
@ -0,0 +1,54 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
|
||||
|
||||
class BaseRouterTest(base.BaseAdminNetworkTest):
    # NOTE(salv-orlando): This class inherits from BaseAdminNetworkTest
    # as some router operations, such as enabling or disabling SNAT
    # require admin credentials by default

    @classmethod
    def resource_setup(cls):
        super(BaseRouterTest, cls).resource_setup()

    def _delete_router(self, router_id):
        """Delete a router and assert it no longer appears in the list."""
        self.client.delete_router(router_id)
        remaining_ids = [r['id']
                         for r in self.client.list_routers()['routers']]
        self.assertNotIn(router_id, remaining_ids)

    def _add_router_interface_with_subnet_id(self, router_id, subnet_id):
        """Attach a subnet to a router and schedule its detachment."""
        interface = self.client.add_router_interface_with_subnet_id(
            router_id, subnet_id)
        self.addCleanup(self._remove_router_interface_with_subnet_id,
                        router_id, subnet_id)
        self.assertEqual(subnet_id, interface['subnet_id'])
        return interface

    def _remove_router_interface_with_subnet_id(self, router_id, subnet_id):
        """Detach a subnet-based interface and verify the response."""
        detached = self.client.remove_router_interface_with_subnet_id(
            router_id, subnet_id)
        self.assertEqual(subnet_id, detached['subnet_id'])

    def _remove_router_interface_with_port_id(self, router_id, port_id):
        """Detach a port-based interface and verify the response."""
        detached = self.client.remove_router_interface_with_port_id(
            router_id, port_id)
        self.assertEqual(port_id, detached['port_id'])
|
53
neutron/tests/tempest/api/network/base_security_groups.py
Normal file
53
neutron/tests/tempest/api/network/base_security_groups.py
Normal file
@ -0,0 +1,53 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
|
||||
|
||||
class BaseSecGroupTest(base.BaseNetworkTest):

    """Shared helpers for security group API tests."""

    @classmethod
    def resource_setup(cls):
        super(BaseSecGroupTest, cls).resource_setup()

    def _create_security_group(self):
        """Create a security group with a random name; returns (body, name)."""
        name = data_utils.rand_name('secgroup-')
        created = self.client.create_security_group(name=name)
        self.addCleanup(self._delete_security_group,
                        created['security_group']['id'])
        self.assertEqual(created['security_group']['name'], name)
        return created, name

    def _delete_security_group(self, secgroup_id):
        """Delete a security group and assert it is gone from the list."""
        self.client.delete_security_group(secgroup_id)
        remaining = [g['id'] for g in
                     self.client.list_security_groups()['security_groups']]
        self.assertNotIn(secgroup_id, remaining)

    def _delete_security_group_rule(self, rule_id):
        """Delete a security group rule and assert it is gone from the list."""
        self.client.delete_security_group_rule(rule_id)
        remaining = [r['id'] for r in
                     self.client.list_security_group_rules()
                     ['security_group_rules']]
        self.assertNotIn(rule_id, remaining)
|
134
neutron/tests/tempest/api/network/test_allowed_address_pair.py
Normal file
134
neutron/tests/tempest/api/network/test_allowed_address_pair.py
Normal file
@ -0,0 +1,134 @@
|
||||
# Copyright 2014 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import netaddr
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class AllowedAddressPairTestJSON(base.BaseNetworkTest):

    """
    Tests the Neutron Allowed Address Pair API extension using the Tempest
    ReST client. The following API operations are tested with this extension:

        create port
        list ports
        update port
        show port

    v2.0 of the Neutron API is assumed. It is also assumed that the following
    options are defined in the [network-feature-enabled] section of
    etc/tempest.conf

        api_extensions
    """

    @classmethod
    def resource_setup(cls):
        super(AllowedAddressPairTestJSON, cls).resource_setup()
        if not test.is_extension_enabled('allowed-address-pairs', 'network'):
            msg = "Allowed Address Pairs extension not enabled."
            raise cls.skipException(msg)
        cls.network = cls.create_network()
        cls.create_subnet(cls.network)
        port = cls.create_port(cls.network)
        # IP and MAC of a real port on the shared network, reused as the
        # allowed address pair throughout the tests below.
        cls.ip_address = port['fixed_ips'][0]['ip_address']
        cls.mac_address = port['mac_address']

    @test.attr(type='smoke')
    @test.idempotent_id('86c3529b-1231-40de-803c-00e40882f043')
    def test_create_list_port_with_address_pair(self):
        # Create port with allowed address pair attribute
        allowed_address_pairs = [{'ip_address': self.ip_address,
                                  'mac_address': self.mac_address}]
        body = self.client.create_port(
            network_id=self.network['id'],
            allowed_address_pairs=allowed_address_pairs)
        port_id = body['port']['id']
        self.addCleanup(self.client.delete_port, port_id)

        # Confirm port was created with allowed address pair attribute
        body = self.client.list_ports()
        ports = body['ports']
        port = [p for p in ports if p['id'] == port_id]
        msg = 'Created port not found in list of ports returned by Neutron'
        self.assertTrue(port, msg)
        self._confirm_allowed_address_pair(port[0], self.ip_address)

    def _update_port_with_address(self, address, mac_address=None, **kwargs):
        """Create a bare port and then update it with an allowed address pair.

        Helper method, not a test case: the ``@test.attr`` decorator the
        original carried here was removed because attribute tagging only
        has meaning on test methods and misleads test selection.
        """
        # Create a port without allowed address pair
        body = self.client.create_port(network_id=self.network['id'])
        port_id = body['port']['id']
        self.addCleanup(self.client.delete_port, port_id)
        if mac_address is None:
            mac_address = self.mac_address

        # Update allowed address pair attribute of port
        allowed_address_pairs = [{'ip_address': address,
                                  'mac_address': mac_address}]
        if kwargs:
            allowed_address_pairs.append(kwargs['allowed_address_pairs'])
        body = self.client.update_port(
            port_id, allowed_address_pairs=allowed_address_pairs)
        allowed_address_pair = body['port']['allowed_address_pairs']
        self.assertEqual(allowed_address_pair, allowed_address_pairs)

    @test.attr(type='smoke')
    @test.idempotent_id('9599b337-272c-47fd-b3cf-509414414ac4')
    def test_update_port_with_address_pair(self):
        # Update port with allowed address pair
        self._update_port_with_address(self.ip_address)

    @test.attr(type='smoke')
    @test.idempotent_id('4d6d178f-34f6-4bff-a01c-0a2f8fe909e4')
    def test_update_port_with_cidr_address_pair(self):
        # Update allowed address pair with cidr
        cidr = str(netaddr.IPNetwork(CONF.network.tenant_network_cidr))
        self._update_port_with_address(cidr)

    @test.attr(type='smoke')
    @test.idempotent_id('b3f20091-6cd5-472b-8487-3516137df933')
    def test_update_port_with_multiple_ip_mac_address_pair(self):
        # Create an ip _address and mac_address through port create
        resp = self.client.create_port(network_id=self.network['id'])
        newportid = resp['port']['id']
        self.addCleanup(self.client.delete_port, newportid)
        ipaddress = resp['port']['fixed_ips'][0]['ip_address']
        macaddress = resp['port']['mac_address']

        # Update allowed address pair port with multiple ip and mac
        allowed_address_pairs = {'ip_address': ipaddress,
                                 'mac_address': macaddress}
        self._update_port_with_address(
            self.ip_address, self.mac_address,
            allowed_address_pairs=allowed_address_pairs)

    def _confirm_allowed_address_pair(self, port, ip):
        """Assert the port carries exactly the expected IP/MAC pair."""
        msg = 'Port allowed address pairs should not be empty'
        self.assertTrue(port['allowed_address_pairs'], msg)
        ip_address = port['allowed_address_pairs'][0]['ip_address']
        mac_address = port['allowed_address_pairs'][0]['mac_address']
        self.assertEqual(ip_address, ip)
        self.assertEqual(mac_address, self.mac_address)
|
||||
|
||||
|
||||
class AllowedAddressPairIpV6TestJSON(AllowedAddressPairTestJSON):
    # Re-run the entire allowed-address-pair suite over IPv6 networks.
    _ip_version = 6
|
402
neutron/tests/tempest/api/network/test_dhcp_ipv6.py
Normal file
402
neutron/tests/tempest/api/network/test_dhcp_ipv6.py
Normal file
@ -0,0 +1,402 @@
|
||||
# Copyright 2014 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import netaddr
|
||||
import random
|
||||
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class NetworksTestDHCPv6(base.BaseNetworkTest):
    _ip_version = 6

    """ Test DHCPv6 specific features using SLAAC, stateless and
    stateful settings for subnets. Also it shall check dual-stack
    functionality (IPv4 + IPv6 together).
    The tests include:
        generating of SLAAC EUI-64 address in subnets with various settings
        receiving SLAAC addresses in combinations of various subnets
        receiving stateful IPv6 addresses
        addressing in subnets with router
    """

    @classmethod
    def skip_checks(cls):
        msg = None
        if not CONF.network_feature_enabled.ipv6:
            msg = "IPv6 is not enabled"
        elif not CONF.network_feature_enabled.ipv6_subnet_attributes:
            msg = "DHCPv6 attributes are not enabled."
        if msg:
            raise cls.skipException(msg)

    @classmethod
    def resource_setup(cls):
        super(NetworksTestDHCPv6, cls).resource_setup()
        cls.network = cls.create_network()

    def _remove_from_list_by_index(self, things_list, elem):
        # Drop the tracked entry with a matching id so the base class
        # cleanup does not try to delete the resource a second time.
        for index, i in enumerate(things_list):
            if i['id'] == elem['id']:
                break
        del things_list[index]

    def _clean_network(self):
        """Delete the ports, subnets and routers created by a test case."""
        body = self.client.list_ports()
        ports = body['ports']
        for port in ports:
            # Router interface ports must be detached from the router
            # before the router itself can be deleted below.
            if (port['device_owner'].startswith('network:router_interface')
                and port['device_id'] in [r['id'] for r in self.routers]):
                self.client.remove_router_interface_with_port_id(
                    port['device_id'], port['id']
                )
            else:
                if port['id'] in [p['id'] for p in self.ports]:
                    self.client.delete_port(port['id'])
                    self._remove_from_list_by_index(self.ports, port)
        body = self.client.list_subnets()
        subnets = body['subnets']
        for subnet in subnets:
            if subnet['id'] in [s['id'] for s in self.subnets]:
                self.client.delete_subnet(subnet['id'])
                self._remove_from_list_by_index(self.subnets, subnet)
        body = self.client.list_routers()
        routers = body['routers']
        for router in routers:
            if router['id'] in [r['id'] for r in self.routers]:
                self.client.delete_router(router['id'])
                self._remove_from_list_by_index(self.routers, router)

    def _get_ips_from_subnet(self, **kwargs):
        """Create a subnet and a port; return (assigned IP, EUI-64 IP)."""
        subnet = self.create_subnet(self.network, **kwargs)
        port_mac = data_utils.rand_mac_address()
        port = self.create_port(self.network, mac_address=port_mac)
        real_ip = next(iter(port['fixed_ips']), None)['ip_address']
        eui_ip = data_utils.get_ipv6_addr_by_EUI64(subnet['cidr'],
                                                   port_mac).format()
        return real_ip, eui_ip

    @test.idempotent_id('e5517e62-6f16-430d-a672-f80875493d4c')
    def test_dhcpv6_stateless_eui64(self):
        """When subnets configured with RAs SLAAC (AOM=100) and DHCP stateless
        (AOM=110) both for radvd and dnsmasq, port shall receive IP address
        calculated from its MAC.
        """
        for ra_mode, add_mode in (
                ('slaac', 'slaac'),
                ('dhcpv6-stateless', 'dhcpv6-stateless'),
        ):
            kwargs = {'ipv6_ra_mode': ra_mode,
                      'ipv6_address_mode': add_mode}
            real_ip, eui_ip = self._get_ips_from_subnet(**kwargs)
            self._clean_network()
            self.assertEqual(eui_ip, real_ip,
                             ('Real port IP is %s, but shall be %s when '
                              'ipv6_ra_mode=%s and ipv6_address_mode=%s') % (
                                 real_ip, eui_ip, ra_mode, add_mode))

    @test.idempotent_id('ae2f4a5d-03ff-4c42-a3b0-ce2fcb7ea832')
    def test_dhcpv6_stateless_no_ra(self):
        """When subnets configured with dnsmasq SLAAC and DHCP stateless
        and there is no radvd, port shall receive IP address calculated
        from its MAC and mask of subnet.
        """
        for ra_mode, add_mode in (
                (None, 'slaac'),
                (None, 'dhcpv6-stateless'),
        ):
            kwargs = {'ipv6_ra_mode': ra_mode,
                      'ipv6_address_mode': add_mode}
            # NOTE: .items() instead of the Python-2-only .iteritems() so
            # this module also imports under Python 3.
            kwargs = {k: v for k, v in kwargs.items() if v}
            real_ip, eui_ip = self._get_ips_from_subnet(**kwargs)
            self._clean_network()
            self.assertEqual(eui_ip, real_ip,
                             ('Real port IP %s shall be equal to EUI-64 %s'
                              'when ipv6_ra_mode=%s,ipv6_address_mode=%s') % (
                                 real_ip, eui_ip,
                                 ra_mode if ra_mode else "Off",
                                 add_mode if add_mode else "Off"))

    @test.idempotent_id('81f18ef6-95b5-4584-9966-10d480b7496a')
    def test_dhcpv6_invalid_options(self):
        """Different configurations for radvd and dnsmasq are not allowed"""
        for ra_mode, add_mode in (
                ('dhcpv6-stateless', 'dhcpv6-stateful'),
                ('dhcpv6-stateless', 'slaac'),
                ('slaac', 'dhcpv6-stateful'),
                ('dhcpv6-stateful', 'dhcpv6-stateless'),
                ('dhcpv6-stateful', 'slaac'),
                ('slaac', 'dhcpv6-stateless'),
        ):
            kwargs = {'ipv6_ra_mode': ra_mode,
                      'ipv6_address_mode': add_mode}
            self.assertRaises(lib_exc.BadRequest,
                              self.create_subnet,
                              self.network,
                              **kwargs)

    @test.idempotent_id('21635b6f-165a-4d42-bf49-7d195e47342f')
    def test_dhcpv6_stateless_no_ra_no_dhcp(self):
        """If no radvd option and no dnsmasq option is configured
        port shall receive IP from fixed IPs list of subnet.
        """
        real_ip, eui_ip = self._get_ips_from_subnet()
        self._clean_network()
        self.assertNotEqual(eui_ip, real_ip,
                            ('Real port IP %s equal to EUI-64 %s when '
                             'ipv6_ra_mode=Off and ipv6_address_mode=Off,'
                             'but shall be taken from fixed IPs') % (
                                real_ip, eui_ip))

    @test.idempotent_id('4544adf7-bb5f-4bdc-b769-b3e77026cef2')
    def test_dhcpv6_two_subnets(self):
        """When one IPv6 subnet configured with dnsmasq SLAAC or DHCP stateless
        and other IPv6 is with DHCP stateful, port shall receive EUI-64 IP
        addresses from first subnet and DHCP address from second one.
        Order of subnet creating should be unimportant.
        """
        for order in ("slaac_first", "dhcp_first"):
            for ra_mode, add_mode in (
                    ('slaac', 'slaac'),
                    ('dhcpv6-stateless', 'dhcpv6-stateless'),
            ):
                kwargs = {'ipv6_ra_mode': ra_mode,
                          'ipv6_address_mode': add_mode}
                kwargs_dhcp = {'ipv6_address_mode': 'dhcpv6-stateful'}
                if order == "slaac_first":
                    subnet_slaac = self.create_subnet(self.network, **kwargs)
                    subnet_dhcp = self.create_subnet(
                        self.network, **kwargs_dhcp)
                else:
                    subnet_dhcp = self.create_subnet(
                        self.network, **kwargs_dhcp)
                    subnet_slaac = self.create_subnet(self.network, **kwargs)
                port_mac = data_utils.rand_mac_address()
                dhcp_ip = subnet_dhcp["allocation_pools"][0]["start"]
                eui_ip = data_utils.get_ipv6_addr_by_EUI64(
                    subnet_slaac['cidr'],
                    port_mac
                ).format()
                # TODO(sergsh): remove this when 1219795 is fixed
                dhcp_ip = [dhcp_ip, (netaddr.IPAddress(dhcp_ip) + 1).format()]
                port = self.create_port(self.network, mac_address=port_mac)
                real_ips = dict([(k['subnet_id'], k['ip_address'])
                                 for k in port['fixed_ips']])
                # NOTE: parenthesized tuple — the bare "for sub in a, b"
                # form is a SyntaxError on Python 3.
                real_dhcp_ip, real_eui_ip = [real_ips[sub['id']]
                                             for sub in (subnet_dhcp,
                                                         subnet_slaac)]
                self.client.delete_port(port['id'])
                self.ports.pop()
                body = self.client.list_ports()
                ports_id_list = [i['id'] for i in body['ports']]
                self.assertNotIn(port['id'], ports_id_list)
                self._clean_network()
                self.assertEqual(real_eui_ip,
                                 eui_ip,
                                 'Real IP is {0}, but shall be {1}'.format(
                                     real_eui_ip,
                                     eui_ip))
                self.assertIn(
                    real_dhcp_ip, dhcp_ip,
                    'Real IP is {0}, but shall be one from {1}'.format(
                        real_dhcp_ip,
                        str(dhcp_ip)))

    @test.idempotent_id('4256c61d-c538-41ea-9147-3c450c36669e')
    def test_dhcpv6_64_subnets(self):
        """When one IPv6 subnet configured with dnsmasq SLAAC or DHCP stateless
        and other IPv4 is with DHCP of IPv4, port shall receive EUI-64 IP
        addresses from first subnet and IPv4 DHCP address from second one.
        Order of subnet creating should be unimportant.
        """
        for order in ("slaac_first", "dhcp_first"):
            for ra_mode, add_mode in (
                    ('slaac', 'slaac'),
                    ('dhcpv6-stateless', 'dhcpv6-stateless'),
            ):
                kwargs = {'ipv6_ra_mode': ra_mode,
                          'ipv6_address_mode': add_mode}
                if order == "slaac_first":
                    subnet_slaac = self.create_subnet(self.network, **kwargs)
                    subnet_dhcp = self.create_subnet(
                        self.network, ip_version=4)
                else:
                    subnet_dhcp = self.create_subnet(
                        self.network, ip_version=4)
                    subnet_slaac = self.create_subnet(self.network, **kwargs)
                port_mac = data_utils.rand_mac_address()
                dhcp_ip = subnet_dhcp["allocation_pools"][0]["start"]
                eui_ip = data_utils.get_ipv6_addr_by_EUI64(
                    subnet_slaac['cidr'],
                    port_mac
                ).format()
                # TODO(sergsh): remove this when 1219795 is fixed
                dhcp_ip = [dhcp_ip, (netaddr.IPAddress(dhcp_ip) + 1).format()]
                port = self.create_port(self.network, mac_address=port_mac)
                real_ips = dict([(k['subnet_id'], k['ip_address'])
                                 for k in port['fixed_ips']])
                # NOTE: parenthesized tuple for Python 3 compatibility.
                real_dhcp_ip, real_eui_ip = [real_ips[sub['id']]
                                             for sub in (subnet_dhcp,
                                                         subnet_slaac)]
                self._clean_network()
                self.assertTrue({real_eui_ip,
                                 real_dhcp_ip}.issubset([eui_ip] + dhcp_ip))
                self.assertEqual(real_eui_ip,
                                 eui_ip,
                                 'Real IP is {0}, but shall be {1}'.format(
                                     real_eui_ip,
                                     eui_ip))
                self.assertIn(
                    real_dhcp_ip, dhcp_ip,
                    'Real IP is {0}, but shall be one from {1}'.format(
                        real_dhcp_ip,
                        str(dhcp_ip)))

    @test.idempotent_id('4ab211a0-276f-4552-9070-51e27f58fecf')
    def test_dhcp_stateful(self):
        """With all options below, DHCPv6 shall allocate first
        address from subnet pool to port.
        """
        for ra_mode, add_mode in (
                ('dhcpv6-stateful', 'dhcpv6-stateful'),
                ('dhcpv6-stateful', None),
                (None, 'dhcpv6-stateful'),
        ):
            kwargs = {'ipv6_ra_mode': ra_mode,
                      'ipv6_address_mode': add_mode}
            kwargs = {k: v for k, v in kwargs.items() if v}
            subnet = self.create_subnet(self.network, **kwargs)
            port = self.create_port(self.network)
            port_ip = next(iter(port['fixed_ips']), None)['ip_address']
            dhcp_ip = subnet["allocation_pools"][0]["start"]
            # TODO(sergsh): remove this when 1219795 is fixed
            dhcp_ip = [dhcp_ip, (netaddr.IPAddress(dhcp_ip) + 1).format()]
            self._clean_network()
            self.assertIn(
                port_ip, dhcp_ip,
                'Real IP is {0}, but shall be one from {1}'.format(
                    port_ip,
                    str(dhcp_ip)))

    @test.idempotent_id('51a5e97f-f02e-4e4e-9a17-a69811d300e3')
    def test_dhcp_stateful_fixedips(self):
        """With all options below, port shall be able to get
        requested IP from fixed IP range not depending on
        DHCP stateful (not SLAAC!) settings configured.
        """
        for ra_mode, add_mode in (
                ('dhcpv6-stateful', 'dhcpv6-stateful'),
                ('dhcpv6-stateful', None),
                (None, 'dhcpv6-stateful'),
        ):
            kwargs = {'ipv6_ra_mode': ra_mode,
                      'ipv6_address_mode': add_mode}
            kwargs = {k: v for k, v in kwargs.items() if v}
            subnet = self.create_subnet(self.network, **kwargs)
            ip_range = netaddr.IPRange(subnet["allocation_pools"][0]["start"],
                                       subnet["allocation_pools"][0]["end"])
            ip = netaddr.IPAddress(random.randrange(ip_range.first,
                                                    ip_range.last)).format()
            port = self.create_port(self.network,
                                    fixed_ips=[{'subnet_id': subnet['id'],
                                                'ip_address': ip}])
            port_ip = next(iter(port['fixed_ips']), None)['ip_address']
            self._clean_network()
            self.assertEqual(port_ip, ip,
                             ("Port IP %s is not as fixed IP from "
                              "port create request: %s") % (
                                 port_ip, ip))

    @test.idempotent_id('98244d88-d990-4570-91d4-6b25d70d08af')
    def test_dhcp_stateful_fixedips_outrange(self):
        """When port gets IP address from fixed IP range it
        shall be checked if it's from subnets range.
        """
        kwargs = {'ipv6_ra_mode': 'dhcpv6-stateful',
                  'ipv6_address_mode': 'dhcpv6-stateful'}
        subnet = self.create_subnet(self.network, **kwargs)
        ip_range = netaddr.IPRange(subnet["allocation_pools"][0]["start"],
                                   subnet["allocation_pools"][0]["end"])
        ip = netaddr.IPAddress(random.randrange(
            ip_range.last + 1, ip_range.last + 10)).format()
        self.assertRaises(lib_exc.BadRequest,
                          self.create_port,
                          self.network,
                          fixed_ips=[{'subnet_id': subnet['id'],
                                      'ip_address': ip}])

    @test.idempotent_id('57b8302b-cba9-4fbb-8835-9168df029051')
    def test_dhcp_stateful_fixedips_duplicate(self):
        """When port gets IP address from fixed IP range it
        shall be checked if it's not duplicate.
        """
        kwargs = {'ipv6_ra_mode': 'dhcpv6-stateful',
                  'ipv6_address_mode': 'dhcpv6-stateful'}
        subnet = self.create_subnet(self.network, **kwargs)
        ip_range = netaddr.IPRange(subnet["allocation_pools"][0]["start"],
                                   subnet["allocation_pools"][0]["end"])
        ip = netaddr.IPAddress(random.randrange(
            ip_range.first, ip_range.last)).format()
        self.create_port(self.network,
                         fixed_ips=[
                             {'subnet_id': subnet['id'],
                              'ip_address': ip}])
        # NOTE: assertRaisesRegexp is the py2-compatible spelling; it is a
        # deprecated alias of assertRaisesRegex on Python 3.
        self.assertRaisesRegexp(lib_exc.Conflict,
                                "object with that identifier already exists",
                                self.create_port,
                                self.network,
                                fixed_ips=[{'subnet_id': subnet['id'],
                                            'ip_address': ip}])

    def _create_subnet_router(self, kwargs):
        """Create a subnet plus a router attached to it; return both."""
        subnet = self.create_subnet(self.network, **kwargs)
        router = self.create_router(
            router_name=data_utils.rand_name("routerv6-"),
            admin_state_up=True)
        port = self.create_router_interface(router['id'],
                                            subnet['id'])
        body = self.client.show_port(port['port_id'])
        return subnet, body['port']

    @test.idempotent_id('e98f65db-68f4-4330-9fea-abd8c5192d4d')
    def test_dhcp_stateful_router(self):
        """With all options below the router interface shall
        receive DHCPv6 IP address from allocation pool.
        """
        for ra_mode, add_mode in (
                ('dhcpv6-stateful', 'dhcpv6-stateful'),
                ('dhcpv6-stateful', None),
        ):
            kwargs = {'ipv6_ra_mode': ra_mode,
                      'ipv6_address_mode': add_mode}
            kwargs = {k: v for k, v in kwargs.items() if v}
            subnet, port = self._create_subnet_router(kwargs)
            port_ip = next(iter(port['fixed_ips']), None)['ip_address']
            self._clean_network()
            self.assertEqual(port_ip, subnet['gateway_ip'],
                             ("Port IP %s is not as first IP from "
                              "subnets allocation pool: %s") % (
                                 port_ip, subnet['gateway_ip']))

    def tearDown(self):
        self._clean_network()
        super(NetworksTestDHCPv6, self).tearDown()
|
75
neutron/tests/tempest/api/network/test_extensions.py
Normal file
75
neutron/tests/tempest/api/network/test_extensions.py
Normal file
@ -0,0 +1,75 @@
|
||||
# Copyright 2013 OpenStack, Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
|
||||
class ExtensionsTestJSON(base.BaseNetworkTest):

    """
    Tests the following operations in the Neutron API using the REST client for
    Neutron:

        List all available extensions

    v2.0 of the Neutron API is assumed. It is also assumed that the following
    options are defined in the [network] section of etc/tempest.conf:

    """
    # NOTE: the original no-op resource_setup override (which only called
    # super) was removed; the base class implementation still runs.

    @test.attr(type='smoke')
    @test.idempotent_id('ef28c7e6-e646-4979-9d67-deb207bc5564')
    def test_list_show_extensions(self):
        # List available extensions for the tenant
        expected_alias = ['security-group', 'l3_agent_scheduler',
                          'ext-gw-mode', 'binding', 'quotas',
                          'agent', 'dhcp_agent_scheduler', 'provider',
                          'router', 'extraroute', 'external-net',
                          'allowed-address-pairs', 'extra_dhcp_opt']
        # Keep only the aliases whose extension is enabled in the config.
        expected_alias = [ext for ext in expected_alias if
                          test.is_extension_enabled(ext, 'network')]
        actual_alias = list()
        extensions = self.client.list_extensions()
        list_extensions = extensions['extensions']
        # Show and verify the details of the available extensions
        for ext in list_extensions:
            ext_name = ext['name']
            ext_alias = ext['alias']
            actual_alias.append(ext['alias'])
            ext_details = self.client.show_extension(ext_alias)
            ext_details = ext_details['extension']

            self.assertIsNotNone(ext_details)
            self.assertIn('updated', ext_details.keys())
            self.assertIn('name', ext_details.keys())
            self.assertIn('description', ext_details.keys())
            self.assertIn('namespace', ext_details.keys())
            self.assertIn('links', ext_details.keys())
            self.assertIn('alias', ext_details.keys())
            self.assertEqual(ext_details['name'], ext_name)
            self.assertEqual(ext_details['alias'], ext_alias)
            self.assertEqual(ext_details, ext)
        # Verify if expected extensions are present in the actual list
        # of extensions returned. expected_alias was already filtered by
        # is_extension_enabled above, so the original's second per-element
        # is_extension_enabled check was redundant and has been dropped.
        for e in expected_alias:
            self.assertIn(e, actual_alias)
|
100
neutron/tests/tempest/api/network/test_extra_dhcp_options.py
Normal file
100
neutron/tests/tempest/api/network/test_extra_dhcp_options.py
Normal file
@ -0,0 +1,100 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
|
||||
class ExtraDHCPOptionsTestJSON(base.BaseNetworkTest):

    """
    Tests the following operations with the Extra DHCP Options Neutron API
    extension:

        port create
        port list
        port show
        port update

    v2.0 of the Neutron API is assumed. It is also assumed that the Extra
    DHCP Options extension is enabled in the [network-feature-enabled]
    section of etc/tempest.conf
    """

    @classmethod
    def resource_setup(cls):
        super(ExtraDHCPOptionsTestJSON, cls).resource_setup()
        if not test.is_extension_enabled('extra_dhcp_opt', 'network'):
            msg = "Extra DHCP Options extension not enabled."
            raise cls.skipException(msg)
        cls.network = cls.create_network()
        cls.subnet = cls.create_subnet(cls.network)
        cls.port = cls.create_port(cls.network)
        # Pick v4 or v6 literals depending on which stack the class runs on.
        cls.ip_tftp = ('123.123.123.123' if cls._ip_version == 4
                       else '2015::dead')
        cls.ip_server = ('123.123.123.45' if cls._ip_version == 4
                         else '2015::badd')
        cls.extra_dhcp_opts = [
            {'opt_value': 'pxelinux.0', 'opt_name': 'bootfile-name'},
            {'opt_value': cls.ip_tftp, 'opt_name': 'tftp-server'},
            {'opt_value': cls.ip_server, 'opt_name': 'server-ip-address'}
        ]

    @test.attr(type='smoke')
    @test.idempotent_id('d2c17063-3767-4a24-be4f-a23dbfa133c9')
    def test_create_list_port_with_extra_dhcp_options(self):
        # Create a port carrying the extra DHCP options, then verify the
        # listing reflects them.
        create_body = self.client.create_port(
            network_id=self.network['id'],
            extra_dhcp_opts=self.extra_dhcp_opts)
        created_port_id = create_body['port']['id']
        self.addCleanup(self.client.delete_port, created_port_id)

        listing = self.client.list_ports()
        matching = [p for p in listing['ports'] if p['id'] == created_port_id]
        self.assertTrue(matching)
        self._confirm_extra_dhcp_options(matching[0], self.extra_dhcp_opts)

    @test.attr(type='smoke')
    @test.idempotent_id('9a6aebf4-86ee-4f47-b07a-7f7232c55607')
    def test_update_show_port_with_extra_dhcp_options(self):
        # Update the shared port with extra dhcp options and a new name,
        # then confirm the options via show_port.
        new_name = data_utils.rand_name('new-port-name')
        self.client.update_port(
            self.port['id'],
            name=new_name,
            extra_dhcp_opts=self.extra_dhcp_opts)
        shown = self.client.show_port(self.port['id'])
        self._confirm_extra_dhcp_options(shown['port'], self.extra_dhcp_opts)

    def _confirm_extra_dhcp_options(self, port, extra_dhcp_opts):
        """Assert the port carries exactly the expected extra DHCP options."""
        retrieved = port['extra_dhcp_opts']
        self.assertEqual(len(retrieved), len(extra_dhcp_opts))
        for retrieved_option in retrieved:
            matched = any(
                retrieved_option['opt_value'] == option['opt_value'] and
                retrieved_option['opt_name'] == option['opt_name']
                for option in extra_dhcp_opts)
            if not matched:
                self.fail('Extra DHCP option not found in port %s' %
                          str(retrieved_option))
|
||||
|
||||
|
||||
class ExtraDHCPOptionsIpV6TestJSON(ExtraDHCPOptionsTestJSON):
    # Re-run the extra-DHCP-options suite over IPv6 networks.
    _ip_version = 6
|
219
neutron/tests/tempest/api/network/test_floating_ips.py
Normal file
219
neutron/tests/tempest/api/network/test_floating_ips.py
Normal file
@ -0,0 +1,219 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import netaddr
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class FloatingIPTestJSON(base.BaseNetworkTest):

    """
    Tests the following operations in the Quantum API using the REST client for
    Neutron:

        Create a Floating IP
        Update a Floating IP
        Delete a Floating IP
        List all Floating IPs
        Show Floating IP details
        Associate a Floating IP with a port and then delete that port
        Associate a Floating IP with a port and then with a port on another
        router

    v2.0 of the Neutron API is assumed. It is also assumed that the following
    options are defined in the [network] section of etc/tempest.conf:

        public_network_id which is the id for the external network present
    """

    @classmethod
    def resource_setup(cls):
        super(FloatingIPTestJSON, cls).resource_setup()
        if not test.is_extension_enabled('router', 'network'):
            msg = "router extension not enabled."
            raise cls.skipException(msg)
        cls.ext_net_id = CONF.network.public_network_id

        # Create network, subnet, router and add interface
        cls.network = cls.create_network()
        cls.subnet = cls.create_subnet(cls.network)
        cls.router = cls.create_router(data_utils.rand_name('router-'),
                                       external_network_id=cls.ext_net_id)
        cls.create_router_interface(cls.router['id'], cls.subnet['id'])
        # Create two ports one each for Creation and Updating of floatingIP.
        # NOTE(review): create_port() presumably records the ports in
        # cls.ports (the tests read self.ports[0]/self.ports[1]); the
        # original also bound an unused ``cls.port = list()`` here, which
        # has been removed as dead code.
        for i in range(2):
            cls.create_port(cls.network)

    @test.attr(type='smoke')
    @test.idempotent_id('62595970-ab1c-4b7f-8fcc-fddfe55e8718')
    def test_create_list_show_update_delete_floating_ip(self):
        """Exercise the full CRUD + list lifecycle of a floating IP."""
        # Creates a floating IP
        body = self.client.create_floatingip(
            floating_network_id=self.ext_net_id,
            port_id=self.ports[0]['id'])
        created_floating_ip = body['floatingip']
        self.addCleanup(self.client.delete_floatingip,
                        created_floating_ip['id'])
        self.assertIsNotNone(created_floating_ip['id'])
        self.assertIsNotNone(created_floating_ip['tenant_id'])
        self.assertIsNotNone(created_floating_ip['floating_ip_address'])
        self.assertEqual(created_floating_ip['port_id'], self.ports[0]['id'])
        self.assertEqual(created_floating_ip['floating_network_id'],
                         self.ext_net_id)
        # The fixed address must be one of the port's addresses.
        self.assertIn(created_floating_ip['fixed_ip_address'],
                      [ip['ip_address'] for ip in self.ports[0]['fixed_ips']])
        # Verifies the details of a floating_ip
        floating_ip = self.client.show_floatingip(created_floating_ip['id'])
        shown_floating_ip = floating_ip['floatingip']
        self.assertEqual(shown_floating_ip['id'], created_floating_ip['id'])
        self.assertEqual(shown_floating_ip['floating_network_id'],
                         self.ext_net_id)
        self.assertEqual(shown_floating_ip['tenant_id'],
                         created_floating_ip['tenant_id'])
        self.assertEqual(shown_floating_ip['floating_ip_address'],
                         created_floating_ip['floating_ip_address'])
        self.assertEqual(shown_floating_ip['port_id'], self.ports[0]['id'])

        # Verify the floating ip exists in the list of all floating_ips
        floating_ips = self.client.list_floatingips()
        floatingip_id_list = [f['id'] for f in floating_ips['floatingips']]
        self.assertIn(created_floating_ip['id'], floatingip_id_list)
        # Associate floating IP to the other port
        floating_ip = self.client.update_floatingip(
            created_floating_ip['id'],
            port_id=self.ports[1]['id'])
        updated_floating_ip = floating_ip['floatingip']
        self.assertEqual(updated_floating_ip['port_id'], self.ports[1]['id'])
        self.assertEqual(updated_floating_ip['fixed_ip_address'],
                         self.ports[1]['fixed_ips'][0]['ip_address'])
        self.assertEqual(updated_floating_ip['router_id'], self.router['id'])

        # Disassociate floating IP from the port
        floating_ip = self.client.update_floatingip(
            created_floating_ip['id'],
            port_id=None)
        updated_floating_ip = floating_ip['floatingip']
        # After disassociation the port/fixed-ip/router fields are cleared.
        self.assertIsNone(updated_floating_ip['port_id'])
        self.assertIsNone(updated_floating_ip['fixed_ip_address'])
        self.assertIsNone(updated_floating_ip['router_id'])

    @test.attr(type='smoke')
    @test.idempotent_id('e1f6bffd-442f-4668-b30e-df13f2705e77')
    def test_floating_ip_delete_port(self):
        """Deleting an associated port must disassociate the floating IP."""
        # Create a floating IP
        body = self.client.create_floatingip(
            floating_network_id=self.ext_net_id)
        created_floating_ip = body['floatingip']
        self.addCleanup(self.client.delete_floatingip,
                        created_floating_ip['id'])
        # Create a port
        port = self.client.create_port(network_id=self.network['id'])
        created_port = port['port']
        floating_ip = self.client.update_floatingip(
            created_floating_ip['id'],
            port_id=created_port['id'])
        # Delete port
        self.client.delete_port(created_port['id'])
        # Verifies the details of the floating_ip
        floating_ip = self.client.show_floatingip(created_floating_ip['id'])
        shown_floating_ip = floating_ip['floatingip']
        # Confirm the fields are back to None
        self.assertEqual(shown_floating_ip['id'], created_floating_ip['id'])
        self.assertIsNone(shown_floating_ip['port_id'])
        self.assertIsNone(shown_floating_ip['fixed_ip_address'])
        self.assertIsNone(shown_floating_ip['router_id'])

    @test.attr(type='smoke')
    @test.idempotent_id('1bb2f731-fe5a-4b8c-8409-799ade1bed4d')
    def test_floating_ip_update_different_router(self):
        """Re-associate a floating IP to a port behind a different router."""
        # Associate a floating IP to a port on a router
        body = self.client.create_floatingip(
            floating_network_id=self.ext_net_id,
            port_id=self.ports[1]['id'])
        created_floating_ip = body['floatingip']
        self.addCleanup(self.client.delete_floatingip,
                        created_floating_ip['id'])
        self.assertEqual(created_floating_ip['router_id'], self.router['id'])
        # Build a second topology with its own router uplinked to the same
        # external network.
        network2 = self.create_network()
        subnet2 = self.create_subnet(network2)
        router2 = self.create_router(data_utils.rand_name('router-'),
                                     external_network_id=self.ext_net_id)
        self.create_router_interface(router2['id'], subnet2['id'])
        port_other_router = self.create_port(network2)
        # Associate floating IP to the other port on another router
        floating_ip = self.client.update_floatingip(
            created_floating_ip['id'],
            port_id=port_other_router['id'])
        updated_floating_ip = floating_ip['floatingip']
        self.assertEqual(updated_floating_ip['router_id'], router2['id'])
        self.assertEqual(updated_floating_ip['port_id'],
                         port_other_router['id'])
        self.assertIsNotNone(updated_floating_ip['fixed_ip_address'])

    @test.attr(type='smoke')
    @test.idempotent_id('36de4bd0-f09c-43e3-a8e1-1decc1ffd3a5')
    def test_create_floating_ip_specifying_a_fixed_ip_address(self):
        """Create a floating IP pinned to an explicit fixed IP, then detach."""
        body = self.client.create_floatingip(
            floating_network_id=self.ext_net_id,
            port_id=self.ports[1]['id'],
            fixed_ip_address=self.ports[1]['fixed_ips'][0]['ip_address'])
        created_floating_ip = body['floatingip']
        self.addCleanup(self.client.delete_floatingip,
                        created_floating_ip['id'])
        self.assertIsNotNone(created_floating_ip['id'])
        self.assertEqual(created_floating_ip['fixed_ip_address'],
                         self.ports[1]['fixed_ips'][0]['ip_address'])
        floating_ip = self.client.update_floatingip(
            created_floating_ip['id'],
            port_id=None)
        self.assertIsNone(floating_ip['floatingip']['port_id'])

    @test.attr(type='smoke')
    @test.idempotent_id('45c4c683-ea97-41ef-9c51-5e9802f2f3d7')
    def test_create_update_floatingip_with_port_multiple_ip_address(self):
        """Create/update a floating IP against a port with two fixed IPs."""
        # Find out ips that can be used for tests
        ips = list(netaddr.IPNetwork(self.subnet['cidr']))
        # Take two addresses from the top of the subnet (excluding the
        # broadcast-equivalent last address).
        list_ips = [str(ip) for ip in ips[-3:-1]]
        fixed_ips = [{'ip_address': list_ips[0]}, {'ip_address': list_ips[1]}]
        # Create port
        body = self.client.create_port(network_id=self.network['id'],
                                       fixed_ips=fixed_ips)
        port = body['port']
        self.addCleanup(self.client.delete_port, port['id'])
        # Create floating ip
        body = self.client.create_floatingip(
            floating_network_id=self.ext_net_id,
            port_id=port['id'],
            fixed_ip_address=list_ips[0])
        floating_ip = body['floatingip']
        self.addCleanup(self.client.delete_floatingip, floating_ip['id'])
        self.assertIsNotNone(floating_ip['id'])
        self.assertEqual(floating_ip['fixed_ip_address'], list_ips[0])
        # Update floating ip
        body = self.client.update_floatingip(floating_ip['id'],
                                             port_id=port['id'],
                                             fixed_ip_address=list_ips[1])
        update_floating_ip = body['floatingip']
        self.assertEqual(update_floating_ip['fixed_ip_address'],
                         list_ips[1])
|
@ -0,0 +1,82 @@
|
||||
# Copyright 2014 Hewlett-Packard Development Company, L.P.
|
||||
# Copyright 2014 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class FloatingIPNegativeTestJSON(base.BaseNetworkTest):

    """Negative floating-IP API tests.

    Covered scenarios:

        Create floatingip with a port that is unreachable to external network
        Create floatingip in private network
        Associate floatingip with port that is unreachable to external network
    """

    @classmethod
    def resource_setup(cls):
        super(FloatingIPNegativeTestJSON, cls).resource_setup()
        if not test.is_extension_enabled('router', 'network'):
            raise cls.skipException("router extension not enabled.")
        cls.ext_net_id = CONF.network.public_network_id
        # Build a private topology: the router is created WITHOUT an external
        # gateway, so ports behind it have no route to the public network.
        cls.network = cls.create_network()
        cls.subnet = cls.create_subnet(cls.network)
        cls.router = cls.create_router(data_utils.rand_name('router'))
        cls.create_router_interface(cls.router['id'], cls.subnet['id'])
        cls.port = cls.create_port(cls.network)

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('22996ea8-4a81-4b27-b6e1-fa5df92fa5e8')
    def test_create_floatingip_with_port_ext_net_unreachable(self):
        """Creating a floating IP for an unreachable port must 404."""
        fixed_ip = self.port['fixed_ips'][0]['ip_address']
        self.assertRaises(lib_exc.NotFound,
                          self.client.create_floatingip,
                          floating_network_id=self.ext_net_id,
                          port_id=self.port['id'],
                          fixed_ip_address=fixed_ip)

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('50b9aeb4-9f0b-48ee-aa31-fa955a48ff54')
    def test_create_floatingip_in_private_network(self):
        """A non-external network is rejected as the floating network."""
        fixed_ip = self.port['fixed_ips'][0]['ip_address']
        self.assertRaises(lib_exc.BadRequest,
                          self.client.create_floatingip,
                          floating_network_id=self.network['id'],
                          port_id=self.port['id'],
                          fixed_ip_address=fixed_ip)

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('6b3b8797-6d43-4191-985c-c48b773eb429')
    def test_associate_floatingip_port_ext_net_unreachable(self):
        """Associating a floating IP to an unreachable port must 404."""
        # Create floating ip
        body = self.client.create_floatingip(
            floating_network_id=self.ext_net_id)
        floating_ip = body['floatingip']
        self.addCleanup(self.client.delete_floatingip, floating_ip['id'])
        # Attempt to associate it to the port behind the gateway-less router.
        fixed_ip = self.port['fixed_ips'][0]['ip_address']
        self.assertRaises(lib_exc.NotFound,
                          self.client.update_floatingip,
                          floating_ip['id'],
                          port_id=self.port['id'],
                          fixed_ip_address=fixed_ip)
|
325
neutron/tests/tempest/api/network/test_fwaas_extensions.py
Normal file
325
neutron/tests/tempest/api/network/test_fwaas_extensions.py
Normal file
@ -0,0 +1,325 @@
|
||||
# Copyright 2014 NEC Corporation. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import exceptions
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class FWaaSExtensionTestJSON(base.BaseNetworkTest):

    """
    Tests the following operations in the Neutron API using the REST client for
    Neutron:

        List firewall rules
        Create firewall rule
        Update firewall rule
        Delete firewall rule
        Show firewall rule
        List firewall policies
        Create firewall policy
        Update firewall policy
        Insert firewall rule to policy
        Remove firewall rule from policy
        Insert firewall rule after/before rule in policy
        Update firewall policy audited attribute
        Delete firewall policy
        Show firewall policy
        List firewall
        Create firewall
        Update firewall
        Delete firewall
        Show firewall
    """

    @classmethod
    def resource_setup(cls):
        super(FWaaSExtensionTestJSON, cls).resource_setup()
        if not test.is_extension_enabled('fwaas', 'network'):
            msg = "FWaaS Extension not enabled."
            raise cls.skipException(msg)
        # Shared fixtures used by the read-only list/show tests below.
        cls.fw_rule = cls.create_firewall_rule("allow", "tcp")
        cls.fw_policy = cls.create_firewall_policy()

    def _try_delete_policy(self, policy_id):
        # delete policy, if it exists
        try:
            self.client.delete_firewall_policy(policy_id)
        # if policy is not found, this means it was deleted in the test
        except lib_exc.NotFound:
            pass

    def _try_delete_rule(self, rule_id):
        # delete rule, if it exists
        try:
            self.client.delete_firewall_rule(rule_id)
        # if rule is not found, this means it was deleted in the test
        except lib_exc.NotFound:
            pass

    def _try_delete_firewall(self, fw_id):
        # delete firewall, if it exists
        try:
            self.client.delete_firewall(fw_id)
        # if firewall is not found, this means it was deleted in the test
        except lib_exc.NotFound:
            pass

        # Block until the backend has actually removed the firewall.
        self.client.wait_for_resource_deletion('firewall', fw_id)

    def _wait_until_ready(self, fw_id):
        """Poll the firewall until it reaches ACTIVE or CREATED state."""
        target_states = ('ACTIVE', 'CREATED')

        def _wait():
            firewall = self.client.show_firewall(fw_id)
            firewall = firewall['firewall']
            return firewall['status'] in target_states

        if not test.call_until_true(_wait, CONF.network.build_timeout,
                                    CONF.network.build_interval):
            m = ("Timed out waiting for firewall %s to reach %s state(s)" %
                 (fw_id, target_states))
            raise exceptions.TimeoutException(m)

    @test.idempotent_id('1b84cf01-9c09-4ce7-bc72-b15e39076468')
    def test_list_firewall_rules(self):
        """The pre-created rule must appear in the full rule listing."""
        # List firewall rules
        fw_rules = self.client.list_firewall_rules()
        fw_rules = fw_rules['firewall_rules']
        self.assertIn((self.fw_rule['id'],
                       self.fw_rule['name'],
                       self.fw_rule['action'],
                       self.fw_rule['protocol'],
                       self.fw_rule['ip_version'],
                       self.fw_rule['enabled']),
                      [(m['id'],
                        m['name'],
                        m['action'],
                        m['protocol'],
                        m['ip_version'],
                        m['enabled']) for m in fw_rules])

    @test.idempotent_id('563564f7-7077-4f5e-8cdc-51f37ae5a2b9')
    def test_create_update_delete_firewall_rule(self):
        """Full create/update/delete lifecycle of a firewall rule."""
        # Create firewall rule
        body = self.client.create_firewall_rule(
            name=data_utils.rand_name("fw-rule"),
            action="allow",
            protocol="tcp")
        fw_rule_id = body['firewall_rule']['id']

        # Update firewall rule
        body = self.client.update_firewall_rule(fw_rule_id,
                                                shared=True)
        self.assertTrue(body["firewall_rule"]['shared'])

        # Delete firewall rule
        self.client.delete_firewall_rule(fw_rule_id)
        # Confirm deletion
        fw_rules = self.client.list_firewall_rules()
        self.assertNotIn(fw_rule_id,
                         [m['id'] for m in fw_rules['firewall_rules']])

    @test.idempotent_id('3ff8c08e-26ff-4034-ae48-810ed213a998')
    def test_show_firewall_rule(self):
        # show a created firewall rule
        fw_rule = self.client.show_firewall_rule(self.fw_rule['id'])
        # NOTE: iteritems() is Python 2 only; items() behaves the same here
        # and keeps the test runnable on Python 3.
        for key, value in fw_rule['firewall_rule'].items():
            self.assertEqual(self.fw_rule[key], value)

    @test.idempotent_id('1086dd93-a4c0-4bbb-a1bd-6d4bc62c199f')
    def test_list_firewall_policies(self):
        """The pre-created policy must appear in the full policy listing."""
        fw_policies = self.client.list_firewall_policies()
        fw_policies = fw_policies['firewall_policies']
        self.assertIn((self.fw_policy['id'],
                       self.fw_policy['name'],
                       self.fw_policy['firewall_rules']),
                      [(m['id'],
                        m['name'],
                        m['firewall_rules']) for m in fw_policies])

    @test.idempotent_id('bbf37b6c-498c-421e-9c95-45897d3ed775')
    def test_create_update_delete_firewall_policy(self):
        """Full create/update/delete lifecycle of a firewall policy."""
        # Create firewall policy
        body = self.client.create_firewall_policy(
            name=data_utils.rand_name("fw-policy"))
        fw_policy_id = body['firewall_policy']['id']
        self.addCleanup(self._try_delete_policy, fw_policy_id)

        # Update firewall policy
        body = self.client.update_firewall_policy(fw_policy_id,
                                                  shared=True,
                                                  name="updated_policy")
        updated_fw_policy = body["firewall_policy"]
        self.assertTrue(updated_fw_policy['shared'])
        self.assertEqual("updated_policy", updated_fw_policy['name'])

        # Delete firewall policy
        self.client.delete_firewall_policy(fw_policy_id)
        # Confirm deletion
        fw_policies = self.client.list_firewall_policies()
        fw_policies = fw_policies['firewall_policies']
        self.assertNotIn(fw_policy_id, [m['id'] for m in fw_policies])

    @test.idempotent_id('1df59b3a-517e-41d4-96f6-fc31cf4ecff2')
    def test_show_firewall_policy(self):
        # show a created firewall policy
        fw_policy = self.client.show_firewall_policy(self.fw_policy['id'])
        fw_policy = fw_policy['firewall_policy']
        # items() instead of the Python-2-only iteritems().
        for key, value in fw_policy.items():
            self.assertEqual(self.fw_policy[key], value)

    @test.idempotent_id('02082a03-3cdd-4789-986a-1327dd80bfb7')
    def test_create_show_delete_firewall(self):
        """Create a firewall, verify show/list output, then delete it."""
        # Create tenant network resources required for an ACTIVE firewall
        network = self.create_network()
        subnet = self.create_subnet(network)
        router = self.create_router(
            data_utils.rand_name('router-'),
            admin_state_up=True)
        self.client.add_router_interface_with_subnet_id(
            router['id'], subnet['id'])

        # Create firewall
        body = self.client.create_firewall(
            name=data_utils.rand_name("firewall"),
            firewall_policy_id=self.fw_policy['id'])
        created_firewall = body['firewall']
        firewall_id = created_firewall['id']
        self.addCleanup(self._try_delete_firewall, firewall_id)

        # Wait for the firewall resource to become ready
        self._wait_until_ready(firewall_id)

        # show a created firewall
        firewall = self.client.show_firewall(firewall_id)
        firewall = firewall['firewall']

        # items() instead of the Python-2-only iteritems().
        for key, value in firewall.items():
            if key == 'status':
                continue
            self.assertEqual(created_firewall[key], value)

        # list firewall
        firewalls = self.client.list_firewalls()
        firewalls = firewalls['firewalls']
        self.assertIn((created_firewall['id'],
                       created_firewall['name'],
                       created_firewall['firewall_policy_id']),
                      [(m['id'],
                        m['name'],
                        m['firewall_policy_id']) for m in firewalls])

        # Delete firewall
        self.client.delete_firewall(firewall_id)

    @test.attr(type='smoke')
    @test.idempotent_id('53305b4b-9897-4e01-87c0-2ae386083180')
    def test_firewall_rule_insertion_position_removal_rule_from_policy(self):
        """Insert rules before/after one another and remove them again."""
        # Create firewall rule
        body = self.client.create_firewall_rule(
            name=data_utils.rand_name("fw-rule"),
            action="allow",
            protocol="tcp")
        fw_rule_id1 = body['firewall_rule']['id']
        self.addCleanup(self._try_delete_rule, fw_rule_id1)
        # Create firewall policy
        body = self.client.create_firewall_policy(
            name=data_utils.rand_name("fw-policy"))
        fw_policy_id = body['firewall_policy']['id']
        self.addCleanup(self._try_delete_policy, fw_policy_id)

        # Insert rule to firewall policy
        self.client.insert_firewall_rule_in_policy(
            fw_policy_id, fw_rule_id1, '', '')

        # Verify insertion of rule in policy
        self.assertIn(fw_rule_id1, self._get_list_fw_rule_ids(fw_policy_id))
        # Create another firewall rule
        body = self.client.create_firewall_rule(
            name=data_utils.rand_name("fw-rule"),
            action="allow",
            protocol="icmp")
        fw_rule_id2 = body['firewall_rule']['id']
        self.addCleanup(self._try_delete_rule, fw_rule_id2)

        # Insert rule to firewall policy after the first rule
        self.client.insert_firewall_rule_in_policy(
            fw_policy_id, fw_rule_id2, fw_rule_id1, '')

        # Verify the position of rule after insertion
        fw_rule = self.client.show_firewall_rule(
            fw_rule_id2)

        self.assertEqual(int(fw_rule['firewall_rule']['position']), 2)
        # Remove rule from the firewall policy
        self.client.remove_firewall_rule_from_policy(
            fw_policy_id, fw_rule_id2)
        # Insert rule to firewall policy before the first rule
        self.client.insert_firewall_rule_in_policy(
            fw_policy_id, fw_rule_id2, '', fw_rule_id1)
        # Verify the position of rule after insertion
        fw_rule = self.client.show_firewall_rule(
            fw_rule_id2)
        self.assertEqual(int(fw_rule['firewall_rule']['position']), 1)
        # Remove rule from the firewall policy
        self.client.remove_firewall_rule_from_policy(
            fw_policy_id, fw_rule_id2)
        # Verify removal of rule from firewall policy
        self.assertNotIn(fw_rule_id2, self._get_list_fw_rule_ids(fw_policy_id))

        # Remove rule from the firewall policy
        self.client.remove_firewall_rule_from_policy(
            fw_policy_id, fw_rule_id1)

        # Verify removal of rule from firewall policy
        self.assertNotIn(fw_rule_id1, self._get_list_fw_rule_ids(fw_policy_id))

    def _get_list_fw_rule_ids(self, fw_policy_id):
        """Return the ids of the rules currently attached to a policy."""
        fw_policy = self.client.show_firewall_policy(
            fw_policy_id)
        # list() replaces the original identity comprehension; a copy is
        # still returned so callers cannot mutate the response body.
        return list(fw_policy['firewall_policy']['firewall_rules'])

    @test.idempotent_id('8515ca8a-0d2f-4298-b5ff-6f924e4587ca')
    def test_update_firewall_policy_audited_attribute(self):
        """Inserting a rule must reset a policy's audited flag to False."""
        # Create firewall rule
        body = self.client.create_firewall_rule(
            name=data_utils.rand_name("fw-rule"),
            action="allow",
            protocol="icmp")
        fw_rule_id = body['firewall_rule']['id']
        self.addCleanup(self._try_delete_rule, fw_rule_id)
        # Create firewall policy
        body = self.client.create_firewall_policy(
            name=data_utils.rand_name('fw-policy'))
        fw_policy_id = body['firewall_policy']['id']
        self.addCleanup(self._try_delete_policy, fw_policy_id)
        self.assertFalse(body['firewall_policy']['audited'])
        # Update firewall policy audited attribute to true
        self.client.update_firewall_policy(fw_policy_id,
                                           audited=True)
        # Insert Firewall rule to firewall policy
        self.client.insert_firewall_rule_in_policy(
            fw_policy_id, fw_rule_id, '', '')
        body = self.client.show_firewall_policy(
            fw_policy_id)
        self.assertFalse(body['firewall_policy']['audited'])
|
453
neutron/tests/tempest/api/network/test_load_balancer.py
Normal file
453
neutron/tests/tempest/api/network/test_load_balancer.py
Normal file
@ -0,0 +1,453 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest_lib import decorators
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
|
||||
class LoadBalancerTestJSON(base.BaseNetworkTest):
|
||||
|
||||
"""
|
||||
Tests the following operations in the Neutron API using the REST client for
|
||||
Neutron:
|
||||
|
||||
create vIP, and Pool
|
||||
show vIP
|
||||
list vIP
|
||||
update vIP
|
||||
delete vIP
|
||||
update pool
|
||||
delete pool
|
||||
show pool
|
||||
list pool
|
||||
health monitoring operations
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(LoadBalancerTestJSON, cls).resource_setup()
|
||||
if not test.is_extension_enabled('lbaas', 'network'):
|
||||
msg = "lbaas extension not enabled."
|
||||
raise cls.skipException(msg)
|
||||
cls.network = cls.create_network()
|
||||
cls.name = cls.network['name']
|
||||
cls.subnet = cls.create_subnet(cls.network)
|
||||
pool_name = data_utils.rand_name('pool-')
|
||||
vip_name = data_utils.rand_name('vip-')
|
||||
cls.pool = cls.create_pool(pool_name, "ROUND_ROBIN",
|
||||
"HTTP", cls.subnet)
|
||||
cls.vip = cls.create_vip(name=vip_name,
|
||||
protocol="HTTP",
|
||||
protocol_port=80,
|
||||
subnet=cls.subnet,
|
||||
pool=cls.pool)
|
||||
cls.member = cls.create_member(80, cls.pool, cls._ip_version)
|
||||
cls.member_address = ("10.0.9.47" if cls._ip_version == 4
|
||||
else "2015::beef")
|
||||
cls.health_monitor = cls.create_health_monitor(delay=4,
|
||||
max_retries=3,
|
||||
Type="TCP",
|
||||
timeout=1)
|
||||
|
||||
def _check_list_with_filter(self, obj_name, attr_exceptions, **kwargs):
|
||||
create_obj = getattr(self.client, 'create_' + obj_name)
|
||||
delete_obj = getattr(self.client, 'delete_' + obj_name)
|
||||
list_objs = getattr(self.client, 'list_' + obj_name + 's')
|
||||
|
||||
body = create_obj(**kwargs)
|
||||
obj = body[obj_name]
|
||||
self.addCleanup(delete_obj, obj['id'])
|
||||
for key, value in obj.iteritems():
|
||||
# It is not relevant to filter by all arguments. That is why
|
||||
# there is a list of attr to except
|
||||
if key not in attr_exceptions:
|
||||
body = list_objs(**{key: value})
|
||||
objs = [v[key] for v in body[obj_name + 's']]
|
||||
self.assertIn(value, objs)
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('c96dbfab-4a80-4e74-a535-e950b5bedd47')
|
||||
def test_list_vips(self):
|
||||
# Verify the vIP exists in the list of all vIPs
|
||||
body = self.client.list_vips()
|
||||
vips = body['vips']
|
||||
self.assertIn(self.vip['id'], [v['id'] for v in vips])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('b8853f65-5089-4e69-befd-041a143427ff')
|
||||
def test_list_vips_with_filter(self):
|
||||
name = data_utils.rand_name('vip-')
|
||||
body = self.client.create_pool(name=data_utils.rand_name("pool-"),
|
||||
lb_method="ROUND_ROBIN",
|
||||
protocol="HTTPS",
|
||||
subnet_id=self.subnet['id'])
|
||||
pool = body['pool']
|
||||
self.addCleanup(self.client.delete_pool, pool['id'])
|
||||
attr_exceptions = ['status', 'session_persistence',
|
||||
'status_description']
|
||||
self._check_list_with_filter(
|
||||
'vip', attr_exceptions, name=name, protocol="HTTPS",
|
||||
protocol_port=81, subnet_id=self.subnet['id'], pool_id=pool['id'],
|
||||
description=data_utils.rand_name('description-'),
|
||||
admin_state_up=False)
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('27f56083-9af9-4a48-abe9-ca1bcc6c9035')
|
||||
def test_create_update_delete_pool_vip(self):
|
||||
# Creates a vip
|
||||
name = data_utils.rand_name('vip-')
|
||||
address = self.subnet['allocation_pools'][0]['end']
|
||||
body = self.client.create_pool(
|
||||
name=data_utils.rand_name("pool-"),
|
||||
lb_method='ROUND_ROBIN',
|
||||
protocol='HTTP',
|
||||
subnet_id=self.subnet['id'])
|
||||
pool = body['pool']
|
||||
body = self.client.create_vip(name=name,
|
||||
protocol="HTTP",
|
||||
protocol_port=80,
|
||||
subnet_id=self.subnet['id'],
|
||||
pool_id=pool['id'],
|
||||
address=address)
|
||||
vip = body['vip']
|
||||
vip_id = vip['id']
|
||||
# Confirm VIP's address correctness with a show
|
||||
body = self.client.show_vip(vip_id)
|
||||
vip = body['vip']
|
||||
self.assertEqual(address, vip['address'])
|
||||
# Verification of vip update
|
||||
new_name = "New_vip"
|
||||
new_description = "New description"
|
||||
persistence_type = "HTTP_COOKIE"
|
||||
update_data = {"session_persistence": {
|
||||
"type": persistence_type}}
|
||||
body = self.client.update_vip(vip_id,
|
||||
name=new_name,
|
||||
description=new_description,
|
||||
connection_limit=10,
|
||||
admin_state_up=False,
|
||||
**update_data)
|
||||
updated_vip = body['vip']
|
||||
self.assertEqual(new_name, updated_vip['name'])
|
||||
self.assertEqual(new_description, updated_vip['description'])
|
||||
self.assertEqual(10, updated_vip['connection_limit'])
|
||||
self.assertFalse(updated_vip['admin_state_up'])
|
||||
self.assertEqual(persistence_type,
|
||||
updated_vip['session_persistence']['type'])
|
||||
self.client.delete_vip(vip['id'])
|
||||
self.client.wait_for_resource_deletion('vip', vip['id'])
|
||||
# Verification of pool update
|
||||
new_name = "New_pool"
|
||||
body = self.client.update_pool(pool['id'],
|
||||
name=new_name,
|
||||
description="new_description",
|
||||
lb_method='LEAST_CONNECTIONS')
|
||||
updated_pool = body['pool']
|
||||
self.assertEqual(new_name, updated_pool['name'])
|
||||
self.assertEqual('new_description', updated_pool['description'])
|
||||
self.assertEqual('LEAST_CONNECTIONS', updated_pool['lb_method'])
|
||||
self.client.delete_pool(pool['id'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('0435a95e-1d19-4d90-9e9f-3b979e9ad089')
|
||||
def test_show_vip(self):
|
||||
# Verifies the details of a vip
|
||||
body = self.client.show_vip(self.vip['id'])
|
||||
vip = body['vip']
|
||||
for key, value in vip.iteritems():
|
||||
# 'status' should not be confirmed in api tests
|
||||
if key != 'status':
|
||||
self.assertEqual(self.vip[key], value)
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('6e7a7d31-8451-456d-b24a-e50479ce42a7')
|
||||
def test_show_pool(self):
|
||||
# Here we need to new pool without any dependence with vips
|
||||
body = self.client.create_pool(name=data_utils.rand_name("pool-"),
|
||||
lb_method='ROUND_ROBIN',
|
||||
protocol='HTTP',
|
||||
subnet_id=self.subnet['id'])
|
||||
pool = body['pool']
|
||||
self.addCleanup(self.client.delete_pool, pool['id'])
|
||||
# Verifies the details of a pool
|
||||
body = self.client.show_pool(pool['id'])
|
||||
shown_pool = body['pool']
|
||||
for key, value in pool.iteritems():
|
||||
# 'status' should not be confirmed in api tests
|
||||
if key != 'status':
|
||||
self.assertEqual(value, shown_pool[key])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('d1ab1ffa-e06a-487f-911f-56418cb27727')
|
||||
def test_list_pools(self):
|
||||
# Verify the pool exists in the list of all pools
|
||||
body = self.client.list_pools()
|
||||
pools = body['pools']
|
||||
self.assertIn(self.pool['id'], [p['id'] for p in pools])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('27cc4c1a-caac-4273-b983-2acb4afaad4f')
|
||||
def test_list_pools_with_filters(self):
|
||||
attr_exceptions = ['status', 'vip_id', 'members', 'provider',
|
||||
'status_description']
|
||||
self._check_list_with_filter(
|
||||
'pool', attr_exceptions, name=data_utils.rand_name("pool-"),
|
||||
lb_method="ROUND_ROBIN", protocol="HTTPS",
|
||||
subnet_id=self.subnet['id'],
|
||||
description=data_utils.rand_name('description-'),
|
||||
admin_state_up=False)
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('282d0dfd-5c3a-4c9b-b39c-c99782f39193')
|
||||
def test_list_members(self):
|
||||
# Verify the member exists in the list of all members
|
||||
body = self.client.list_members()
|
||||
members = body['members']
|
||||
self.assertIn(self.member['id'], [m['id'] for m in members])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('243b5126-24c6-4879-953e-7c7e32d8a57f')
|
||||
def test_list_members_with_filters(self):
|
||||
attr_exceptions = ['status', 'status_description']
|
||||
self._check_list_with_filter('member', attr_exceptions,
|
||||
address=self.member_address,
|
||||
protocol_port=80,
|
||||
pool_id=self.pool['id'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('fb833ee8-9e69-489f-b540-a409762b78b2')
|
||||
def test_create_update_delete_member(self):
|
||||
# Creates a member
|
||||
body = self.client.create_member(address=self.member_address,
|
||||
protocol_port=80,
|
||||
pool_id=self.pool['id'])
|
||||
member = body['member']
|
||||
# Verification of member update
|
||||
body = self.client.update_member(member['id'],
|
||||
admin_state_up=False)
|
||||
updated_member = body['member']
|
||||
self.assertFalse(updated_member['admin_state_up'])
|
||||
# Verification of member delete
|
||||
self.client.delete_member(member['id'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('893cd71f-a7dd-4485-b162-f6ab9a534914')
|
||||
def test_show_member(self):
|
||||
# Verifies the details of a member
|
||||
body = self.client.show_member(self.member['id'])
|
||||
member = body['member']
|
||||
for key, value in member.iteritems():
|
||||
# 'status' should not be confirmed in api tests
|
||||
if key != 'status':
|
||||
self.assertEqual(self.member[key], value)
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('8e5822c5-68a4-4224-8d6c-a617741ebc2d')
|
||||
def test_list_health_monitors(self):
|
||||
# Verify the health monitor exists in the list of all health monitors
|
||||
body = self.client.list_health_monitors()
|
||||
health_monitors = body['health_monitors']
|
||||
self.assertIn(self.health_monitor['id'],
|
||||
[h['id'] for h in health_monitors])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('49bac58a-511c-4875-b794-366698211d25')
|
||||
def test_list_health_monitors_with_filters(self):
|
||||
attr_exceptions = ['status', 'status_description', 'pools']
|
||||
self._check_list_with_filter('health_monitor', attr_exceptions,
|
||||
delay=5, max_retries=4, type="TCP",
|
||||
timeout=2)
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('e8ce05c4-d554-4d1e-a257-ad32ce134bb5')
|
||||
def test_create_update_delete_health_monitor(self):
|
||||
# Creates a health_monitor
|
||||
body = self.client.create_health_monitor(delay=4,
|
||||
max_retries=3,
|
||||
type="TCP",
|
||||
timeout=1)
|
||||
health_monitor = body['health_monitor']
|
||||
# Verification of health_monitor update
|
||||
body = (self.client.update_health_monitor
|
||||
(health_monitor['id'],
|
||||
admin_state_up=False))
|
||||
updated_health_monitor = body['health_monitor']
|
||||
self.assertFalse(updated_health_monitor['admin_state_up'])
|
||||
# Verification of health_monitor delete
|
||||
body = self.client.delete_health_monitor(health_monitor['id'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('d3e1aebc-06c2-49b3-9816-942af54012eb')
|
||||
def test_create_health_monitor_http_type(self):
|
||||
hm_type = "HTTP"
|
||||
body = self.client.create_health_monitor(delay=4,
|
||||
max_retries=3,
|
||||
type=hm_type,
|
||||
timeout=1)
|
||||
health_monitor = body['health_monitor']
|
||||
self.addCleanup(self.client.delete_health_monitor,
|
||||
health_monitor['id'])
|
||||
self.assertEqual(hm_type, health_monitor['type'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('0eff9f67-90fb-4bb1-b4ed-c5fda99fff0c')
|
||||
def test_update_health_monitor_http_method(self):
|
||||
body = self.client.create_health_monitor(delay=4,
|
||||
max_retries=3,
|
||||
type="HTTP",
|
||||
timeout=1)
|
||||
health_monitor = body['health_monitor']
|
||||
self.addCleanup(self.client.delete_health_monitor,
|
||||
health_monitor['id'])
|
||||
body = (self.client.update_health_monitor
|
||||
(health_monitor['id'],
|
||||
http_method="POST",
|
||||
url_path="/home/user",
|
||||
expected_codes="290"))
|
||||
updated_health_monitor = body['health_monitor']
|
||||
self.assertEqual("POST", updated_health_monitor['http_method'])
|
||||
self.assertEqual("/home/user", updated_health_monitor['url_path'])
|
||||
self.assertEqual("290", updated_health_monitor['expected_codes'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('08e126ab-1407-483f-a22e-b11cc032ca7c')
|
||||
def test_show_health_monitor(self):
|
||||
# Verifies the details of a health_monitor
|
||||
body = self.client.show_health_monitor(self.health_monitor['id'])
|
||||
health_monitor = body['health_monitor']
|
||||
for key, value in health_monitor.iteritems():
|
||||
# 'status' should not be confirmed in api tests
|
||||
if key != 'status':
|
||||
self.assertEqual(self.health_monitor[key], value)
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('87f7628e-8918-493d-af50-0602845dbb5b')
|
||||
def test_associate_disassociate_health_monitor_with_pool(self):
|
||||
# Verify that a health monitor can be associated with a pool
|
||||
self.client.associate_health_monitor_with_pool(
|
||||
self.health_monitor['id'], self.pool['id'])
|
||||
body = self.client.show_health_monitor(
|
||||
self.health_monitor['id'])
|
||||
health_monitor = body['health_monitor']
|
||||
body = self.client.show_pool(self.pool['id'])
|
||||
pool = body['pool']
|
||||
self.assertIn(pool['id'],
|
||||
[p['pool_id'] for p in health_monitor['pools']])
|
||||
self.assertIn(health_monitor['id'], pool['health_monitors'])
|
||||
# Verify that a health monitor can be disassociated from a pool
|
||||
(self.client.disassociate_health_monitor_with_pool
|
||||
(self.health_monitor['id'], self.pool['id']))
|
||||
body = self.client.show_pool(self.pool['id'])
|
||||
pool = body['pool']
|
||||
body = self.client.show_health_monitor(
|
||||
self.health_monitor['id'])
|
||||
health_monitor = body['health_monitor']
|
||||
self.assertNotIn(health_monitor['id'], pool['health_monitors'])
|
||||
self.assertNotIn(pool['id'],
|
||||
[p['pool_id'] for p in health_monitor['pools']])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('525fc7dc-be24-408d-938d-822e9783e027')
|
||||
def test_get_lb_pool_stats(self):
|
||||
# Verify the details of pool stats
|
||||
body = self.client.list_lb_pool_stats(self.pool['id'])
|
||||
stats = body['stats']
|
||||
self.assertIn("bytes_in", stats)
|
||||
self.assertIn("total_connections", stats)
|
||||
self.assertIn("active_connections", stats)
|
||||
self.assertIn("bytes_out", stats)
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('66236be2-5121-4047-8cde-db4b83b110a5')
|
||||
def test_update_list_of_health_monitors_associated_with_pool(self):
|
||||
(self.client.associate_health_monitor_with_pool
|
||||
(self.health_monitor['id'], self.pool['id']))
|
||||
self.client.update_health_monitor(
|
||||
self.health_monitor['id'], admin_state_up=False)
|
||||
body = self.client.show_pool(self.pool['id'])
|
||||
health_monitors = body['pool']['health_monitors']
|
||||
for health_monitor_id in health_monitors:
|
||||
body = self.client.show_health_monitor(health_monitor_id)
|
||||
self.assertFalse(body['health_monitor']['admin_state_up'])
|
||||
(self.client.disassociate_health_monitor_with_pool
|
||||
(self.health_monitor['id'], self.pool['id']))
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('44ec9b40-b501-41e2-951f-4fc673b15ac0')
|
||||
def test_update_admin_state_up_of_pool(self):
|
||||
self.client.update_pool(self.pool['id'],
|
||||
admin_state_up=False)
|
||||
body = self.client.show_pool(self.pool['id'])
|
||||
pool = body['pool']
|
||||
self.assertFalse(pool['admin_state_up'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('466a9d4c-37c6-4ea2-b807-133437beb48c')
|
||||
def test_show_vip_associated_with_pool(self):
|
||||
body = self.client.show_pool(self.pool['id'])
|
||||
pool = body['pool']
|
||||
body = self.client.show_vip(pool['vip_id'])
|
||||
vip = body['vip']
|
||||
self.assertEqual(self.vip['name'], vip['name'])
|
||||
self.assertEqual(self.vip['id'], vip['id'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('7b97694e-69d0-4151-b265-e1052a465aa8')
|
||||
def test_show_members_associated_with_pool(self):
|
||||
body = self.client.show_pool(self.pool['id'])
|
||||
members = body['pool']['members']
|
||||
for member_id in members:
|
||||
body = self.client.show_member(member_id)
|
||||
self.assertIsNotNone(body['member']['status'])
|
||||
self.assertEqual(member_id, body['member']['id'])
|
||||
self.assertIsNotNone(body['member']['admin_state_up'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('73ed6f27-595b-4b2c-969c-dbdda6b8ab34')
|
||||
def test_update_pool_related_to_member(self):
|
||||
# Create new pool
|
||||
body = self.client.create_pool(name=data_utils.rand_name("pool-"),
|
||||
lb_method='ROUND_ROBIN',
|
||||
protocol='HTTP',
|
||||
subnet_id=self.subnet['id'])
|
||||
new_pool = body['pool']
|
||||
self.addCleanup(self.client.delete_pool, new_pool['id'])
|
||||
# Update member with new pool's id
|
||||
body = self.client.update_member(self.member['id'],
|
||||
pool_id=new_pool['id'])
|
||||
# Confirm with show that pool_id change
|
||||
body = self.client.show_member(self.member['id'])
|
||||
member = body['member']
|
||||
self.assertEqual(member['pool_id'], new_pool['id'])
|
||||
# Update member with old pool id, this is needed for clean up
|
||||
body = self.client.update_member(self.member['id'],
|
||||
pool_id=self.pool['id'])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('cf63f071-bbe3-40ba-97a0-a33e11923162')
|
||||
def test_update_member_weight(self):
|
||||
self.client.update_member(self.member['id'],
|
||||
weight=2)
|
||||
body = self.client.show_member(self.member['id'])
|
||||
member = body['member']
|
||||
self.assertEqual(2, member['weight'])
|
||||
|
||||
|
||||
@decorators.skip_because(bug="1402007")
class LoadBalancerIpV6TestJSON(LoadBalancerTestJSON):
    # Re-run the entire load balancer API suite over IPv6 addressing;
    # currently skipped while bug 1402007 is unresolved.
    _ip_version = 6
|
150
neutron/tests/tempest/api/network/test_metering_extensions.py
Normal file
150
neutron/tests/tempest/api/network/test_metering_extensions.py
Normal file
@ -0,0 +1,150 @@
|
||||
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MeteringTestJSON(base.BaseAdminNetworkTest):

    """
    Tests the following operations in the Neutron API using the REST client for
    Neutron:

        List, Show, Create, Delete Metering labels
        List, Show, Create, Delete Metering labels rules
    """

    @classmethod
    def resource_setup(cls):
        """Create one shared metering label and one rule for the tests."""
        super(MeteringTestJSON, cls).resource_setup()
        if not test.is_extension_enabled('metering', 'network'):
            msg = "metering extension not enabled."
            raise cls.skipException(msg)
        description = "metering label created by tempest"
        name = data_utils.rand_name("metering-label")
        cls.metering_label = cls.create_metering_label(name, description)
        # Pick a prefix matching the address family under test.
        remote_ip_prefix = ("10.0.0.0/24" if cls._ip_version == 4
                            else "fd02::/64")
        direction = "ingress"
        cls.metering_label_rule = cls.create_metering_label_rule(
            remote_ip_prefix, direction,
            metering_label_id=cls.metering_label['id'])

    def _delete_metering_label(self, metering_label_id):
        """Delete a label and assert it no longer appears in the listing."""
        self.admin_client.delete_metering_label(metering_label_id)
        labels = self.admin_client.list_metering_labels(id=metering_label_id)
        # assertEqual(expected, observed) argument order, consistent with
        # the other assertions in this module.
        self.assertEqual(0, len(labels['metering_labels']))

    def _delete_metering_label_rule(self, metering_label_rule_id):
        """Delete a rule and assert it no longer appears in the listing."""
        self.admin_client.delete_metering_label_rule(
            metering_label_rule_id)
        rules = (self.admin_client.list_metering_label_rules(
                 id=metering_label_rule_id))
        self.assertEqual(0, len(rules['metering_label_rules']))

    @test.attr(type='smoke')
    @test.idempotent_id('e2fb2f8c-45bf-429a-9f17-171c70444612')
    def test_list_metering_labels(self):
        """Filtering labels by a bogus id must return an empty list."""
        body = self.admin_client.list_metering_labels(id=33)
        metering_labels = body['metering_labels']
        self.assertEqual(0, len(metering_labels))

    @test.attr(type='smoke')
    @test.idempotent_id('ec8e15ff-95d0-433b-b8a6-b466bddb1e50')
    def test_create_delete_metering_label_with_filters(self):
        """A created label must be retrievable via an id filter."""
        name = data_utils.rand_name('metering-label-')
        description = "label created by tempest"
        body = self.admin_client.create_metering_label(name=name,
                                                       description=description)
        metering_label = body['metering_label']
        self.addCleanup(self._delete_metering_label,
                        metering_label['id'])
        # Assert the created label is found via an id filter.
        labels = (self.admin_client.list_metering_labels(
                  id=metering_label['id']))
        self.assertEqual(1, len(labels['metering_labels']))

    @test.attr(type='smoke')
    @test.idempotent_id('30abb445-0eea-472e-bd02-8649f54a5968')
    def test_show_metering_label(self):
        """show_metering_label must echo the stored label attributes."""
        body = self.admin_client.show_metering_label(self.metering_label['id'])
        metering_label = body['metering_label']
        self.assertEqual(self.metering_label['id'], metering_label['id'])
        self.assertEqual(self.metering_label['tenant_id'],
                         metering_label['tenant_id'])
        self.assertEqual(self.metering_label['name'], metering_label['name'])
        self.assertEqual(self.metering_label['description'],
                         metering_label['description'])

    @test.attr(type='smoke')
    @test.idempotent_id('cc832399-6681-493b-9d79-0202831a1281')
    def test_list_metering_label_rules(self):
        """Filtering rules by a bogus id must return an empty list."""
        body = self.admin_client.list_metering_label_rules(id=33)
        metering_label_rules = body['metering_label_rules']
        self.assertEqual(0, len(metering_label_rules))

    @test.attr(type='smoke')
    @test.idempotent_id('f4d547cd-3aee-408f-bf36-454f8825e045')
    def test_create_delete_metering_label_rule_with_filters(self):
        """A created rule must be retrievable via an id filter."""
        # Pick a prefix matching the address family under test.
        remote_ip_prefix = ("10.0.1.0/24" if self._ip_version == 4
                            else "fd03::/64")
        body = (self.admin_client.create_metering_label_rule(
                remote_ip_prefix=remote_ip_prefix,
                direction="ingress",
                metering_label_id=self.metering_label['id']))
        metering_label_rule = body['metering_label_rule']
        self.addCleanup(self._delete_metering_label_rule,
                        metering_label_rule['id'])
        # Assert the created rule is found via an id filter.
        rules = (self.admin_client.list_metering_label_rules(
                 id=metering_label_rule['id']))
        self.assertEqual(1, len(rules['metering_label_rules']))

    @test.attr(type='smoke')
    @test.idempotent_id('b7354489-96ea-41f3-9452-bace120fb4a7')
    def test_show_metering_label_rule(self):
        """show_metering_label_rule must echo the stored rule attributes."""
        body = (self.admin_client.show_metering_label_rule(
                self.metering_label_rule['id']))
        metering_label_rule = body['metering_label_rule']
        self.assertEqual(self.metering_label_rule['id'],
                         metering_label_rule['id'])
        self.assertEqual(self.metering_label_rule['remote_ip_prefix'],
                         metering_label_rule['remote_ip_prefix'])
        self.assertEqual(self.metering_label_rule['direction'],
                         metering_label_rule['direction'])
        self.assertEqual(self.metering_label_rule['metering_label_id'],
                         metering_label_rule['metering_label_id'])
        self.assertFalse(metering_label_rule['excluded'])
|
||||
|
||||
|
||||
class MeteringIpV6TestJSON(MeteringTestJSON):
    # Re-run the metering API tests over IPv6 addressing.
    _ip_version = 6
|
675
neutron/tests/tempest/api/network/test_networks.py
Normal file
675
neutron/tests/tempest/api/network/test_networks.py
Normal file
@ -0,0 +1,675 @@
|
||||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import itertools
|
||||
|
||||
import netaddr
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest.common import custom_matchers
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class NetworksTestJSON(base.BaseNetworkTest):
|
||||
|
||||
"""
|
||||
Tests the following operations in the Neutron API using the REST client for
|
||||
Neutron:
|
||||
|
||||
create a network for a tenant
|
||||
list tenant's networks
|
||||
show a tenant network details
|
||||
create a subnet for a tenant
|
||||
list tenant's subnets
|
||||
show a tenant subnet details
|
||||
network update
|
||||
subnet update
|
||||
delete a network also deletes its subnets
|
||||
list external networks
|
||||
|
||||
All subnet tests are run once with ipv4 and once with ipv6.
|
||||
|
||||
v2.0 of the Neutron API is assumed. It is also assumed that the following
|
||||
options are defined in the [network] section of etc/tempest.conf:
|
||||
|
||||
tenant_network_cidr with a block of cidr's from which smaller blocks
|
||||
can be allocated for tenant ipv4 subnets
|
||||
|
||||
tenant_network_v6_cidr is the equivalent for ipv6 subnets
|
||||
|
||||
tenant_network_mask_bits with the mask bits to be used to partition the
|
||||
block defined by tenant_network_cidr
|
||||
|
||||
tenant_network_v6_mask_bits is the equivalent for ipv6 subnets
|
||||
"""
|
||||
|
||||
    @classmethod
    def resource_setup(cls):
        """Create the shared network/subnet and precompute the per-IP-version
        subnet attribute fixtures used by the create/update subnet tests.
        """
        super(NetworksTestJSON, cls).resource_setup()
        cls.network = cls.create_network()
        cls.name = cls.network['name']
        cls.subnet = cls._create_subnet_with_last_subnet_block(cls.network,
                                                               cls._ip_version)
        cls.cidr = cls.subnet['cidr']
        # Canned subnet attributes keyed by IP version (6 and 4); the
        # 'new_*' entries provide replacement values for update tests.
        cls._subnet_data = {6: {'gateway':
                                str(cls._get_gateway_from_tempest_conf(6)),
                                'allocation_pools':
                                cls._get_allocation_pools_from_gateway(6),
                                'dns_nameservers': ['2001:4860:4860::8844',
                                                    '2001:4860:4860::8888'],
                                'host_routes': [{'destination': '2001::/64',
                                                 'nexthop': '2003::1'}],
                                'new_host_routes': [{'destination':
                                                     '2001::/64',
                                                     'nexthop': '2005::1'}],
                                'new_dns_nameservers':
                                ['2001:4860:4860::7744',
                                 '2001:4860:4860::7888']},
                            4: {'gateway':
                                str(cls._get_gateway_from_tempest_conf(4)),
                                'allocation_pools':
                                cls._get_allocation_pools_from_gateway(4),
                                'dns_nameservers': ['8.8.4.4', '8.8.8.8'],
                                'host_routes': [{'destination': '10.20.0.0/32',
                                                 'nexthop': '10.100.1.1'}],
                                'new_host_routes': [{'destination':
                                                     '10.20.0.0/32',
                                                     'nexthop':
                                                     '10.100.1.2'}],
                                'new_dns_nameservers': ['7.8.8.8', '7.8.4.4']}}
|
||||
|
||||
@classmethod
|
||||
def _create_subnet_with_last_subnet_block(cls, network, ip_version):
|
||||
"""Derive last subnet CIDR block from tenant CIDR and
|
||||
create the subnet with that derived CIDR
|
||||
"""
|
||||
if ip_version == 4:
|
||||
cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
|
||||
mask_bits = CONF.network.tenant_network_mask_bits
|
||||
elif ip_version == 6:
|
||||
cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
|
||||
mask_bits = CONF.network.tenant_network_v6_mask_bits
|
||||
|
||||
subnet_cidr = list(cidr.subnet(mask_bits))[-1]
|
||||
gateway_ip = str(netaddr.IPAddress(subnet_cidr) + 1)
|
||||
return cls.create_subnet(network, gateway=gateway_ip,
|
||||
cidr=subnet_cidr, mask_bits=mask_bits)
|
||||
|
||||
@classmethod
|
||||
def _get_gateway_from_tempest_conf(cls, ip_version):
|
||||
"""Return first subnet gateway for configured CIDR """
|
||||
if ip_version == 4:
|
||||
cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
|
||||
mask_bits = CONF.network.tenant_network_mask_bits
|
||||
elif ip_version == 6:
|
||||
cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
|
||||
mask_bits = CONF.network.tenant_network_v6_mask_bits
|
||||
|
||||
if mask_bits >= cidr.prefixlen:
|
||||
return netaddr.IPAddress(cidr) + 1
|
||||
else:
|
||||
for subnet in cidr.subnet(mask_bits):
|
||||
return netaddr.IPAddress(subnet) + 1
|
||||
|
||||
@classmethod
|
||||
def _get_allocation_pools_from_gateway(cls, ip_version):
|
||||
"""Return allocation range for subnet of given gateway"""
|
||||
gateway = cls._get_gateway_from_tempest_conf(ip_version)
|
||||
return [{'start': str(gateway + 2), 'end': str(gateway + 3)}]
|
||||
|
||||
def subnet_dict(self, include_keys):
|
||||
"""Return a subnet dict which has include_keys and their corresponding
|
||||
value from self._subnet_data
|
||||
"""
|
||||
return dict((key, self._subnet_data[self._ip_version][key])
|
||||
for key in include_keys)
|
||||
|
||||
def _compare_resource_attrs(self, actual, expected):
|
||||
exclude_keys = set(actual).symmetric_difference(expected)
|
||||
self.assertThat(actual, custom_matchers.MatchesDictExceptForKeys(
|
||||
expected, exclude_keys))
|
||||
|
||||
def _delete_network(self, network):
|
||||
# Deleting network also deletes its subnets if exists
|
||||
self.client.delete_network(network['id'])
|
||||
if network in self.networks:
|
||||
self.networks.remove(network)
|
||||
for subnet in self.subnets:
|
||||
if subnet['network_id'] == network['id']:
|
||||
self.subnets.remove(subnet)
|
||||
|
||||
def _create_verify_delete_subnet(self, cidr=None, mask_bits=None,
|
||||
**kwargs):
|
||||
network = self.create_network()
|
||||
net_id = network['id']
|
||||
gateway = kwargs.pop('gateway', None)
|
||||
subnet = self.create_subnet(network, gateway, cidr, mask_bits,
|
||||
**kwargs)
|
||||
compare_args_full = dict(gateway_ip=gateway, cidr=cidr,
|
||||
mask_bits=mask_bits, **kwargs)
|
||||
compare_args = dict((k, v) for k, v in compare_args_full.iteritems()
|
||||
if v is not None)
|
||||
|
||||
if 'dns_nameservers' in set(subnet).intersection(compare_args):
|
||||
self.assertEqual(sorted(compare_args['dns_nameservers']),
|
||||
sorted(subnet['dns_nameservers']))
|
||||
del subnet['dns_nameservers'], compare_args['dns_nameservers']
|
||||
|
||||
self._compare_resource_attrs(subnet, compare_args)
|
||||
self.client.delete_network(net_id)
|
||||
self.networks.pop()
|
||||
self.subnets.pop()
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('0e269138-0da6-4efc-a46d-578161e7b221')
|
||||
def test_create_update_delete_network_subnet(self):
|
||||
# Create a network
|
||||
name = data_utils.rand_name('network-')
|
||||
network = self.create_network(network_name=name)
|
||||
self.addCleanup(self._delete_network, network)
|
||||
net_id = network['id']
|
||||
self.assertEqual('ACTIVE', network['status'])
|
||||
# Verify network update
|
||||
new_name = "New_network"
|
||||
body = self.client.update_network(net_id, name=new_name)
|
||||
updated_net = body['network']
|
||||
self.assertEqual(updated_net['name'], new_name)
|
||||
# Find a cidr that is not in use yet and create a subnet with it
|
||||
subnet = self.create_subnet(network)
|
||||
subnet_id = subnet['id']
|
||||
# Verify subnet update
|
||||
new_name = "New_subnet"
|
||||
body = self.client.update_subnet(subnet_id, name=new_name)
|
||||
updated_subnet = body['subnet']
|
||||
self.assertEqual(updated_subnet['name'], new_name)
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('2bf13842-c93f-4a69-83ed-717d2ec3b44e')
|
||||
def test_show_network(self):
|
||||
# Verify the details of a network
|
||||
body = self.client.show_network(self.network['id'])
|
||||
network = body['network']
|
||||
for key in ['id', 'name']:
|
||||
self.assertEqual(network[key], self.network[key])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('867819bb-c4b6-45f7-acf9-90edcf70aa5e')
|
||||
def test_show_network_fields(self):
|
||||
# Verify specific fields of a network
|
||||
fields = ['id', 'name']
|
||||
body = self.client.show_network(self.network['id'],
|
||||
fields=fields)
|
||||
network = body['network']
|
||||
self.assertEqual(sorted(network.keys()), sorted(fields))
|
||||
for field_name in fields:
|
||||
self.assertEqual(network[field_name], self.network[field_name])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('f7ffdeda-e200-4a7a-bcbe-05716e86bf43')
|
||||
def test_list_networks(self):
|
||||
# Verify the network exists in the list of all networks
|
||||
body = self.client.list_networks()
|
||||
networks = [network['id'] for network in body['networks']
|
||||
if network['id'] == self.network['id']]
|
||||
self.assertNotEmpty(networks, "Created network not found in the list")
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('6ae6d24f-9194-4869-9c85-c313cb20e080')
|
||||
def test_list_networks_fields(self):
|
||||
# Verify specific fields of the networks
|
||||
fields = ['id', 'name']
|
||||
body = self.client.list_networks(fields=fields)
|
||||
networks = body['networks']
|
||||
self.assertNotEmpty(networks, "Network list returned is empty")
|
||||
for network in networks:
|
||||
self.assertEqual(sorted(network.keys()), sorted(fields))
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc')
|
||||
def test_show_subnet(self):
|
||||
# Verify the details of a subnet
|
||||
body = self.client.show_subnet(self.subnet['id'])
|
||||
subnet = body['subnet']
|
||||
self.assertNotEmpty(subnet, "Subnet returned has no fields")
|
||||
for key in ['id', 'cidr']:
|
||||
self.assertIn(key, subnet)
|
||||
self.assertEqual(subnet[key], self.subnet[key])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('270fff0b-8bfc-411f-a184-1e8fd35286f0')
|
||||
def test_show_subnet_fields(self):
|
||||
# Verify specific fields of a subnet
|
||||
fields = ['id', 'network_id']
|
||||
body = self.client.show_subnet(self.subnet['id'],
|
||||
fields=fields)
|
||||
subnet = body['subnet']
|
||||
self.assertEqual(sorted(subnet.keys()), sorted(fields))
|
||||
for field_name in fields:
|
||||
self.assertEqual(subnet[field_name], self.subnet[field_name])
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('db68ba48-f4ea-49e9-81d1-e367f6d0b20a')
|
||||
def test_list_subnets(self):
|
||||
# Verify the subnet exists in the list of all subnets
|
||||
body = self.client.list_subnets()
|
||||
subnets = [subnet['id'] for subnet in body['subnets']
|
||||
if subnet['id'] == self.subnet['id']]
|
||||
self.assertNotEmpty(subnets, "Created subnet not found in the list")
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('842589e3-9663-46b0-85e4-7f01273b0412')
|
||||
def test_list_subnets_fields(self):
|
||||
# Verify specific fields of subnets
|
||||
fields = ['id', 'network_id']
|
||||
body = self.client.list_subnets(fields=fields)
|
||||
subnets = body['subnets']
|
||||
self.assertNotEmpty(subnets, "Subnet list returned is empty")
|
||||
for subnet in subnets:
|
||||
self.assertEqual(sorted(subnet.keys()), sorted(fields))
|
||||
|
||||
def _try_delete_network(self, net_id):
|
||||
# delete network, if it exists
|
||||
try:
|
||||
self.client.delete_network(net_id)
|
||||
# if network is not found, this means it was deleted in the test
|
||||
except lib_exc.NotFound:
|
||||
pass
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('f04f61a9-b7f3-4194-90b2-9bcf660d1bfe')
|
||||
def test_delete_network_with_subnet(self):
|
||||
# Creates a network
|
||||
name = data_utils.rand_name('network-')
|
||||
body = self.client.create_network(name=name)
|
||||
network = body['network']
|
||||
net_id = network['id']
|
||||
self.addCleanup(self._try_delete_network, net_id)
|
||||
|
||||
# Find a cidr that is not in use yet and create a subnet with it
|
||||
subnet = self.create_subnet(network)
|
||||
subnet_id = subnet['id']
|
||||
|
||||
# Delete network while the subnet still exists
|
||||
body = self.client.delete_network(net_id)
|
||||
|
||||
# Verify that the subnet got automatically deleted.
|
||||
self.assertRaises(lib_exc.NotFound, self.client.show_subnet,
|
||||
subnet_id)
|
||||
|
||||
# Since create_subnet adds the subnet to the delete list, and it is
|
||||
# is actually deleted here - this will create and issue, hence remove
|
||||
# it from the list.
|
||||
self.subnets.pop()
|
||||
|
||||
    @test.attr(type='smoke')
    @test.idempotent_id('d2d596e2-8e76-47a9-ac51-d4648009f4d3')
    def test_create_delete_subnet_without_gateway(self):
        # Create/verify/delete cycle with default arguments (no explicit
        # gateway passed to the subnet create call).
        self._create_verify_delete_subnet()
|
||||
|
||||
    @test.attr(type='smoke')
    @test.idempotent_id('9393b468-186d-496d-aa36-732348cd76e7')
    def test_create_delete_subnet_with_gw(self):
        # Create/verify/delete a subnet with an explicit gateway address.
        self._create_verify_delete_subnet(
            **self.subnet_dict(['gateway']))
|
||||
|
||||
    @test.attr(type='smoke')
    @test.idempotent_id('bec949c4-3147-4ba6-af5f-cd2306118404')
    def test_create_delete_subnet_with_allocation_pools(self):
        # Create/verify/delete a subnet with explicit allocation pools.
        self._create_verify_delete_subnet(
            **self.subnet_dict(['allocation_pools']))
|
||||
|
||||
    @test.attr(type='smoke')
    @test.idempotent_id('8217a149-0c6c-4cfb-93db-0486f707d13f')
    def test_create_delete_subnet_with_gw_and_allocation_pools(self):
        # Create/verify/delete a subnet with both an explicit gateway and
        # allocation pools.
        self._create_verify_delete_subnet(**self.subnet_dict(
            ['gateway', 'allocation_pools']))
|
||||
|
||||
    @test.attr(type='smoke')
    @test.idempotent_id('d830de0a-be47-468f-8f02-1fd996118289')
    def test_create_delete_subnet_with_host_routes_and_dns_nameservers(self):
        # Create/verify/delete a subnet with host routes and DNS servers.
        self._create_verify_delete_subnet(
            **self.subnet_dict(['host_routes', 'dns_nameservers']))
|
||||
|
||||
    @test.attr(type='smoke')
    @test.idempotent_id('94ce038d-ff0a-4a4c-a56b-09da3ca0b55d')
    def test_create_delete_subnet_with_dhcp_enabled(self):
        # Create/verify/delete a subnet with DHCP explicitly enabled.
        self._create_verify_delete_subnet(enable_dhcp=True)
|
||||
|
||||
    @test.attr(type='smoke')
    @test.idempotent_id('3d3852eb-3009-49ec-97ac-5ce83b73010a')
    def test_update_subnet_gw_dns_host_routes_dhcp(self):
        """Update gateway, DNS servers, host routes, DHCP flag and name.

        Creates a subnet with gateway/host_routes/dns_nameservers/
        allocation_pools set, updates all of them (plus the name) in a
        single call, then verifies the attributes on the returned subnet.
        """
        network = self.create_network()
        self.addCleanup(self._delete_network, network)

        subnet = self.create_subnet(
            network, **self.subnet_dict(['gateway', 'host_routes',
                                         'dns_nameservers',
                                         'allocation_pools']))
        subnet_id = subnet['id']
        # New gateway is the canned gateway address shifted by one.
        new_gateway = str(netaddr.IPAddress(
                          self._subnet_data[self._ip_version]['gateway']) + 1)
        # Verify subnet update
        new_host_routes = self._subnet_data[self._ip_version][
            'new_host_routes']

        new_dns_nameservers = self._subnet_data[self._ip_version][
            'new_dns_nameservers']
        kwargs = {'host_routes': new_host_routes,
                  'dns_nameservers': new_dns_nameservers,
                  'gateway_ip': new_gateway, 'enable_dhcp': True}

        new_name = "New_subnet"
        body = self.client.update_subnet(subnet_id, name=new_name,
                                         **kwargs)
        updated_subnet = body['subnet']
        kwargs['name'] = new_name
        # Nameserver ordering is not guaranteed, so compare sorted lists.
        self.assertEqual(sorted(updated_subnet['dns_nameservers']),
                         sorted(kwargs['dns_nameservers']))
        # dns_nameservers was already verified above; drop it from both
        # dicts before the generic attribute comparison below.
        del subnet['dns_nameservers'], kwargs['dns_nameservers']

        self._compare_resource_attrs(updated_subnet, kwargs)
|
||||
|
||||
    @test.attr(type='smoke')
    @test.idempotent_id('a4d9ec4c-0306-4111-a75c-db01a709030b')
    def test_create_delete_subnet_all_attributes(self):
        # Create/verify/delete a subnet with every optional attribute set.
        self._create_verify_delete_subnet(
            enable_dhcp=True,
            **self.subnet_dict(['gateway', 'host_routes', 'dns_nameservers']))
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('af774677-42a9-4e4b-bb58-16fe6a5bc1ec')
|
||||
def test_external_network_visibility(self):
|
||||
"""Verifies user can see external networks but not subnets."""
|
||||
body = self.client.list_networks(**{'router:external': True})
|
||||
networks = [network['id'] for network in body['networks']]
|
||||
self.assertNotEmpty(networks, "No external networks found")
|
||||
|
||||
nonexternal = [net for net in body['networks'] if
|
||||
not net['router:external']]
|
||||
self.assertEmpty(nonexternal, "Found non-external networks"
|
||||
" in filtered list (%s)." % nonexternal)
|
||||
self.assertIn(CONF.network.public_network_id, networks)
|
||||
|
||||
subnets_iter = (network['subnets'] for network in body['networks'])
|
||||
# subnets_iter is a list (iterator) of lists. This flattens it to a
|
||||
# list of UUIDs
|
||||
public_subnets_iter = itertools.chain(*subnets_iter)
|
||||
body = self.client.list_subnets()
|
||||
subnets = [sub['id'] for sub in body['subnets']
|
||||
if sub['id'] in public_subnets_iter]
|
||||
self.assertEmpty(subnets, "Public subnets visible")
|
||||
|
||||
|
||||
class BulkNetworkOpsTestJSON(base.BaseNetworkTest):

    """
    Tests the following operations in the Neutron API using the REST client for
    Neutron:

        bulk network creation
        bulk subnet creation
        bulk port creation
        list tenant's networks

    v2.0 of the Neutron API is assumed. It is also assumed that the following
    options are defined in the [network] section of etc/tempest.conf:

        tenant_network_cidr with a block of cidr's from which smaller blocks
        can be allocated for tenant networks

        tenant_network_mask_bits with the mask bits to be used to partition the
        block defined by tenant-network_cidr
    """

    def _delete_networks(self, created_networks):
        """Delete networks and assert they are gone from the listing."""
        for n in created_networks:
            self.client.delete_network(n['id'])
        # Asserting that the networks are not found in the list after deletion
        body = self.client.list_networks()
        networks_list = [network['id'] for network in body['networks']]
        for n in created_networks:
            self.assertNotIn(n['id'], networks_list)

    def _delete_subnets(self, created_subnets):
        """Delete subnets and assert they are gone from the listing."""
        for n in created_subnets:
            self.client.delete_subnet(n['id'])
        # Asserting that the subnets are not found in the list after deletion
        body = self.client.list_subnets()
        subnets_list = [subnet['id'] for subnet in body['subnets']]
        for n in created_subnets:
            self.assertNotIn(n['id'], subnets_list)

    def _delete_ports(self, created_ports):
        """Delete ports and assert they are gone from the listing."""
        for n in created_ports:
            self.client.delete_port(n['id'])
        # Asserting that the ports are not found in the list after deletion
        body = self.client.list_ports()
        ports_list = [port['id'] for port in body['ports']]
        for n in created_ports:
            self.assertNotIn(n['id'], ports_list)

    @test.attr(type='smoke')
    @test.idempotent_id('d4f9024d-1e28-4fc1-a6b1-25dbc6fa11e2')
    def test_bulk_create_delete_network(self):
        """Bulk-create two networks and verify both appear in the list."""
        # Creates 2 networks in one request
        network_names = [data_utils.rand_name('network-'),
                         data_utils.rand_name('network-')]
        body = self.client.create_bulk_network(network_names)
        created_networks = body['networks']
        self.addCleanup(self._delete_networks, created_networks)
        # Asserting that the networks are found in the list after creation
        body = self.client.list_networks()
        networks_list = [network['id'] for network in body['networks']]
        for n in created_networks:
            self.assertIsNotNone(n['id'])
            self.assertIn(n['id'], networks_list)

    @test.attr(type='smoke')
    @test.idempotent_id('8936533b-c0aa-4f29-8e53-6cc873aec489')
    def test_bulk_create_delete_subnet(self):
        """Bulk-create two subnets and verify both appear in the list."""
        networks = [self.create_network(), self.create_network()]
        # Creates 2 subnets in one request
        if self._ip_version == 4:
            cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
            mask_bits = CONF.network.tenant_network_mask_bits
        else:
            cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
            mask_bits = CONF.network.tenant_network_v6_mask_bits

        names = [data_utils.rand_name('subnet-') for i in range(len(networks))]

        # Only len(names) candidate cidrs are needed; islice avoids
        # materializing every subnet of a potentially large address block
        # (the previous list comprehension enumerated all of them).
        cidrs = list(itertools.islice(cidr.subnet(mask_bits), len(names)))

        subnets_list = []
        for i in range(len(names)):
            p1 = {
                'network_id': networks[i]['id'],
                'cidr': str(cidrs[i]),
                'name': names[i],
                'ip_version': self._ip_version
            }
            subnets_list.append(p1)
        # One subnet is created without a name to exercise the default.
        del subnets_list[1]['name']
        body = self.client.create_bulk_subnet(subnets_list)
        created_subnets = body['subnets']
        self.addCleanup(self._delete_subnets, created_subnets)
        # Asserting that the subnets are found in the list after creation
        body = self.client.list_subnets()
        subnets_list = [subnet['id'] for subnet in body['subnets']]
        for n in created_subnets:
            self.assertIsNotNone(n['id'])
            self.assertIn(n['id'], subnets_list)

    @test.attr(type='smoke')
    @test.idempotent_id('48037ff2-e889-4c3b-b86a-8e3f34d2d060')
    def test_bulk_create_delete_port(self):
        """Bulk-create two ports and verify both appear in the list."""
        networks = [self.create_network(), self.create_network()]
        # Creates 2 ports in one request
        names = [data_utils.rand_name('port-') for i in range(len(networks))]
        port_list = []
        state = [True, False]
        for i in range(len(names)):
            p1 = {
                'network_id': networks[i]['id'],
                'name': names[i],
                'admin_state_up': state[i],
            }
            port_list.append(p1)
        # One port is created without a name to exercise the default.
        del port_list[1]['name']
        body = self.client.create_bulk_port(port_list)
        created_ports = body['ports']
        self.addCleanup(self._delete_ports, created_ports)
        # Asserting that the ports are found in the list after creation
        body = self.client.list_ports()
        ports_list = [port['id'] for port in body['ports']]
        for n in created_ports:
            self.assertIsNotNone(n['id'])
            self.assertIn(n['id'], ports_list)
|
||||
|
||||
|
||||
class BulkNetworkOpsIpV6TestJSON(BulkNetworkOpsTestJSON):
    """Run the bulk network/subnet/port tests against IPv6 tenant CIDRs."""

    # Switches the inherited tests to IPv6 addressing.
    _ip_version = 6
|
||||
|
||||
|
||||
class NetworksIpV6TestJSON(NetworksTestJSON):
    """Run the network/subnet tests with IPv6, plus v6-specific checks."""

    # Switches the inherited tests to IPv6 addressing.
    _ip_version = 6

    @test.attr(type='smoke')
    @test.idempotent_id('e41a4888-65a6-418c-a095-f7c2ef4ad59a')
    def test_create_delete_subnet_with_gw(self):
        """Create an IPv6 subnet with an explicit (non-default) gateway."""
        net = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
        # Use .2 instead of the default .1 so the explicit value is visible.
        gateway = str(netaddr.IPAddress(net.first + 2))
        name = data_utils.rand_name('network-')
        network = self.create_network(network_name=name)
        subnet = self.create_subnet(network, gateway)
        # Verifies Subnet GW in IPv6
        self.assertEqual(subnet['gateway_ip'], gateway)

    @test.attr(type='smoke')
    @test.idempotent_id('ebb4fd95-524f-46af-83c1-0305b239338f')
    def test_create_delete_subnet_with_default_gw(self):
        """Create an IPv6 subnet and verify the default gateway (.1)."""
        net = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
        gateway_ip = str(netaddr.IPAddress(net.first + 1))
        name = data_utils.rand_name('network-')
        network = self.create_network(network_name=name)
        subnet = self.create_subnet(network)
        # Verifies Subnet GW in IPv6
        self.assertEqual(subnet['gateway_ip'], gateway_ip)

    @test.attr(type='smoke')
    @test.idempotent_id('a9653883-b2a4-469b-8c3c-4518430a7e55')
    def test_create_list_subnet_with_no_gw64_one_network(self):
        """Create v6 (with gateway) and v4 (gateway-less) subnets on one net.

        Verifies both subnets coexist on the same network and that the
        IPv4 subnet really has no gateway.
        """
        name = data_utils.rand_name('network-')
        # Keyword argument for consistency with the sibling tests
        # (was a positional call before).
        network = self.create_network(network_name=name)
        ipv6_gateway = self.subnet_dict(['gateway'])['gateway']
        subnet1 = self.create_subnet(network,
                                     ip_version=6,
                                     gateway=ipv6_gateway)
        self.assertEqual(netaddr.IPNetwork(subnet1['cidr']).version, 6,
                         'The created subnet is not IPv6')
        subnet2 = self.create_subnet(network,
                                     gateway=None,
                                     ip_version=4)
        self.assertEqual(netaddr.IPNetwork(subnet2['cidr']).version, 4,
                         'The created subnet is not IPv4')
        # Verifies Subnet GW is set in IPv6
        self.assertEqual(subnet1['gateway_ip'], ipv6_gateway)
        # Verifies Subnet GW is None in IPv4 (assertIsNone instead of
        # assertEqual(..., None))
        self.assertIsNone(subnet2['gateway_ip'])
        # Verifies all 2 subnets in the same network
        body = self.client.list_subnets()
        subnets = [sub['id'] for sub in body['subnets']
                   if sub['network_id'] == network['id']]
        test_subnet_ids = [sub['id'] for sub in (subnet1, subnet2)]
        self.assertItemsEqual(subnets,
                              test_subnet_ids,
                              'Subnet are not in the same network')
|
||||
|
||||
|
||||
class NetworksIpV6TestAttrs(NetworksIpV6TestJSON):
    """IPv6 subnet tests exercising ipv6_ra_mode / ipv6_address_mode."""

    @classmethod
    def resource_setup(cls):
        # These tests need the extended IPv6 subnet attributes; skip the
        # whole class when the deployment does not advertise support.
        if not CONF.network_feature_enabled.ipv6_subnet_attributes:
            raise cls.skipException("IPv6 extended attributes for "
                                    "subnets not available")
        super(NetworksIpV6TestAttrs, cls).resource_setup()

    @test.attr(type='smoke')
    @test.idempotent_id('da40cd1b-a833-4354-9a85-cd9b8a3b74ca')
    def test_create_delete_subnet_with_v6_attributes_stateful(self):
        # DHCPv6 stateful: addresses and extra info both come from DHCP.
        self._create_verify_delete_subnet(
            gateway=self._subnet_data[self._ip_version]['gateway'],
            ipv6_ra_mode='dhcpv6-stateful',
            ipv6_address_mode='dhcpv6-stateful')

    @test.attr(type='smoke')
    @test.idempotent_id('176b030f-a923-4040-a755-9dc94329e60c')
    def test_create_delete_subnet_with_v6_attributes_slaac(self):
        # SLAAC: addresses derived from router advertisements.
        self._create_verify_delete_subnet(
            ipv6_ra_mode='slaac',
            ipv6_address_mode='slaac')

    @test.attr(type='smoke')
    @test.idempotent_id('7d410310-8c86-4902-adf9-865d08e31adb')
    def test_create_delete_subnet_with_v6_attributes_stateless(self):
        # DHCPv6 stateless: SLAAC addresses, extra info from DHCP.
        self._create_verify_delete_subnet(
            ipv6_ra_mode='dhcpv6-stateless',
            ipv6_address_mode='dhcpv6-stateless')

    def _test_delete_subnet_with_ports(self, mode):
        """Create subnet and delete it with existing ports"""
        slaac_network = self.create_network()
        subnet_slaac = self.create_subnet(slaac_network,
                                          **{'ipv6_ra_mode': mode,
                                             'ipv6_address_mode': mode})
        port = self.create_port(slaac_network)
        # The port must have been auto-addressed from the v6 subnet.
        self.assertIsNotNone(port['fixed_ips'][0]['ip_address'])
        self.client.delete_subnet(subnet_slaac['id'])
        # The subnet was deleted directly above; drop it from the class
        # cleanup list so teardown does not try to delete it again.
        self.subnets.pop()
        subnets = self.client.list_subnets()
        subnet_ids = [subnet['id'] for subnet in subnets['subnets']]
        self.assertNotIn(subnet_slaac['id'], subnet_ids,
                         "Subnet wasn't deleted")
        # The network still holds the port, so deleting the network must
        # fail with a conflict even though the subnet is gone.
        self.assertRaisesRegexp(
            lib_exc.Conflict,
            "There are one or more ports still in use on the network",
            self.client.delete_network,
            slaac_network['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('88554555-ebf8-41ef-9300-4926d45e06e9')
    def test_create_delete_slaac_subnet_with_ports(self):
        """Test deleting subnet with SLAAC ports

        Create subnet with SLAAC, create ports in network
        and then you shall be able to delete subnet without port
        deletion. But you still can not delete the network.
        """
        self._test_delete_subnet_with_ports("slaac")

    @test.attr(type='smoke')
    @test.idempotent_id('2de6ab5a-fcf0-4144-9813-f91a940291f1')
    def test_create_delete_stateless_subnet_with_ports(self):
        """Test deleting subnet with DHCPv6 stateless ports

        Create subnet with DHCPv6 stateless, create ports in network
        and then you shall be able to delete subnet without port
        deletion. But you still can not delete the network.
        """
        self._test_delete_subnet_with_ports("dhcpv6-stateless")
|
59
neutron/tests/tempest/api/network/test_networks_negative.py
Normal file
59
neutron/tests/tempest/api/network/test_networks_negative.py
Normal file
@ -0,0 +1,59 @@
|
||||
# Copyright 2013 Huawei Technologies Co.,LTD.
|
||||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
|
||||
class NetworksNegativeTestJSON(base.BaseNetworkTest):
    """Negative tests: operations against non-existent resources.

    Each test generates a random identifier (which cannot match any real
    resource UUID) and verifies the server answers 404 NotFound.
    """

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('9293e937-824d-42d2-8d5b-e985ea67002a')
    def test_show_non_existent_network(self):
        bogus_id = data_utils.rand_name('network')
        self.assertRaises(lib_exc.NotFound,
                          self.client.show_network, bogus_id)

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('d746b40c-5e09-4043-99f7-cba1be8b70df')
    def test_show_non_existent_subnet(self):
        bogus_id = data_utils.rand_name('subnet')
        self.assertRaises(lib_exc.NotFound,
                          self.client.show_subnet, bogus_id)

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('a954861d-cbfd-44e8-b0a9-7fab111f235d')
    def test_show_non_existent_port(self):
        bogus_id = data_utils.rand_name('port')
        self.assertRaises(lib_exc.NotFound,
                          self.client.show_port, bogus_id)

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('98bfe4e3-574e-4012-8b17-b2647063de87')
    def test_update_non_existent_network(self):
        bogus_id = data_utils.rand_name('network')
        self.assertRaises(lib_exc.NotFound,
                          self.client.update_network,
                          bogus_id, name="new_name")

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('03795047-4a94-4120-a0a1-bd376e36fd4e')
    def test_delete_non_existent_network(self):
        bogus_id = data_utils.rand_name('network')
        self.assertRaises(lib_exc.NotFound,
                          self.client.delete_network, bogus_id)
|
402
neutron/tests/tempest/api/network/test_ports.py
Normal file
402
neutron/tests/tempest/api/network/test_ports.py
Normal file
@ -0,0 +1,402 @@
|
||||
# Copyright 2014 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import netaddr
|
||||
import socket
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest.api.network import base_security_groups as sec_base
|
||||
from neutron.tests.tempest.common import custom_matchers
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class PortsTestJSON(sec_base.BaseSecGroupTest):

    """
    Test the following operations for ports:

        port create
        port delete
        port list
        port show
        port update
    """

    @classmethod
    def resource_setup(cls):
        # Shared network/port pair used by the read-only tests below.
        super(PortsTestJSON, cls).resource_setup()
        cls.network = cls.create_network()
        cls.port = cls.create_port(cls.network)

    def _delete_port(self, port_id):
        """Delete a port and verify it is gone from the port listing."""
        self.client.delete_port(port_id)
        body = self.client.list_ports()
        ports_list = body['ports']
        # assertNotIn instead of assertFalse(x in y) for a clearer
        # failure message.
        self.assertNotIn(port_id, [n['id'] for n in ports_list])

    @test.attr(type='smoke')
    @test.idempotent_id('c72c1c0c-2193-4aca-aaa4-b1442640f51c')
    def test_create_update_delete_port(self):
        """Create a port, update its name/admin state, then delete it."""
        # Verify port creation
        body = self.client.create_port(network_id=self.network['id'])
        port = body['port']
        # Schedule port deletion with verification upon test completion
        self.addCleanup(self._delete_port, port['id'])
        self.assertTrue(port['admin_state_up'])
        # Verify port update
        new_name = "New_Port"
        body = self.client.update_port(port['id'],
                                       name=new_name,
                                       admin_state_up=False)
        updated_port = body['port']
        self.assertEqual(updated_port['name'], new_name)
        self.assertFalse(updated_port['admin_state_up'])

    @test.idempotent_id('67f1b811-f8db-43e2-86bd-72c074d4a42c')
    def test_create_bulk_port(self):
        """Bulk-create two ports on two networks and verify defaults."""
        network1 = self.network
        name = data_utils.rand_name('network-')
        network2 = self.create_network(network_name=name)
        network_list = [network1['id'], network2['id']]
        port_list = [{'network_id': net_id} for net_id in network_list]
        body = self.client.create_bulk_port(port_list)
        created_ports = body['ports']
        port1 = created_ports[0]
        port2 = created_ports[1]
        self.addCleanup(self._delete_port, port1['id'])
        self.addCleanup(self._delete_port, port2['id'])
        self.assertEqual(port1['network_id'], network1['id'])
        self.assertEqual(port2['network_id'], network2['id'])
        self.assertTrue(port1['admin_state_up'])
        self.assertTrue(port2['admin_state_up'])

    @classmethod
    def _get_ipaddress_from_tempest_conf(cls):
        """Return the base address of the configured tenant CIDR.

        Raises:
            ValueError: if ``cls._ip_version`` is neither 4 nor 6
                (previously this silently fell through to a NameError).
        """
        if cls._ip_version == 4:
            cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
        elif cls._ip_version == 6:
            cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
        else:
            raise ValueError("Unsupported IP version: %s" % cls._ip_version)
        return netaddr.IPAddress(cidr)

    @test.attr(type='smoke')
    @test.idempotent_id('0435f278-40ae-48cb-a404-b8a087bc09b1')
    def test_create_port_in_allowed_allocation_pools(self):
        """A new port's IP must fall inside the subnet allocation pool."""
        network = self.create_network()
        net_id = network['id']
        address = self._get_ipaddress_from_tempest_conf()
        allocation_pools = {'allocation_pools': [{'start': str(address + 4),
                                                  'end': str(address + 6)}]}
        subnet = self.create_subnet(network, **allocation_pools)
        self.addCleanup(self.client.delete_subnet, subnet['id'])
        body = self.client.create_port(network_id=net_id)
        self.addCleanup(self.client.delete_port, body['port']['id'])
        port = body['port']
        ip_address = port['fixed_ips'][0]['ip_address']
        start_ip_address = allocation_pools['allocation_pools'][0]['start']
        end_ip_address = allocation_pools['allocation_pools'][0]['end']
        ip_range = netaddr.IPRange(start_ip_address, end_ip_address)
        self.assertIn(ip_address, ip_range)

    @test.attr(type='smoke')
    @test.idempotent_id('c9a685bd-e83f-499c-939f-9f7863ca259f')
    def test_show_port(self):
        """show_port must return the same attributes as create_port."""
        # Verify the details of port
        body = self.client.show_port(self.port['id'])
        port = body['port']
        self.assertIn('id', port)
        # TODO(Santosh)- This is a temporary workaround to compare create_port
        # and show_port dict elements.Remove this once extra_dhcp_opts issue
        # gets fixed in neutron.( bug - 1365341.)
        self.assertThat(self.port,
                        custom_matchers.MatchesDictExceptForKeys
                        (port, excluded_keys=['extra_dhcp_opts']))

    @test.attr(type='smoke')
    @test.idempotent_id('45fcdaf2-dab0-4c13-ac6c-fcddfb579dbd')
    def test_show_port_fields(self):
        """show_port with a field filter returns exactly those fields."""
        # Verify specific fields of a port
        fields = ['id', 'mac_address']
        body = self.client.show_port(self.port['id'],
                                     fields=fields)
        port = body['port']
        self.assertEqual(sorted(port.keys()), sorted(fields))
        for field_name in fields:
            self.assertEqual(port[field_name], self.port[field_name])

    @test.attr(type='smoke')
    @test.idempotent_id('cf95b358-3e92-4a29-a148-52445e1ac50e')
    def test_list_ports(self):
        # Verify the port exists in the list of all ports
        body = self.client.list_ports()
        ports = [port['id'] for port in body['ports']
                 if port['id'] == self.port['id']]
        self.assertNotEmpty(ports, "Created port not found in the list")

    @test.attr(type='smoke')
    @test.idempotent_id('5ad01ed0-0e6e-4c5d-8194-232801b15c72')
    def test_port_list_filter_by_router_id(self):
        """Listing ports by device_id returns only the router's port."""
        # Create a router
        network = self.create_network()
        self.addCleanup(self.client.delete_network, network['id'])
        subnet = self.create_subnet(network)
        self.addCleanup(self.client.delete_subnet, subnet['id'])
        router = self.create_router(data_utils.rand_name('router-'))
        self.addCleanup(self.client.delete_router, router['id'])
        port = self.client.create_port(network_id=network['id'])
        # Add router interface to port created above
        self.client.add_router_interface_with_port_id(
            router['id'], port['port']['id'])
        self.addCleanup(self.client.remove_router_interface_with_port_id,
                        router['id'], port['port']['id'])
        # List ports filtered by router_id
        port_list = self.client.list_ports(device_id=router['id'])
        ports = port_list['ports']
        self.assertEqual(len(ports), 1)
        self.assertEqual(ports[0]['id'], port['port']['id'])
        self.assertEqual(ports[0]['device_id'], router['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('ff7f117f-f034-4e0e-abff-ccef05c454b4')
    def test_list_ports_fields(self):
        """list_ports with a field filter returns exactly those fields."""
        # Verify specific fields of ports
        fields = ['id', 'mac_address']
        body = self.client.list_ports(fields=fields)
        ports = body['ports']
        self.assertNotEmpty(ports, "Port list returned is empty")
        # Asserting the fields returned are correct
        for port in ports:
            self.assertEqual(sorted(fields), sorted(port.keys()))

    @test.attr(type='smoke')
    @test.idempotent_id('63aeadd4-3b49-427f-a3b1-19ca81f06270')
    def test_create_update_port_with_second_ip(self):
        """Create a port with two fixed IPs, then shrink/grow via update."""
        # Create a network with two subnets
        network = self.create_network()
        self.addCleanup(self.client.delete_network, network['id'])
        subnet_1 = self.create_subnet(network)
        self.addCleanup(self.client.delete_subnet, subnet_1['id'])
        subnet_2 = self.create_subnet(network)
        self.addCleanup(self.client.delete_subnet, subnet_2['id'])
        fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
        fixed_ip_2 = [{'subnet_id': subnet_2['id']}]

        fixed_ips = fixed_ip_1 + fixed_ip_2

        # Create a port with multiple IP addresses
        port = self.create_port(network,
                                fixed_ips=fixed_ips)
        self.addCleanup(self.client.delete_port, port['id'])
        self.assertEqual(2, len(port['fixed_ips']))
        check_fixed_ips = [subnet_1['id'], subnet_2['id']]
        for item in port['fixed_ips']:
            self.assertIn(item['subnet_id'], check_fixed_ips)

        # Update the port to return to a single IP address
        port = self.update_port(port, fixed_ips=fixed_ip_1)
        self.assertEqual(1, len(port['fixed_ips']))

        # Update the port with a second IP address from second subnet
        port = self.update_port(port, fixed_ips=fixed_ips)
        self.assertEqual(2, len(port['fixed_ips']))

    def _update_port_with_security_groups(self, security_groups_names):
        """Create a port, then update its security groups and attributes.

        Creates one security group per name in *security_groups_names*,
        creates a port bound to a fresh security group, then updates the
        port onto the new groups / subnet / name / admin state and
        verifies the result.
        """
        subnet_1 = self.create_subnet(self.network)
        self.addCleanup(self.client.delete_subnet, subnet_1['id'])
        fixed_ip_1 = [{'subnet_id': subnet_1['id']}]

        security_groups_list = list()
        for name in security_groups_names:
            group_create_body = self.client.create_security_group(
                name=name)
            self.addCleanup(self.client.delete_security_group,
                            group_create_body['security_group']['id'])
            security_groups_list.append(group_create_body['security_group']
                                        ['id'])
        # Create a port
        sec_grp_name = data_utils.rand_name('secgroup')
        security_group = self.client.create_security_group(name=sec_grp_name)
        self.addCleanup(self.client.delete_security_group,
                        security_group['security_group']['id'])
        post_body = {
            "name": data_utils.rand_name('port-'),
            "security_groups": [security_group['security_group']['id']],
            "network_id": self.network['id'],
            "admin_state_up": True,
            "fixed_ips": fixed_ip_1}
        body = self.client.create_port(**post_body)
        self.addCleanup(self.client.delete_port, body['port']['id'])
        port = body['port']

        # Update the port with security groups
        subnet_2 = self.create_subnet(self.network)
        # Register cleanup for subnet_2 as well, consistent with subnet_1.
        self.addCleanup(self.client.delete_subnet, subnet_2['id'])
        fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
        update_body = {"name": data_utils.rand_name('port-'),
                       "admin_state_up": False,
                       "fixed_ips": fixed_ip_2,
                       "security_groups": security_groups_list}
        body = self.client.update_port(port['id'], **update_body)
        port_show = body['port']
        # Verify the security groups and other attributes updated to port
        exclude_keys = set(port_show).symmetric_difference(update_body)
        exclude_keys.add('fixed_ips')
        exclude_keys.add('security_groups')
        self.assertThat(port_show, custom_matchers.MatchesDictExceptForKeys(
            update_body, exclude_keys))
        self.assertEqual(fixed_ip_2[0]['subnet_id'],
                         port_show['fixed_ips'][0]['subnet_id'])

        for security_group in security_groups_list:
            self.assertIn(security_group, port_show['security_groups'])

    @test.attr(type='smoke')
    @test.idempotent_id('58091b66-4ff4-4cc1-a549-05d60c7acd1a')
    def test_update_port_with_security_group_and_extra_attributes(self):
        self._update_port_with_security_groups(
            [data_utils.rand_name('secgroup')])

    @test.attr(type='smoke')
    @test.idempotent_id('edf6766d-3d40-4621-bc6e-2521a44c257d')
    def test_update_port_with_two_security_groups_and_extra_attributes(self):
        self._update_port_with_security_groups(
            [data_utils.rand_name('secgroup'),
             data_utils.rand_name('secgroup')])

    @test.attr(type='smoke')
    @test.idempotent_id('13e95171-6cbd-489c-9d7c-3f9c58215c18')
    def test_create_show_delete_port_user_defined_mac(self):
        """Create a port with a user supplied (known-free) MAC address."""
        # Create a port for a legal mac
        body = self.client.create_port(network_id=self.network['id'])
        old_port = body['port']
        free_mac_address = old_port['mac_address']
        self.client.delete_port(old_port['id'])
        # Create a new port with user defined mac
        body = self.client.create_port(network_id=self.network['id'],
                                       mac_address=free_mac_address)
        self.addCleanup(self.client.delete_port, body['port']['id'])
        port = body['port']
        body = self.client.show_port(port['id'])
        show_port = body['port']
        self.assertEqual(free_mac_address,
                         show_port['mac_address'])

    @test.attr(type='smoke')
    @test.idempotent_id('4179dcb9-1382-4ced-84fe-1b91c54f5735')
    def test_create_port_with_no_securitygroups(self):
        """A port created with security_groups=[] must stay group-less."""
        network = self.create_network()
        self.addCleanup(self.client.delete_network, network['id'])
        subnet = self.create_subnet(network)
        self.addCleanup(self.client.delete_subnet, subnet['id'])
        port = self.create_port(network, security_groups=[])
        self.addCleanup(self.client.delete_port, port['id'])
        self.assertIsNotNone(port['security_groups'])
        self.assertEmpty(port['security_groups'])
|
||||
|
||||
|
||||
class PortsAdminExtendedAttrsTestJSON(base.BaseAdminNetworkTest):
    """Admin-only tests for the port-binding extension (binding:* attrs)."""

    @classmethod
    def resource_setup(cls):
        super(PortsAdminExtendedAttrsTestJSON, cls).resource_setup()
        cls.identity_client = cls._get_identity_admin_client()
        cls.tenant = cls.identity_client.get_tenant_by_name(
            CONF.identity.tenant_name)
        cls.network = cls.create_network()
        # The local hostname is used as an arbitrary but valid
        # binding:host_id value for all tests in this class.
        cls.host_id = socket.gethostname()

    @test.attr(type='smoke')
    @test.idempotent_id('8e8569c1-9ac7-44db-8bc1-f5fb2814f29b')
    def test_create_port_binding_ext_attr(self):
        # binding:host_id can be set directly at port creation time.
        post_body = {"network_id": self.network['id'],
                     "binding:host_id": self.host_id}
        body = self.admin_client.create_port(**post_body)
        port = body['port']
        self.addCleanup(self.admin_client.delete_port, port['id'])
        host_id = port['binding:host_id']
        self.assertIsNotNone(host_id)
        self.assertEqual(self.host_id, host_id)

    @test.attr(type='smoke')
    @test.idempotent_id('6f6c412c-711f-444d-8502-0ac30fbf5dd5')
    def test_update_port_binding_ext_attr(self):
        # binding:host_id can also be set via port update after creation.
        post_body = {"network_id": self.network['id']}
        body = self.admin_client.create_port(**post_body)
        port = body['port']
        self.addCleanup(self.admin_client.delete_port, port['id'])
        update_body = {"binding:host_id": self.host_id}
        body = self.admin_client.update_port(port['id'], **update_body)
        updated_port = body['port']
        host_id = updated_port['binding:host_id']
        self.assertIsNotNone(host_id)
        self.assertEqual(self.host_id, host_id)

    @test.attr(type='smoke')
    @test.idempotent_id('1c82a44a-6c6e-48ff-89e1-abe7eaf8f9f8')
    def test_list_ports_binding_ext_attr(self):
        # Create a new port
        post_body = {"network_id": self.network['id']}
        body = self.admin_client.create_port(**post_body)
        port = body['port']
        self.addCleanup(self.admin_client.delete_port, port['id'])

        # Update the port's binding attributes so that is now 'bound'
        # to a host
        update_body = {"binding:host_id": self.host_id}
        self.admin_client.update_port(port['id'], **update_body)

        # List all ports, ensure new port is part of list and its binding
        # attributes are set and accurate
        body = self.admin_client.list_ports()
        ports_list = body['ports']
        pids_list = [p['id'] for p in ports_list]
        self.assertIn(port['id'], pids_list)
        listed_port = [p for p in ports_list if p['id'] == port['id']]
        self.assertEqual(1, len(listed_port),
                         'Multiple ports listed with id %s in ports listing: '
                         '%s' % (port['id'], ports_list))
        self.assertEqual(self.host_id, listed_port[0]['binding:host_id'])

    @test.attr(type='smoke')
    @test.idempotent_id('b54ac0ff-35fc-4c79-9ca3-c7dbd4ea4f13')
    def test_show_port_binding_ext_attr(self):
        # show_port must report the same binding:* values that the create
        # response returned (host_id, vif_type, vif_details).
        body = self.admin_client.create_port(network_id=self.network['id'])
        port = body['port']
        self.addCleanup(self.admin_client.delete_port, port['id'])
        body = self.admin_client.show_port(port['id'])
        show_port = body['port']
        self.assertEqual(port['binding:host_id'],
                         show_port['binding:host_id'])
        self.assertEqual(port['binding:vif_type'],
                         show_port['binding:vif_type'])
        self.assertEqual(port['binding:vif_details'],
                         show_port['binding:vif_details'])
|
||||
|
||||
|
||||
class PortsIpV6TestJSON(PortsTestJSON):
    # Re-run the whole PortsTestJSON suite over an IPv6 tenant network.
    _ip_version = 6
    _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
    _tenant_network_mask_bits = CONF.network.tenant_network_v6_mask_bits
|
||||
|
||||
|
||||
class PortsAdminExtendedAttrsIpV6TestJSON(PortsAdminExtendedAttrsTestJSON):
    # Re-run the admin binding-extension tests over an IPv6 tenant network.
    _ip_version = 6
    _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
    _tenant_network_mask_bits = CONF.network.tenant_network_v6_mask_bits
|
359
neutron/tests/tempest/api/network/test_routers.py
Normal file
359
neutron/tests/tempest/api/network/test_routers.py
Normal file
@ -0,0 +1,359 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import netaddr
|
||||
|
||||
from neutron.tests.tempest.api.network import base_routers as base
|
||||
from neutron.tests.api.contrib import clients
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class RoutersTest(base.BaseRouterTest):
    """CRUD, gateway, SNAT, interface and extra-route tests for routers."""

    @classmethod
    def resource_setup(cls):
        super(RoutersTest, cls).resource_setup()
        if not test.is_extension_enabled('router', 'network'):
            msg = "router extension not enabled."
            raise cls.skipException(msg)
        admin_manager = clients.AdminManager()
        cls.identity_admin_client = admin_manager.identity_client
        # Pick the tenant CIDR matching the IP version under test.
        cls.tenant_cidr = (CONF.network.tenant_network_cidr
                           if cls._ip_version == 4 else
                           CONF.network.tenant_network_v6_cidr)

    def _cleanup_router(self, router):
        # Delete the router and drop it from the base class's tracking list
        # so resource_cleanup does not try to delete it a second time.
        self.delete_router(router)
        self.routers.remove(router)

    def _create_router(self, name, admin_state_up=False,
                       external_network_id=None, enable_snat=None):
        # associate a cleanup with created routers to avoid quota limits
        router = self.create_router(name, admin_state_up,
                                    external_network_id, enable_snat)
        self.addCleanup(self._cleanup_router, router)
        return router

    @test.attr(type='smoke')
    @test.idempotent_id('f64403e2-8483-4b34-8ccd-b09a87bcc68c')
    def test_create_show_list_update_delete_router(self):
        # Create a router
        # NOTE(salv-orlando): Do not invoke self.create_router
        # as we need to check the response code
        name = data_utils.rand_name('router-')
        create_body = self.client.create_router(
            name, external_gateway_info={
                "network_id": CONF.network.public_network_id},
            admin_state_up=False)
        self.addCleanup(self._delete_router, create_body['router']['id'])
        self.assertEqual(create_body['router']['name'], name)
        self.assertEqual(
            create_body['router']['external_gateway_info']['network_id'],
            CONF.network.public_network_id)
        self.assertEqual(create_body['router']['admin_state_up'], False)
        # Show details of the created router
        show_body = self.client.show_router(create_body['router']['id'])
        self.assertEqual(show_body['router']['name'], name)
        self.assertEqual(
            show_body['router']['external_gateway_info']['network_id'],
            CONF.network.public_network_id)
        self.assertEqual(show_body['router']['admin_state_up'], False)
        # List routers and verify if created router is there in response
        list_body = self.client.list_routers()
        routers_list = list()
        for router in list_body['routers']:
            routers_list.append(router['id'])
        self.assertIn(create_body['router']['id'], routers_list)
        # Update the name of router and verify if it is updated
        updated_name = 'updated ' + name
        update_body = self.client.update_router(create_body['router']['id'],
                                                name=updated_name)
        self.assertEqual(update_body['router']['name'], updated_name)
        show_body = self.client.show_router(
            create_body['router']['id'])
        self.assertEqual(show_body['router']['name'], updated_name)

    @test.attr(type='smoke')
    @test.idempotent_id('e54dd3a3-4352-4921-b09d-44369ae17397')
    def test_create_router_setting_tenant_id(self):
        # Test creating router from admin user setting tenant_id.
        test_tenant = data_utils.rand_name('test_tenant_')
        test_description = data_utils.rand_name('desc_')
        tenant = self.identity_admin_client.create_tenant(
            name=test_tenant, description=test_description)
        tenant_id = tenant['id']
        self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)

        name = data_utils.rand_name('router-')
        create_body = self.admin_client.create_router(name,
                                                      tenant_id=tenant_id)
        self.addCleanup(self.admin_client.delete_router,
                        create_body['router']['id'])
        self.assertEqual(tenant_id, create_body['router']['tenant_id'])

    @test.idempotent_id('847257cc-6afd-4154-b8fb-af49f5670ce8')
    @test.requires_ext(extension='ext-gw-mode', service='network')
    @test.attr(type='smoke')
    def test_create_router_with_default_snat_value(self):
        # Create a router with default snat rule
        name = data_utils.rand_name('router')
        router = self._create_router(
            name, external_network_id=CONF.network.public_network_id)
        # When enable_snat is not specified, Neutron defaults it to True.
        self._verify_router_gateway(
            router['id'], {'network_id': CONF.network.public_network_id,
                           'enable_snat': True})

    @test.idempotent_id('ea74068d-09e9-4fd7-8995-9b6a1ace920f')
    @test.requires_ext(extension='ext-gw-mode', service='network')
    @test.attr(type='smoke')
    def test_create_router_with_snat_explicit(self):
        name = data_utils.rand_name('snat-router')
        # Create a router enabling snat attributes
        enable_snat_states = [False, True]
        for enable_snat in enable_snat_states:
            external_gateway_info = {
                'network_id': CONF.network.public_network_id,
                'enable_snat': enable_snat}
            create_body = self.admin_client.create_router(
                name, external_gateway_info=external_gateway_info)
            self.addCleanup(self.admin_client.delete_router,
                            create_body['router']['id'])
            # Verify snat attributes after router creation
            self._verify_router_gateway(create_body['router']['id'],
                                        exp_ext_gw_info=external_gateway_info)

    @test.attr(type='smoke')
    @test.idempotent_id('b42e6e39-2e37-49cc-a6f4-8467e940900a')
    def test_add_remove_router_interface_with_subnet_id(self):
        network = self.create_network()
        subnet = self.create_subnet(network)
        router = self._create_router(data_utils.rand_name('router-'))
        # Add router interface with subnet id
        interface = self.client.add_router_interface_with_subnet_id(
            router['id'], subnet['id'])
        self.addCleanup(self._remove_router_interface_with_subnet_id,
                        router['id'], subnet['id'])
        self.assertIn('subnet_id', interface.keys())
        self.assertIn('port_id', interface.keys())
        # Verify router id is equal to device id in port details
        show_port_body = self.client.show_port(
            interface['port_id'])
        self.assertEqual(show_port_body['port']['device_id'],
                         router['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('2b7d2f37-6748-4d78-92e5-1d590234f0d5')
    def test_add_remove_router_interface_with_port_id(self):
        network = self.create_network()
        self.create_subnet(network)
        router = self._create_router(data_utils.rand_name('router-'))
        port_body = self.client.create_port(
            network_id=network['id'])
        # add router interface to port created above
        interface = self.client.add_router_interface_with_port_id(
            router['id'], port_body['port']['id'])
        self.addCleanup(self._remove_router_interface_with_port_id,
                        router['id'], port_body['port']['id'])
        self.assertIn('subnet_id', interface.keys())
        self.assertIn('port_id', interface.keys())
        # Verify router id is equal to device id in port details
        show_port_body = self.client.show_port(
            interface['port_id'])
        self.assertEqual(show_port_body['port']['device_id'],
                         router['id'])

    def _verify_router_gateway(self, router_id, exp_ext_gw_info=None):
        """Assert the router's external_gateway_info matches expectations.

        When exp_ext_gw_info is None the gateway must be unset; otherwise
        only the keys present in exp_ext_gw_info are compared, so callers
        may check a subset of the gateway attributes.
        """
        show_body = self.admin_client.show_router(router_id)
        actual_ext_gw_info = show_body['router']['external_gateway_info']
        if exp_ext_gw_info is None:
            self.assertIsNone(actual_ext_gw_info)
            return
        # Verify only keys passed in exp_ext_gw_info.
        # NOTE: dict.items() instead of the Python-2-only iteritems() keeps
        # this module importable under both Python 2 and Python 3.
        for k, v in exp_ext_gw_info.items():
            self.assertEqual(v, actual_ext_gw_info[k])

    def _verify_gateway_port(self, router_id):
        # Exactly one gateway port must exist on the public network for this
        # router, and it must carry an IP from one of the public subnets.
        list_body = self.admin_client.list_ports(
            network_id=CONF.network.public_network_id,
            device_id=router_id)
        self.assertEqual(len(list_body['ports']), 1)
        gw_port = list_body['ports'][0]
        fixed_ips = gw_port['fixed_ips']
        self.assertGreaterEqual(len(fixed_ips), 1)
        public_net_body = self.admin_client.show_network(
            CONF.network.public_network_id)
        public_subnet_id = public_net_body['network']['subnets'][0]
        self.assertIn(public_subnet_id,
                      [x['subnet_id'] for x in fixed_ips])

    @test.attr(type='smoke')
    @test.idempotent_id('6cc285d8-46bf-4f36-9b1a-783e3008ba79')
    def test_update_router_set_gateway(self):
        router = self._create_router(data_utils.rand_name('router-'))
        self.client.update_router(
            router['id'],
            external_gateway_info={
                'network_id': CONF.network.public_network_id})
        # Verify operation - router
        self._verify_router_gateway(
            router['id'],
            {'network_id': CONF.network.public_network_id})
        self._verify_gateway_port(router['id'])

    @test.idempotent_id('b386c111-3b21-466d-880c-5e72b01e1a33')
    @test.requires_ext(extension='ext-gw-mode', service='network')
    @test.attr(type='smoke')
    def test_update_router_set_gateway_with_snat_explicit(self):
        router = self._create_router(data_utils.rand_name('router-'))
        self.admin_client.update_router_with_snat_gw_info(
            router['id'],
            external_gateway_info={
                'network_id': CONF.network.public_network_id,
                'enable_snat': True})
        self._verify_router_gateway(
            router['id'],
            {'network_id': CONF.network.public_network_id,
             'enable_snat': True})
        self._verify_gateway_port(router['id'])

    @test.idempotent_id('96536bc7-8262-4fb2-9967-5c46940fa279')
    @test.requires_ext(extension='ext-gw-mode', service='network')
    @test.attr(type='smoke')
    def test_update_router_set_gateway_without_snat(self):
        router = self._create_router(data_utils.rand_name('router-'))
        self.admin_client.update_router_with_snat_gw_info(
            router['id'],
            external_gateway_info={
                'network_id': CONF.network.public_network_id,
                'enable_snat': False})
        self._verify_router_gateway(
            router['id'],
            {'network_id': CONF.network.public_network_id,
             'enable_snat': False})
        self._verify_gateway_port(router['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('ad81b7ee-4f81-407b-a19c-17e623f763e8')
    def test_update_router_unset_gateway(self):
        router = self._create_router(
            data_utils.rand_name('router-'),
            external_network_id=CONF.network.public_network_id)
        self.client.update_router(router['id'], external_gateway_info={})
        self._verify_router_gateway(router['id'])
        # No gateway port expected
        list_body = self.admin_client.list_ports(
            network_id=CONF.network.public_network_id,
            device_id=router['id'])
        self.assertFalse(list_body['ports'])

    @test.idempotent_id('f2faf994-97f4-410b-a831-9bc977b64374')
    @test.requires_ext(extension='ext-gw-mode', service='network')
    @test.attr(type='smoke')
    def test_update_router_reset_gateway_without_snat(self):
        router = self._create_router(
            data_utils.rand_name('router-'),
            external_network_id=CONF.network.public_network_id)
        self.admin_client.update_router_with_snat_gw_info(
            router['id'],
            external_gateway_info={
                'network_id': CONF.network.public_network_id,
                'enable_snat': False})
        self._verify_router_gateway(
            router['id'],
            {'network_id': CONF.network.public_network_id,
             'enable_snat': False})
        self._verify_gateway_port(router['id'])

    @test.idempotent_id('c86ac3a8-50bd-4b00-a6b8-62af84a0765c')
    @test.requires_ext(extension='extraroute', service='network')
    @test.attr(type='smoke')
    def test_update_extra_route(self):
        self.network = self.create_network()
        self.name = self.network['name']
        self.subnet = self.create_subnet(self.network)
        # Add router interface with subnet id
        self.router = self._create_router(
            data_utils.rand_name('router-'), True)
        self.create_router_interface(self.router['id'], self.subnet['id'])
        self.addCleanup(
            self._delete_extra_routes,
            self.router['id'])
        # Update router extra route, second ip of the range is
        # used as next hop
        cidr = netaddr.IPNetwork(self.subnet['cidr'])
        next_hop = str(cidr[2])
        destination = str(self.subnet['cidr'])
        extra_route = self.client.update_extra_routes(self.router['id'],
                                                      next_hop, destination)
        self.assertEqual(1, len(extra_route['router']['routes']))
        self.assertEqual(destination,
                         extra_route['router']['routes'][0]['destination'])
        self.assertEqual(next_hop,
                         extra_route['router']['routes'][0]['nexthop'])
        show_body = self.client.show_router(self.router['id'])
        self.assertEqual(destination,
                         show_body['router']['routes'][0]['destination'])
        self.assertEqual(next_hop,
                         show_body['router']['routes'][0]['nexthop'])

    def _delete_extra_routes(self, router_id):
        self.client.delete_extra_routes(router_id)

    @test.attr(type='smoke')
    @test.idempotent_id('a8902683-c788-4246-95c7-ad9c6d63a4d9')
    def test_update_router_admin_state(self):
        self.router = self._create_router(data_utils.rand_name('router-'))
        self.assertFalse(self.router['admin_state_up'])
        # Update router admin state
        update_body = self.client.update_router(self.router['id'],
                                                admin_state_up=True)
        self.assertTrue(update_body['router']['admin_state_up'])
        show_body = self.client.show_router(self.router['id'])
        self.assertTrue(show_body['router']['admin_state_up'])

    @test.attr(type='smoke')
    @test.idempotent_id('802c73c9-c937-4cef-824b-2191e24a6aab')
    def test_add_multiple_router_interfaces(self):
        network01 = self.create_network(
            network_name=data_utils.rand_name('router-network01-'))
        network02 = self.create_network(
            network_name=data_utils.rand_name('router-network02-'))
        subnet01 = self.create_subnet(network01)
        # netaddr's IPNetwork.next() returns the adjacent (non-overlapping)
        # network of the same size, so the two subnets cannot collide.
        sub02_cidr = netaddr.IPNetwork(self.tenant_cidr).next()
        subnet02 = self.create_subnet(network02, cidr=sub02_cidr)
        router = self._create_router(data_utils.rand_name('router-'))
        interface01 = self._add_router_interface_with_subnet_id(router['id'],
                                                                subnet01['id'])
        self._verify_router_interface(router['id'], subnet01['id'],
                                      interface01['port_id'])
        interface02 = self._add_router_interface_with_subnet_id(router['id'],
                                                                subnet02['id'])
        self._verify_router_interface(router['id'], subnet02['id'],
                                      interface02['port_id'])

    def _verify_router_interface(self, router_id, subnet_id, port_id):
        # The interface port must be owned by the router and carry an IP
        # from the attached subnet.
        show_port_body = self.client.show_port(port_id)
        interface_port = show_port_body['port']
        self.assertEqual(router_id, interface_port['device_id'])
        self.assertEqual(subnet_id,
                         interface_port['fixed_ips'][0]['subnet_id'])
||||
|
||||
|
||||
class RoutersIpV6Test(RoutersTest):
    # Re-run the whole RoutersTest suite over an IPv6 tenant network.
    _ip_version = 6
112
neutron/tests/tempest/api/network/test_routers_negative.py
Normal file
112
neutron/tests/tempest/api/network/test_routers_negative.py
Normal file
@ -0,0 +1,112 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import netaddr
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron.tests.tempest.api.network import base_routers as base
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class RoutersNegativeTest(base.BaseRouterTest):
    """Negative router API tests: each case expects a specific HTTP error."""

    @classmethod
    def resource_setup(cls):
        super(RoutersNegativeTest, cls).resource_setup()
        if not test.is_extension_enabled('router', 'network'):
            msg = "router extension not enabled."
            raise cls.skipException(msg)
        # Shared fixtures reused (read-only) by the tests below.
        cls.router = cls.create_router(data_utils.rand_name('router-'))
        cls.network = cls.create_network()
        cls.subnet = cls.create_subnet(cls.network)
        # Pick the tenant CIDR matching the IP version under test.
        cls.tenant_cidr = (CONF.network.tenant_network_cidr
                           if cls._ip_version == 4 else
                           CONF.network.tenant_network_v6_cidr)

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('37a94fc0-a834-45b9-bd23-9a81d2fd1e22')
    def test_router_add_gateway_invalid_network_returns_404(self):
        # A router UUID is guaranteed not to be a network UUID.
        self.assertRaises(lib_exc.NotFound,
                          self.client.update_router,
                          self.router['id'],
                          external_gateway_info={
                              'network_id': self.router['id']})

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('11836a18-0b15-4327-a50b-f0d9dc66bddd')
    def test_router_add_gateway_net_not_external_returns_400(self):
        alt_network = self.create_network(
            network_name=data_utils.rand_name('router-negative-'))
        # netaddr's IPNetwork.next() yields the adjacent same-size network,
        # avoiding overlap with the class-level subnet.
        sub_cidr = netaddr.IPNetwork(self.tenant_cidr).next()
        self.create_subnet(alt_network, cidr=sub_cidr)
        self.assertRaises(lib_exc.BadRequest,
                          self.client.update_router,
                          self.router['id'],
                          external_gateway_info={
                              'network_id': alt_network['id']})

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('957751a3-3c68-4fa2-93b6-eb52ea10db6e')
    def test_add_router_interfaces_on_overlapping_subnets_returns_400(self):
        # Both subnets default to the same CIDR, so the second attach
        # must be rejected for overlapping address space.
        network01 = self.create_network(
            network_name=data_utils.rand_name('router-network01-'))
        network02 = self.create_network(
            network_name=data_utils.rand_name('router-network02-'))
        subnet01 = self.create_subnet(network01)
        subnet02 = self.create_subnet(network02)
        self._add_router_interface_with_subnet_id(self.router['id'],
                                                  subnet01['id'])
        self.assertRaises(lib_exc.BadRequest,
                          self._add_router_interface_with_subnet_id,
                          self.router['id'],
                          subnet02['id'])

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('04df80f9-224d-47f5-837a-bf23e33d1c20')
    def test_router_remove_interface_in_use_returns_409(self):
        # A router with an attached interface cannot be deleted.
        self.client.add_router_interface_with_subnet_id(
            self.router['id'], self.subnet['id'])
        self.assertRaises(lib_exc.Conflict,
                          self.client.delete_router,
                          self.router['id'])

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('c2a70d72-8826-43a7-8208-0209e6360c47')
    def test_show_non_existent_router_returns_404(self):
        router = data_utils.rand_name('non_exist_router')
        self.assertRaises(lib_exc.NotFound, self.client.show_router,
                          router)

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('b23d1569-8b0c-4169-8d4b-6abd34fad5c7')
    def test_update_non_existent_router_returns_404(self):
        router = data_utils.rand_name('non_exist_router')
        self.assertRaises(lib_exc.NotFound, self.client.update_router,
                          router, name="new_name")

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('c7edc5ad-d09d-41e6-a344-5c0c31e2e3e4')
    def test_delete_non_existent_router_returns_404(self):
        router = data_utils.rand_name('non_exist_router')
        self.assertRaises(lib_exc.NotFound, self.client.delete_router,
                          router)
||||
|
||||
|
||||
class RoutersNegativeIpV6Test(RoutersNegativeTest):
    # Re-run the negative router tests over an IPv6 tenant network.
    _ip_version = 6
244
neutron/tests/tempest/api/network/test_security_groups.py
Normal file
244
neutron/tests/tempest/api/network/test_security_groups.py
Normal file
@ -0,0 +1,244 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import six
|
||||
|
||||
from neutron.tests.tempest.api.network import base_security_groups as base
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class SecGroupTest(base.BaseSecGroupTest):
|
||||
|
||||
_tenant_network_cidr = CONF.network.tenant_network_cidr
|
||||
|
||||
    @classmethod
    def resource_setup(cls):
        # Skip the whole class when the security-group extension is absent.
        super(SecGroupTest, cls).resource_setup()
        if not test.is_extension_enabled('security-group', 'network'):
            msg = "security-group extension not enabled."
            raise cls.skipException(msg)
|
||||
|
||||
    def _create_verify_security_group_rule(self, sg_id, direction,
                                           ethertype, protocol,
                                           port_range_min,
                                           port_range_max,
                                           remote_group_id=None,
                                           remote_ip_prefix=None):
        # Create Security Group rule with the input params and validate
        # that SG rule is created with the same parameters.
        rule_create_body = self.client.create_security_group_rule(
            security_group_id=sg_id,
            direction=direction,
            ethertype=ethertype,
            protocol=protocol,
            port_range_min=port_range_min,
            port_range_max=port_range_max,
            remote_group_id=remote_group_id,
            remote_ip_prefix=remote_ip_prefix
        )

        sec_group_rule = rule_create_body['security_group_rule']
        self.addCleanup(self._delete_security_group_rule,
                        sec_group_rule['id'])

        # Every attribute we sent must be echoed back unchanged; unsupplied
        # optional attributes must come back as None.
        expected = {'direction': direction, 'protocol': protocol,
                    'ethertype': ethertype, 'port_range_min': port_range_min,
                    'port_range_max': port_range_max,
                    'remote_group_id': remote_group_id,
                    'remote_ip_prefix': remote_ip_prefix}
        for key, value in six.iteritems(expected):
            self.assertEqual(value, sec_group_rule[key],
                             "Field %s of the created security group "
                             "rule does not match with %s." %
                             (key, value))
|
||||
|
||||
    @test.attr(type='smoke')
    @test.idempotent_id('e30abd17-fef9-4739-8617-dc26da88e686')
    def test_list_security_groups(self):
        # Verify the that security group belonging to tenant exist in list
        body = self.client.list_security_groups()
        security_groups = body['security_groups']
        found = None
        # Every tenant gets a 'default' group automatically; it must appear.
        for n in security_groups:
            if (n['name'] == 'default'):
                found = n['id']
        msg = "Security-group list doesn't contain default security-group"
        self.assertIsNotNone(found, msg)
|
||||
|
||||
    @test.attr(type='smoke')
    @test.idempotent_id('bfd128e5-3c92-44b6-9d66-7fe29d22c802')
    def test_create_list_update_show_delete_security_group(self):
        group_create_body, name = self._create_security_group()

        # List security groups and verify if created group is there in response
        list_body = self.client.list_security_groups()
        secgroup_list = list()
        for secgroup in list_body['security_groups']:
            secgroup_list.append(secgroup['id'])
        self.assertIn(group_create_body['security_group']['id'], secgroup_list)
        # Update the security group
        new_name = data_utils.rand_name('security-')
        new_description = data_utils.rand_name('security-description')
        update_body = self.client.update_security_group(
            group_create_body['security_group']['id'],
            name=new_name,
            description=new_description)
        # Verify if security group is updated
        self.assertEqual(update_body['security_group']['name'], new_name)
        self.assertEqual(update_body['security_group']['description'],
                         new_description)
        # Show details of the updated security group
        show_body = self.client.show_security_group(
            group_create_body['security_group']['id'])
        self.assertEqual(show_body['security_group']['name'], new_name)
        self.assertEqual(show_body['security_group']['description'],
                         new_description)
|
||||
|
||||
    @test.attr(type='smoke')
    @test.idempotent_id('cfb99e0e-7410-4a3d-8a0c-959a63ee77e9')
    def test_create_show_delete_security_group_rule(self):
        group_create_body, _ = self._create_security_group()

        # Create rules for each protocol
        # NOTE(review): rules are not deleted individually here — they appear
        # to be removed along with the group's cleanup; confirm against
        # _create_security_group's registered cleanups.
        protocols = ['tcp', 'udp', 'icmp']
        for protocol in protocols:
            rule_create_body = self.client.create_security_group_rule(
                security_group_id=group_create_body['security_group']['id'],
                protocol=protocol,
                direction='ingress',
                ethertype=self.ethertype
            )

            # Show details of the created security rule
            show_rule_body = self.client.show_security_group_rule(
                rule_create_body['security_group_rule']['id']
            )
            # Every attribute from the create response must round-trip
            # through show unchanged.
            create_dict = rule_create_body['security_group_rule']
            for key, value in six.iteritems(create_dict):
                self.assertEqual(value,
                                 show_rule_body['security_group_rule'][key],
                                 "%s does not match." % key)

            # List rules and verify created rule is in response
            rule_list_body = self.client.list_security_group_rules()
            rule_list = [rule['id']
                         for rule in rule_list_body['security_group_rules']]
            self.assertIn(rule_create_body['security_group_rule']['id'],
                          rule_list)
|
||||
|
||||
    @test.attr(type='smoke')
    @test.idempotent_id('87dfbcf9-1849-43ea-b1e4-efa3eeae9f71')
    def test_create_security_group_rule_with_additional_args(self):
        """Verify security group rule with additional arguments works.

        direction:ingress, ethertype:[IPv4/IPv6],
        protocol:tcp, port_range_min:77, port_range_max:77
        """
        group_create_body, _ = self._create_security_group()
        sg_id = group_create_body['security_group']['id']
        direction = 'ingress'
        protocol = 'tcp'
        port_range_min = 77
        port_range_max = 77
        self._create_verify_security_group_rule(sg_id, direction,
                                                self.ethertype, protocol,
                                                port_range_min,
                                                port_range_max)
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('c9463db8-b44d-4f52-b6c0-8dbda99f26ce')
|
||||
def test_create_security_group_rule_with_icmp_type_code(self):
|
||||
"""Verify security group rule for icmp protocol works.
|
||||
|
||||
Specify icmp type (port_range_min) and icmp code
|
||||
(port_range_max) with different values. A separate testcase
|
||||
is added for icmp protocol as icmp validation would be
|
||||
different from tcp/udp.
|
||||
"""
|
||||
group_create_body, _ = self._create_security_group()
|
||||
|
||||
sg_id = group_create_body['security_group']['id']
|
||||
direction = 'ingress'
|
||||
protocol = 'icmp'
|
||||
icmp_type_codes = [(3, 2), (3, 0), (8, 0), (0, 0), (11, None)]
|
||||
for icmp_type, icmp_code in icmp_type_codes:
|
||||
self._create_verify_security_group_rule(sg_id, direction,
|
||||
self.ethertype, protocol,
|
||||
icmp_type, icmp_code)
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('c2ed2deb-7a0c-44d8-8b4c-a5825b5c310b')
|
||||
def test_create_security_group_rule_with_remote_group_id(self):
|
||||
# Verify creating security group rule with remote_group_id works
|
||||
sg1_body, _ = self._create_security_group()
|
||||
sg2_body, _ = self._create_security_group()
|
||||
|
||||
sg_id = sg1_body['security_group']['id']
|
||||
direction = 'ingress'
|
||||
protocol = 'udp'
|
||||
port_range_min = 50
|
||||
port_range_max = 55
|
||||
remote_id = sg2_body['security_group']['id']
|
||||
self._create_verify_security_group_rule(sg_id, direction,
|
||||
self.ethertype, protocol,
|
||||
port_range_min,
|
||||
port_range_max,
|
||||
remote_group_id=remote_id)
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('16459776-5da2-4634-bce4-4b55ee3ec188')
|
||||
def test_create_security_group_rule_with_remote_ip_prefix(self):
|
||||
# Verify creating security group rule with remote_ip_prefix works
|
||||
sg1_body, _ = self._create_security_group()
|
||||
|
||||
sg_id = sg1_body['security_group']['id']
|
||||
direction = 'ingress'
|
||||
protocol = 'tcp'
|
||||
port_range_min = 76
|
||||
port_range_max = 77
|
||||
ip_prefix = self._tenant_network_cidr
|
||||
self._create_verify_security_group_rule(sg_id, direction,
|
||||
self.ethertype, protocol,
|
||||
port_range_min,
|
||||
port_range_max,
|
||||
remote_ip_prefix=ip_prefix)
|
||||
|
||||
@test.attr(type='smoke')
|
||||
@test.idempotent_id('0a307599-6655-4220-bebc-fd70c64f2290')
|
||||
def test_create_security_group_rule_with_protocol_integer_value(self):
|
||||
# Verify creating security group rule with the
|
||||
# protocol as integer value
|
||||
# arguments : "protocol": 17
|
||||
group_create_body, _ = self._create_security_group()
|
||||
direction = 'ingress'
|
||||
protocol = 17
|
||||
security_group_id = group_create_body['security_group']['id']
|
||||
rule_create_body = self.client.create_security_group_rule(
|
||||
security_group_id=security_group_id,
|
||||
direction=direction,
|
||||
protocol=protocol
|
||||
)
|
||||
sec_group_rule = rule_create_body['security_group_rule']
|
||||
self.assertEqual(sec_group_rule['direction'], direction)
|
||||
self.assertEqual(int(sec_group_rule['protocol']), protocol)
|
||||
|
||||
|
||||
class SecGroupIPv6Test(SecGroupTest):
    """Re-run the security group tests against IPv6 tenant networks."""

    _ip_version = 6
    _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
@ -0,0 +1,228 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import uuid
|
||||
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron.tests.tempest.api.network import base_security_groups as base
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class NegativeSecGroupTest(base.BaseSecGroupTest):
    """Negative API tests for the security-group extension."""

    _tenant_network_cidr = CONF.network.tenant_network_cidr

    @classmethod
    def resource_setup(cls):
        super(NegativeSecGroupTest, cls).resource_setup()
        if not test.is_extension_enabled('security-group', 'network'):
            raise cls.skipException("security-group extension not enabled.")

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('424fd5c3-9ddc-486a-b45f-39bf0c820fc6')
    def test_show_non_existent_security_group(self):
        bogus_id = str(uuid.uuid4())
        self.assertRaises(lib_exc.NotFound,
                          self.client.show_security_group, bogus_id)

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('4c094c09-000b-4e41-8100-9617600c02a6')
    def test_show_non_existent_security_group_rule(self):
        bogus_id = str(uuid.uuid4())
        self.assertRaises(lib_exc.NotFound,
                          self.client.show_security_group_rule, bogus_id)

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('1f1bb89d-5664-4956-9fcd-83ee0fa603df')
    def test_delete_non_existent_security_group(self):
        bogus_id = str(uuid.uuid4())
        self.assertRaises(lib_exc.NotFound,
                          self.client.delete_security_group, bogus_id)

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('981bdc22-ce48-41ed-900a-73148b583958')
    def test_create_security_group_rule_with_bad_protocol(self):
        sg_body, _ = self._create_security_group()
        # An unknown protocol name must be rejected.
        self.assertRaises(
            lib_exc.BadRequest, self.client.create_security_group_rule,
            security_group_id=sg_body['security_group']['id'],
            protocol='bad_protocol_name', direction='ingress',
            ethertype=self.ethertype)

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('5f8daf69-3c5f-4aaa-88c9-db1d66f68679')
    def test_create_security_group_rule_with_bad_remote_ip_prefix(self):
        sg_body, _ = self._create_security_group()
        sg_id = sg_body['security_group']['id']
        # Every malformed prefix must be rejected with BadRequest.
        for bad_prefix in ('192.168.1./24', '192.168.1.1/33',
                           'bad_prefix', '256'):
            self.assertRaises(
                lib_exc.BadRequest, self.client.create_security_group_rule,
                security_group_id=sg_id, protocol='tcp',
                direction='ingress', ethertype=self.ethertype,
                remote_ip_prefix=bad_prefix)

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('4bf786fd-2f02-443c-9716-5b98e159a49a')
    def test_create_security_group_rule_with_non_existent_remote_groupid(self):
        sg_body, _ = self._create_security_group()
        sg_id = sg_body['security_group']['id']
        # Neither a malformed nor a random unknown group id may be accepted.
        for bogus_group_id in ('bad_group_id', str(uuid.uuid4())):
            self.assertRaises(
                lib_exc.NotFound, self.client.create_security_group_rule,
                security_group_id=sg_id, protocol='tcp',
                direction='ingress', ethertype=self.ethertype,
                remote_group_id=bogus_group_id)

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('b5c4b247-6b02-435b-b088-d10d45650881')
    def test_create_security_group_rule_with_remote_ip_and_group(self):
        first_sg, _ = self._create_security_group()
        second_sg, _ = self._create_security_group()
        # remote_ip_prefix and remote_group_id are mutually exclusive.
        self.assertRaises(
            lib_exc.BadRequest, self.client.create_security_group_rule,
            security_group_id=first_sg['security_group']['id'],
            protocol='tcp', direction='ingress',
            ethertype=self.ethertype,
            remote_ip_prefix=self._tenant_network_cidr,
            remote_group_id=second_sg['security_group']['id'])

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('5666968c-fff3-40d6-9efc-df1c8bd01abb')
    def test_create_security_group_rule_with_bad_ethertype(self):
        sg_body, _ = self._create_security_group()
        # An unrecognised ethertype must be rejected.
        self.assertRaises(
            lib_exc.BadRequest, self.client.create_security_group_rule,
            security_group_id=sg_body['security_group']['id'],
            protocol='udp', direction='ingress', ethertype='bad_ethertype')

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('0d9c7791-f2ad-4e2f-ac73-abf2373b0d2d')
    def test_create_security_group_rule_with_invalid_ports(self):
        sg_body, _ = self._create_security_group()
        sg_id = sg_body['security_group']['id']

        # Invalid tcp port ranges, paired with the expected error text.
        tcp_cases = ((-16, 80, 'Invalid value for port -16'),
                     (80, 79, 'port_range_min must be <= port_range_max'),
                     (80, 65536, 'Invalid value for port 65536'),
                     (None, 6, 'port_range_min must be <= port_range_max'),
                     (-16, 65536, 'Invalid value for port'))
        for low, high, expected_msg in tcp_cases:
            err = self.assertRaises(
                lib_exc.BadRequest, self.client.create_security_group_rule,
                security_group_id=sg_id, protocol='tcp',
                port_range_min=low, port_range_max=high,
                direction='ingress', ethertype=self.ethertype)
            self.assertIn(expected_msg, str(err))

        # Invalid icmp type/code combinations, with the expected error text.
        icmp_cases = ((1, 256, 'Invalid value for ICMP code'),
                      (None, 6, 'ICMP type (port-range-min) is missing'),
                      (300, 1, 'Invalid value for ICMP type'))
        for low, high, expected_msg in icmp_cases:
            err = self.assertRaises(
                lib_exc.BadRequest, self.client.create_security_group_rule,
                security_group_id=sg_id, protocol='icmp',
                port_range_min=low, port_range_max=high,
                direction='ingress', ethertype=self.ethertype)
            self.assertIn(expected_msg, str(err))

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('2323061e-9fbf-4eb0-b547-7e8fafc90849')
    def test_create_additional_default_security_group_fails(self):
        # Creating a second group named 'default' must conflict.
        self.assertRaises(lib_exc.Conflict,
                          self.client.create_security_group, name='default')

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('8fde898f-ce88-493b-adc9-4e4692879fc5')
    def test_create_duplicate_security_group_rule_fails(self):
        sg_body, _ = self._create_security_group()
        sg_id = sg_body['security_group']['id']
        # First creation with valid parameters succeeds.
        self.client.create_security_group_rule(
            security_group_id=sg_id, direction='ingress',
            ethertype=self.ethertype, protocol='tcp',
            port_range_min=66, port_range_max=67)
        # An identical rule must then be rejected as a conflict.
        self.assertRaises(
            lib_exc.Conflict, self.client.create_security_group_rule,
            security_group_id=sg_id, protocol='tcp', direction='ingress',
            ethertype=self.ethertype, port_range_min=66, port_range_max=67)

    @test.attr(type=['negative', 'smoke'])
    @test.idempotent_id('be308db6-a7cf-4d5c-9baf-71bafd73f35e')
    def test_create_security_group_rule_with_non_existent_security_group(self):
        # Rules may not be attached to a security group that does not exist.
        self.assertRaises(lib_exc.NotFound,
                          self.client.create_security_group_rule,
                          security_group_id=str(uuid.uuid4()),
                          direction='ingress', ethertype=self.ethertype)
class NegativeSecGroupIPv6Test(NegativeSecGroupTest):
    """Re-run the negative security group tests on IPv6 networks."""

    _ip_version = 6
    _tenant_network_cidr = CONF.network.tenant_network_v6_cidr

    @test.attr(type=['negative', 'gate'])
    @test.idempotent_id('7607439c-af73-499e-bf64-f687fd12a842')
    def test_create_security_group_rule_wrong_ip_prefix_version(self):
        sg_body, _ = self._create_security_group()
        sg_id = sg_body['security_group']['id']

        # An ethertype that disagrees with the IP version of the remote
        # prefix must be rejected, in both directions.
        mismatches = (('IPv6', CONF.network.tenant_network_cidr),
                      ('IPv4', CONF.network.tenant_network_v6_cidr))
        for ethertype, prefix in mismatches:
            self.assertRaisesRegexp(
                lib_exc.BadRequest,
                "Conflicting value ethertype",
                self.client.create_security_group_rule,
                security_group_id=sg_id,
                protocol='tcp', direction='ingress',
                ethertype=ethertype,
                remote_ip_prefix=prefix)
@ -0,0 +1,33 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest_lib import decorators
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
|
||||
class ServiceTypeManagementTestJSON(base.BaseNetworkTest):
    """Smoke tests for the service-type management extension."""

    @classmethod
    def resource_setup(cls):
        super(ServiceTypeManagementTestJSON, cls).resource_setup()
        if not test.is_extension_enabled('service-type', 'network'):
            raise cls.skipException(
                "Neutron Service Type Management not enabled.")

    @decorators.skip_because(bug="1400370")
    @test.attr(type='smoke')
    @test.idempotent_id('2cbbeea9-f010-40f6-8df5-4eaa0c918ea6')
    def test_service_provider_list(self):
        providers = self.client.list_service_providers()
        self.assertIsInstance(providers['service_providers'], list)
327
neutron/tests/tempest/api/network/test_vpnaas_extensions.py
Normal file
327
neutron/tests/tempest/api/network/test_vpnaas_extensions.py
Normal file
@ -0,0 +1,327 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron.tests.tempest.api.network import base
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import test
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class VPNaaSTestJSON(base.BaseAdminNetworkTest):

    """
    Tests the following operations in the Neutron API using the REST client for
    Neutron:
        List, Show, Create, Delete, and Update VPN Service
        List, Show, Create, Delete, and Update IKE policy
        List, Show, Create, Delete, and Update IPSec policy
    """

    @classmethod
    def resource_setup(cls):
        # Skip early, before any resources are created, if vpnaas is absent.
        if not test.is_extension_enabled('vpnaas', 'network'):
            msg = "vpnaas extension not enabled."
            raise cls.skipException(msg)
        super(VPNaaSTestJSON, cls).resource_setup()
        cls.ext_net_id = CONF.network.public_network_id
        # A network/subnet/router topology shared by all tests in the class.
        cls.network = cls.create_network()
        cls.subnet = cls.create_subnet(cls.network)
        cls.router = cls.create_router(
            data_utils.rand_name("router"),
            external_network_id=CONF.network.public_network_id)
        cls.create_router_interface(cls.router['id'], cls.subnet['id'])
        cls.vpnservice = cls.create_vpnservice(cls.subnet['id'],
                                               cls.router['id'])

        cls.ikepolicy = cls.create_ikepolicy(
            data_utils.rand_name("ike-policy-"))
        cls.ipsecpolicy = cls.create_ipsecpolicy(
            data_utils.rand_name("ipsec-policy-"))

    def _delete_ike_policy(self, ike_policy_id):
        # Deletes a ike policy and verifies if it is deleted or not
        ike_list = list()
        all_ike = self.client.list_ikepolicies()
        for ike in all_ike['ikepolicies']:
            ike_list.append(ike['id'])
        if ike_policy_id in ike_list:
            self.client.delete_ikepolicy(ike_policy_id)
            # Asserting that the policy is not found in list after deletion
            ikepolicies = self.client.list_ikepolicies()
            ike_id_list = list()
            for i in ikepolicies['ikepolicies']:
                ike_id_list.append(i['id'])
            self.assertNotIn(ike_policy_id, ike_id_list)

    def _delete_ipsec_policy(self, ipsec_policy_id):
        # Deletes an ipsec policy if it exists; a NotFound means the test
        # already removed it, which is fine for cleanup.
        try:
            self.client.delete_ipsecpolicy(ipsec_policy_id)

        except lib_exc.NotFound:
            pass

    def _assertExpected(self, expected, actual):
        # Check if not expected keys/values exists in actual response body
        # NOTE: dict.items() (not the Python-2-only iteritems()) so this
        # works on both Python 2 and Python 3.
        for key, value in expected.items():
            self.assertIn(key, actual)
            self.assertEqual(value, actual[key])

    def _delete_vpn_service(self, vpn_service_id):
        self.client.delete_vpnservice(vpn_service_id)
        # Asserting if vpn service is found in the list after deletion
        body = self.client.list_vpnservices()
        vpn_services = [vs['id'] for vs in body['vpnservices']]
        self.assertNotIn(vpn_service_id, vpn_services)

    def _get_tenant_id(self):
        """
        Returns the tenant_id of the client current user
        """
        # TODO(jroovers) This is a temporary workaround to get the tenant_id
        # of the the current client. Replace this once tenant_isolation for
        # neutron is fixed.
        body = self.client.show_network(self.network['id'])
        return body['network']['tenant_id']

    @test.attr(type='smoke')
    @test.idempotent_id('14311574-0737-4e53-ac05-f7ae27742eed')
    def test_admin_create_ipsec_policy_for_tenant(self):
        tenant_id = self._get_tenant_id()
        # Create IPSec policy for the newly created tenant
        name = data_utils.rand_name('ipsec-policy')
        body = (self.admin_client.
                create_ipsecpolicy(name=name, tenant_id=tenant_id))
        ipsecpolicy = body['ipsecpolicy']
        self.assertIsNotNone(ipsecpolicy['id'])
        self.addCleanup(self.admin_client.delete_ipsecpolicy,
                        ipsecpolicy['id'])

        # Assert that created ipsec policy is found in API list call
        body = self.client.list_ipsecpolicies()
        ipsecpolicies = [policy['id'] for policy in body['ipsecpolicies']]
        self.assertIn(ipsecpolicy['id'], ipsecpolicies)

    @test.attr(type='smoke')
    @test.idempotent_id('b62acdc6-0c53-4d84-84aa-859b22b79799')
    def test_admin_create_vpn_service_for_tenant(self):
        tenant_id = self._get_tenant_id()

        # Create vpn service for the newly created tenant
        network2 = self.create_network()
        subnet2 = self.create_subnet(network2)
        router2 = self.create_router(data_utils.rand_name('router-'),
                                     external_network_id=self.ext_net_id)
        self.create_router_interface(router2['id'], subnet2['id'])
        name = data_utils.rand_name('vpn-service')
        body = self.admin_client.create_vpnservice(
            subnet_id=subnet2['id'],
            router_id=router2['id'],
            name=name,
            admin_state_up=True,
            tenant_id=tenant_id)
        vpnservice = body['vpnservice']
        self.assertIsNotNone(vpnservice['id'])
        self.addCleanup(self.admin_client.delete_vpnservice, vpnservice['id'])
        # Assert that created vpnservice is found in API list call
        body = self.client.list_vpnservices()
        vpn_services = [vs['id'] for vs in body['vpnservices']]
        self.assertIn(vpnservice['id'], vpn_services)

    @test.attr(type='smoke')
    @test.idempotent_id('58cc4a1c-443b-4f39-8fb6-c19d39f343ab')
    def test_admin_create_ike_policy_for_tenant(self):
        tenant_id = self._get_tenant_id()

        # Create IKE policy for the newly created tenant
        name = data_utils.rand_name('ike-policy')
        body = (self.admin_client.
                create_ikepolicy(name=name, ike_version="v1",
                                 encryption_algorithm="aes-128",
                                 auth_algorithm="sha1",
                                 tenant_id=tenant_id))
        ikepolicy = body['ikepolicy']
        self.assertIsNotNone(ikepolicy['id'])
        self.addCleanup(self.admin_client.delete_ikepolicy, ikepolicy['id'])

        # Assert that created ike policy is found in API list call
        body = self.client.list_ikepolicies()
        ikepolicies = [ikp['id'] for ikp in body['ikepolicies']]
        self.assertIn(ikepolicy['id'], ikepolicies)

    @test.attr(type='smoke')
    @test.idempotent_id('de5bb04c-3a1f-46b1-b329-7a8abba5c7f1')
    def test_list_vpn_services(self):
        # Verify the VPN service exists in the list of all VPN services
        body = self.client.list_vpnservices()
        vpnservices = body['vpnservices']
        self.assertIn(self.vpnservice['id'], [v['id'] for v in vpnservices])

    @test.attr(type='smoke')
    @test.idempotent_id('aacb13b1-fdc7-41fd-bab2-32621aee1878')
    def test_create_update_delete_vpn_service(self):
        # Creates a VPN service and sets up deletion
        network1 = self.create_network()
        subnet1 = self.create_subnet(network1)
        router1 = self.create_router(data_utils.rand_name('router-'),
                                     external_network_id=self.ext_net_id)
        self.create_router_interface(router1['id'], subnet1['id'])
        name = data_utils.rand_name('vpn-service1')
        body = self.client.create_vpnservice(subnet_id=subnet1['id'],
                                             router_id=router1['id'],
                                             name=name,
                                             admin_state_up=True)
        vpnservice = body['vpnservice']
        self.addCleanup(self._delete_vpn_service, vpnservice['id'])
        # Assert if created vpnservices are not found in vpnservices list
        body = self.client.list_vpnservices()
        vpn_services = [vs['id'] for vs in body['vpnservices']]
        self.assertIsNotNone(vpnservice['id'])
        self.assertIn(vpnservice['id'], vpn_services)

        # TODO(raies): implement logic to update vpnservice
        # VPNaaS client function to update is implemented.
        # But precondition is that current state of vpnservice
        # should be "ACTIVE" not "PENDING*"

    @test.attr(type='smoke')
    @test.idempotent_id('0dedfc1d-f8ee-4e2a-bfd4-7997b9dc17ff')
    def test_show_vpn_service(self):
        # Verifies the details of a vpn service
        body = self.client.show_vpnservice(self.vpnservice['id'])
        vpnservice = body['vpnservice']
        self.assertEqual(self.vpnservice['id'], vpnservice['id'])
        self.assertEqual(self.vpnservice['name'], vpnservice['name'])
        self.assertEqual(self.vpnservice['description'],
                         vpnservice['description'])
        self.assertEqual(self.vpnservice['router_id'], vpnservice['router_id'])
        self.assertEqual(self.vpnservice['subnet_id'], vpnservice['subnet_id'])
        self.assertEqual(self.vpnservice['tenant_id'], vpnservice['tenant_id'])
        valid_status = ["ACTIVE", "DOWN", "BUILD", "ERROR", "PENDING_CREATE",
                        "PENDING_UPDATE", "PENDING_DELETE"]
        self.assertIn(vpnservice['status'], valid_status)

    @test.attr(type='smoke')
    @test.idempotent_id('e0fb6200-da3d-4869-8340-a8c1956ca618')
    def test_list_ike_policies(self):
        # Verify the ike policy exists in the list of all IKE policies
        body = self.client.list_ikepolicies()
        ikepolicies = body['ikepolicies']
        self.assertIn(self.ikepolicy['id'], [i['id'] for i in ikepolicies])

    @test.attr(type='smoke')
    @test.idempotent_id('d61f29a5-160c-487d-bc0d-22e32e731b44')
    def test_create_update_delete_ike_policy(self):
        # Creates a IKE policy
        name = data_utils.rand_name('ike-policy')
        body = (self.client.create_ikepolicy(
                name=name,
                ike_version="v1",
                encryption_algorithm="aes-128",
                auth_algorithm="sha1"))
        ikepolicy = body['ikepolicy']
        self.assertIsNotNone(ikepolicy['id'])
        self.addCleanup(self._delete_ike_policy, ikepolicy['id'])

        # Update IKE Policy
        new_ike = {'name': data_utils.rand_name("New-IKE"),
                   'description': "Updated ike policy",
                   'encryption_algorithm': "aes-256",
                   'ike_version': "v2",
                   'pfs': "group14",
                   'lifetime': {'units': "seconds", 'value': 2000}}
        self.client.update_ikepolicy(ikepolicy['id'], **new_ike)
        # Confirm that update was successful by verifying using 'show'
        body = self.client.show_ikepolicy(ikepolicy['id'])
        ike_policy = body['ikepolicy']
        # NOTE: items() rather than Python-2-only iteritems() for py3 compat.
        for key, value in new_ike.items():
            self.assertIn(key, ike_policy)
            self.assertEqual(value, ike_policy[key])

        # Verification of ike policy delete
        self.client.delete_ikepolicy(ikepolicy['id'])
        body = self.client.list_ikepolicies()
        ikepolicies = [ikp['id'] for ikp in body['ikepolicies']]
        self.assertNotIn(ike_policy['id'], ikepolicies)

    @test.attr(type='smoke')
    @test.idempotent_id('b5fcf3a3-9407-452d-b8a8-e7c6c32baea8')
    def test_show_ike_policy(self):
        # Verifies the details of a ike policy
        body = self.client.show_ikepolicy(self.ikepolicy['id'])
        ikepolicy = body['ikepolicy']
        self.assertEqual(self.ikepolicy['id'], ikepolicy['id'])
        self.assertEqual(self.ikepolicy['name'], ikepolicy['name'])
        self.assertEqual(self.ikepolicy['description'],
                         ikepolicy['description'])
        self.assertEqual(self.ikepolicy['encryption_algorithm'],
                         ikepolicy['encryption_algorithm'])
        self.assertEqual(self.ikepolicy['auth_algorithm'],
                         ikepolicy['auth_algorithm'])
        self.assertEqual(self.ikepolicy['tenant_id'],
                         ikepolicy['tenant_id'])
        self.assertEqual(self.ikepolicy['pfs'],
                         ikepolicy['pfs'])
        self.assertEqual(self.ikepolicy['phase1_negotiation_mode'],
                         ikepolicy['phase1_negotiation_mode'])
        self.assertEqual(self.ikepolicy['ike_version'],
                         ikepolicy['ike_version'])

    @test.attr(type='smoke')
    @test.idempotent_id('19ea0a2f-add9-44be-b732-ffd8a7b42f37')
    def test_list_ipsec_policies(self):
        # Verify the ipsec policy exists in the list of all ipsec policies
        body = self.client.list_ipsecpolicies()
        ipsecpolicies = body['ipsecpolicies']
        self.assertIn(self.ipsecpolicy['id'], [i['id'] for i in ipsecpolicies])

    @test.attr(type='smoke')
    @test.idempotent_id('9c1701c9-329a-4e5d-930a-1ead1b3f86ad')
    def test_create_update_delete_ipsec_policy(self):
        # Creates an ipsec policy
        ipsec_policy_body = {'name': data_utils.rand_name('ipsec-policy'),
                             'pfs': 'group5',
                             'encryption_algorithm': "aes-128",
                             'auth_algorithm': 'sha1'}
        resp_body = self.client.create_ipsecpolicy(**ipsec_policy_body)
        ipsecpolicy = resp_body['ipsecpolicy']
        self.addCleanup(self._delete_ipsec_policy, ipsecpolicy['id'])
        self._assertExpected(ipsec_policy_body, ipsecpolicy)
        # Verification of ipsec policy update
        new_ipsec = {'description': 'Updated ipsec policy',
                     'pfs': 'group2',
                     'name': data_utils.rand_name("New-IPSec"),
                     'encryption_algorithm': "aes-256",
                     'lifetime': {'units': "seconds", 'value': '2000'}}
        body = self.client.update_ipsecpolicy(ipsecpolicy['id'],
                                              **new_ipsec)
        updated_ipsec_policy = body['ipsecpolicy']
        self._assertExpected(new_ipsec, updated_ipsec_policy)
        # Verification of ipsec policy delete
        self.client.delete_ipsecpolicy(ipsecpolicy['id'])
        self.assertRaises(lib_exc.NotFound,
                          self.client.delete_ipsecpolicy, ipsecpolicy['id'])

    @test.attr(type='smoke')
    @test.idempotent_id('601f8a05-9d3c-4539-a400-1c4b3a21b03b')
    def test_show_ipsec_policy(self):
        # Verifies the details of an ipsec policy
        body = self.client.show_ipsecpolicy(self.ipsecpolicy['id'])
        ipsecpolicy = body['ipsecpolicy']
        self._assertExpected(self.ipsecpolicy, ipsecpolicy)
636
neutron/tests/tempest/auth.py
Normal file
636
neutron/tests/tempest/auth.py
Normal file
@ -0,0 +1,636 @@
|
||||
# Copyright 2014 Hewlett-Packard Development Company, L.P.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import abc
|
||||
import copy
|
||||
import datetime
|
||||
import exceptions
|
||||
import re
|
||||
import urlparse
|
||||
|
||||
import six
|
||||
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.tests.tempest.services.identity.v2.json import token_client as json_v2id
|
||||
from neutron.tests.tempest.services.identity.v3.json import token_client as json_v3id
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
class AuthProvider(object):
    """
    Provide authentication

    Abstract base: concrete providers implement token acquisition
    (_get_auth), request decoration (_decorate_request), expiry checks
    (is_expired) and catalog lookup (base_url).
    """

    def __init__(self, credentials):
        """
        :param credentials: credentials for authentication
        :raises TypeError: if credentials are not a valid Credentials object
        """
        if self.check_credentials(credentials):
            self.credentials = credentials
        else:
            raise TypeError("Invalid credentials")
        # Cached (token, auth_data) tuple; filled lazily by get_auth()
        self.cache = None
        # Alternate auth data / request part used to inject invalid auth
        # into exactly one subsequent request (see set_alt_auth_data)
        self.alt_auth_data = None
        self.alt_part = None

    def __str__(self):
        return "Creds :{creds}, cached auth data: {cache}".format(
            creds=self.credentials, cache=self.cache)

    @abc.abstractmethod
    def _decorate_request(self, filters, method, url, headers=None, body=None,
                          auth_data=None):
        """
        Decorate request with authentication data
        """
        return

    @abc.abstractmethod
    def _get_auth(self):
        """Obtain fresh auth data from the identity service (no cache)."""
        return

    @abc.abstractmethod
    def _fill_credentials(self, auth_data_body):
        """Copy fields from the auth response body into self.credentials."""
        return

    def fill_credentials(self):
        """
        Fill credentials object with data from auth
        """
        auth_data = self.get_auth()
        # auth_data is a (token, body) tuple; the body carries the details
        self._fill_credentials(auth_data[1])
        return self.credentials

    @classmethod
    def check_credentials(cls, credentials):
        """
        Verify credentials are valid.
        """
        return isinstance(credentials, Credentials) and credentials.is_valid()

    @property
    def auth_data(self):
        return self.get_auth()

    @auth_data.deleter
    def auth_data(self):
        self.clear_auth()

    def get_auth(self):
        """
        Returns auth from cache if available, else auth first
        """
        if self.cache is None or self.is_expired(self.cache):
            self.set_auth()
        return self.cache

    def set_auth(self):
        """
        Forces setting auth, ignores cache if it exists.
        Refills credentials
        """
        self.cache = self._get_auth()
        self._fill_credentials(self.cache[1])

    def clear_auth(self):
        """
        Can be called to clear the access cache so that next request
        will fetch a new token and base_url.
        """
        self.cache = None
        # Also drop any credential fields learned from the old token
        self.credentials.reset()

    @abc.abstractmethod
    def is_expired(self, auth_data):
        """Return True when auth_data's token is (close to) expired."""
        return

    def auth_request(self, method, url, headers=None, body=None, filters=None):
        """
        Obtains auth data and decorates a request with that.
        :param method: HTTP method of the request
        :param url: relative URL of the request (path)
        :param headers: HTTP headers of the request
        :param body: HTTP body in case of POST / PUT
        :param filters: select a base URL out of the catalog
        :returns a Tuple (url, headers, body)
        """
        orig_req = dict(url=url, headers=headers, body=body)

        auth_url, auth_headers, auth_body = self._decorate_request(
            filters, method, url, headers, body)
        auth_req = dict(url=auth_url, headers=auth_headers, body=auth_body)

        # Overwrite part of the request if it has been requested
        if self.alt_part is not None:
            if self.alt_auth_data is not None:
                alt_url, alt_headers, alt_body = self._decorate_request(
                    filters, method, url, headers, body,
                    auth_data=self.alt_auth_data)
                alt_auth_req = dict(url=alt_url, headers=alt_headers,
                                    body=alt_body)
                auth_req[self.alt_part] = alt_auth_req[self.alt_part]

            else:
                # If alt auth data is None, skip auth in the requested part
                auth_req[self.alt_part] = orig_req[self.alt_part]

            # Next auth request will be normal, unless otherwise requested
            self.reset_alt_auth_data()

        return auth_req['url'], auth_req['headers'], auth_req['body']

    def reset_alt_auth_data(self):
        """
        Configure auth provider to provide valid authentication data
        """
        self.alt_part = None
        self.alt_auth_data = None

    def set_alt_auth_data(self, request_part, auth_data):
        """
        Configure auth provider to provide alt authentication data
        on a part of the *next* auth_request. If credentials are None,
        set invalid data.
        :param request_part: request part to contain invalid auth: url,
               headers, body
        :param auth_data: alternative auth_data from which to get the
               invalid data to be injected
        """
        self.alt_part = request_part
        self.alt_auth_data = auth_data

    @abc.abstractmethod
    def base_url(self, filters, auth_data=None):
        """
        Extracts the base_url based on provided filters
        """
        return
|
||||
|
||||
|
||||
class KeystoneAuthProvider(AuthProvider):
    """
    Common behaviour for Keystone-backed auth providers (v2 and v3):
    obtains tokens through a JSON token client and decorates requests
    with X-Auth-Token plus a catalog-derived base URL.
    """

    # Treat a token as expired this long before its real expiry, so a
    # request never goes out with a token about to lapse mid-flight.
    token_expiry_threshold = datetime.timedelta(seconds=60)

    def __init__(self, credentials, auth_url,
                 disable_ssl_certificate_validation=None,
                 ca_certs=None, trace_requests=None):
        """
        :param credentials: Credentials used to obtain tokens
        :param auth_url: Keystone endpoint the token client talks to
        :param disable_ssl_certificate_validation: skip cert validation
        :param ca_certs: CA bundle passed to the token client
        :param trace_requests: enable request tracing in the token client
        """
        super(KeystoneAuthProvider, self).__init__(credentials)
        self.dsvm = disable_ssl_certificate_validation
        self.ca_certs = ca_certs
        self.trace_requests = trace_requests
        self.auth_client = self._auth_client(auth_url)

    def _decorate_request(self, filters, method, url, headers=None, body=None,
                          auth_data=None):
        """Return (url, headers, body) with token header and absolute URL."""
        if auth_data is None:
            auth_data = self.auth_data
        token, _ = auth_data
        base_url = self.base_url(filters=filters, auth_data=auth_data)
        # build authenticated request
        # returns new request, it does not touch the original values
        _headers = copy.deepcopy(headers) if headers is not None else {}
        _headers['X-Auth-Token'] = str(token)
        if url is None or url == "":
            _url = base_url
        else:
            # Join base URL and url, and remove multiple contiguous slashes
            _url = "/".join([base_url, url])
            parts = [x for x in urlparse.urlparse(_url)]
            parts[2] = re.sub("/{2,}", "/", parts[2])
            _url = urlparse.urlunparse(parts)
        # no change to method or body
        return str(_url), _headers, body

    # NOTE(review): subclasses implement this as _auth_client(self, auth_url);
    # the abstract signature omits auth_url — confirm intended.
    @abc.abstractmethod
    def _auth_client(self):
        return

    @abc.abstractmethod
    def _auth_params(self):
        """Return the kwargs dict passed to the token client's get_token."""
        return

    def _get_auth(self):
        # Bypasses the cache
        auth_func = getattr(self.auth_client, 'get_token')
        auth_params = self._auth_params()

        # returns token, auth_data
        token, auth_data = auth_func(**auth_params)
        return token, auth_data

    def get_token(self):
        # auth_data is a (token, body) tuple; index 0 is the token
        return self.auth_data[0]
|
||||
|
||||
|
||||
class KeystoneV2AuthProvider(KeystoneAuthProvider):
    """Auth provider for the Keystone v2 identity API."""

    # Timestamp format of 'expires' in a v2 token body
    EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'

    def _auth_client(self, auth_url):
        # v2 JSON token client, sharing this provider's SSL options
        return json_v2id.TokenClientJSON(
            auth_url, disable_ssl_certificate_validation=self.dsvm,
            ca_certs=self.ca_certs, trace_requests=self.trace_requests)

    def _auth_params(self):
        # auth_data=True asks the client for the full access body,
        # not just the token string
        return dict(
            user=self.credentials.username,
            password=self.credentials.password,
            tenant=self.credentials.tenant_name,
            auth_data=True)

    def _fill_credentials(self, auth_data_body):
        """Copy tenant/user ids and names from the v2 token body,
        filling only fields the caller left unset."""
        tenant = auth_data_body['token']['tenant']
        user = auth_data_body['user']
        if self.credentials.tenant_name is None:
            self.credentials.tenant_name = tenant['name']
        if self.credentials.tenant_id is None:
            self.credentials.tenant_id = tenant['id']
        if self.credentials.username is None:
            self.credentials.username = user['name']
        if self.credentials.user_id is None:
            self.credentials.user_id = user['id']

    def base_url(self, filters, auth_data=None):
        """
        Filters can be:
        - service: compute, image, etc
        - region: the service region
        - endpoint_type: adminURL, publicURL, internalURL
        - api_version: replace catalog version with this
        - skip_path: take just the base URL
        """
        if auth_data is None:
            auth_data = self.auth_data
        token, _auth_data = auth_data
        service = filters.get('service')
        region = filters.get('region')
        endpoint_type = filters.get('endpoint_type', 'publicURL')

        if service is None:
            raise exceptions.EndpointNotFound("No service provided")

        _base_url = None
        for ep in _auth_data['serviceCatalog']:
            if ep["type"] == service:
                # NOTE(review): with several endpoints in the requested
                # region the LAST match wins (no break in the inner loop)
                # — presumably acceptable; confirm against tempest.
                for _ep in ep['endpoints']:
                    if region is not None and _ep['region'] == region:
                        _base_url = _ep.get(endpoint_type)
                if not _base_url:
                    # No region matching, use the first
                    _base_url = ep['endpoints'][0].get(endpoint_type)
                break
        if _base_url is None:
            raise exceptions.EndpointNotFound(service)

        parts = urlparse.urlparse(_base_url)
        if filters.get('api_version', None) is not None:
            # Swap the version segment of the path for the requested one,
            # keeping anything that follows it
            path = "/" + filters['api_version']
            noversion_path = "/".join(parts.path.split("/")[2:])
            if noversion_path != "":
                path += "/" + noversion_path
            _base_url = _base_url.replace(parts.path, path)
        if filters.get('skip_path', None) is not None and parts.path != '':
            _base_url = _base_url.replace(parts.path, "/")

        return _base_url

    def is_expired(self, auth_data):
        """True when the token expires within token_expiry_threshold."""
        _, access = auth_data
        expiry = datetime.datetime.strptime(access['token']['expires'],
                                            self.EXPIRY_DATE_FORMAT)
        return expiry - self.token_expiry_threshold <= \
            datetime.datetime.utcnow()
|
||||
|
||||
|
||||
class KeystoneV3AuthProvider(KeystoneAuthProvider):
    """Auth provider for the Keystone v3 identity API."""

    # Timestamp format of 'expires_at' in a v3 token body
    EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'

    def _auth_client(self, auth_url):
        # v3 JSON token client, sharing this provider's SSL options
        return json_v3id.V3TokenClientJSON(
            auth_url, disable_ssl_certificate_validation=self.dsvm,
            ca_certs=self.ca_certs, trace_requests=self.trace_requests)

    def _auth_params(self):
        # auth_data=True asks the client for the full token body,
        # not just the token string
        return dict(
            user=self.credentials.username,
            password=self.credentials.password,
            project=self.credentials.tenant_name,
            user_domain=self.credentials.user_domain_name,
            project_domain=self.credentials.project_domain_name,
            auth_data=True)

    def _fill_credentials(self, auth_data_body):
        """Copy project/domain/user fields from the v3 token body,
        filling only fields the caller left unset."""
        # project or domain, depending on the scope
        project = auth_data_body.get('project', None)
        domain = auth_data_body.get('domain', None)
        # user is always there
        user = auth_data_body['user']
        # Set project fields
        if project is not None:
            if self.credentials.project_name is None:
                self.credentials.project_name = project['name']
            if self.credentials.project_id is None:
                self.credentials.project_id = project['id']
            if self.credentials.project_domain_id is None:
                self.credentials.project_domain_id = project['domain']['id']
            if self.credentials.project_domain_name is None:
                self.credentials.project_domain_name = \
                    project['domain']['name']
        # Set domain fields
        if domain is not None:
            if self.credentials.domain_id is None:
                self.credentials.domain_id = domain['id']
            if self.credentials.domain_name is None:
                self.credentials.domain_name = domain['name']
        # Set user fields
        if self.credentials.username is None:
            self.credentials.username = user['name']
        if self.credentials.user_id is None:
            self.credentials.user_id = user['id']
        if self.credentials.user_domain_id is None:
            self.credentials.user_domain_id = user['domain']['id']
        if self.credentials.user_domain_name is None:
            self.credentials.user_domain_name = user['domain']['name']

    def base_url(self, filters, auth_data=None):
        """
        Filters can be:
        - service: compute, image, etc
        - region: the service region
        - endpoint_type: adminURL, publicURL, internalURL
        - api_version: replace catalog version with this
        - skip_path: take just the base URL
        """
        if auth_data is None:
            auth_data = self.auth_data
        token, _auth_data = auth_data
        service = filters.get('service')
        region = filters.get('region')
        endpoint_type = filters.get('endpoint_type', 'public')

        if service is None:
            raise exceptions.EndpointNotFound("No service provided")

        # Accept v2-style endpoint types (publicURL -> public)
        if 'URL' in endpoint_type:
            endpoint_type = endpoint_type.replace('URL', '')
        _base_url = None
        catalog = _auth_data['catalog']
        # Select entries with matching service type
        service_catalog = [ep for ep in catalog if ep['type'] == service]
        if len(service_catalog) > 0:
            service_catalog = service_catalog[0]['endpoints']
        else:
            # No matching service
            raise exceptions.EndpointNotFound(service)
        # Filter by endpoint type (interface)
        filtered_catalog = [ep for ep in service_catalog if
                            ep['interface'] == endpoint_type]
        if len(filtered_catalog) == 0:
            # No matching type, keep all and try matching by region at least
            filtered_catalog = service_catalog
        # Filter by region
        filtered_catalog = [ep for ep in filtered_catalog if
                            ep['region'] == region]
        if len(filtered_catalog) == 0:
            # No matching region, take the first endpoint
            filtered_catalog = [service_catalog[0]]
        # There should be only one match. If not take the first.
        _base_url = filtered_catalog[0].get('url', None)
        if _base_url is None:
            raise exceptions.EndpointNotFound(service)

        parts = urlparse.urlparse(_base_url)
        if filters.get('api_version', None) is not None:
            # Swap the version segment of the path for the requested one,
            # keeping anything that follows it
            path = "/" + filters['api_version']
            noversion_path = "/".join(parts.path.split("/")[2:])
            if noversion_path != "":
                path += "/" + noversion_path
            _base_url = _base_url.replace(parts.path, path)
        if filters.get('skip_path', None) is not None:
            _base_url = _base_url.replace(parts.path, "/")

        return _base_url

    def is_expired(self, auth_data):
        """True when the token expires within token_expiry_threshold."""
        _, access = auth_data
        expiry = datetime.datetime.strptime(access['expires_at'],
                                            self.EXPIRY_DATE_FORMAT)
        return expiry - self.token_expiry_threshold <= \
            datetime.datetime.utcnow()
|
||||
|
||||
|
||||
def is_identity_version_supported(identity_version):
    """Return True when *identity_version* has a registered
    credentials/auth-provider pair in IDENTITY_VERSION."""
    supported_versions = IDENTITY_VERSION
    return identity_version in supported_versions
|
||||
|
||||
|
||||
def get_credentials(auth_url, fill_in=True, identity_version='v2', **kwargs):
    """
    Builds a credentials object based on the configured auth_version

    :param auth_url (string): Full URI of the OpenStack Identity API(Keystone)
           which is used to fetch the token from Identity service.
    :param fill_in (boolean): obtain a token and fill in all credential
           details provided by the identity service. When fill_in is not
           specified, credentials are not validated. Validation can be invoked
           by invoking ``is_valid()``
    :param identity_version (string): identity API version is used to
           select the matching auth provider and credentials class
    :param kwargs (dict): Dict of credential key/value pairs

    Examples:

        Returns credentials from the provided parameters:
        >>> get_credentials(username='foo', password='bar')

        Returns credentials including IDs:
        >>> get_credentials(username='foo', password='bar', fill_in=True)
    """
    # Guard clause: reject unknown identity versions up front
    if not is_identity_version_supported(identity_version):
        raise exceptions.InvalidIdentityVersion(
            identity_version=identity_version)

    credential_class, auth_provider_class = IDENTITY_VERSION[identity_version]
    creds = credential_class(**kwargs)

    if fill_in:
        # Authenticate once and copy every field the identity service
        # reports back into the credentials object.
        provider = auth_provider_class(creds, auth_url)
        creds = provider.fill_credentials()

    return creds
|
||||
|
||||
|
||||
class Credentials(object):
    """
    Set of credentials for accessing OpenStack services

    ATTRIBUTES: list of valid class attributes representing credentials.
    """

    ATTRIBUTES = []

    def __init__(self, **kwargs):
        """
        Enforce the available attributes at init time (only).
        Additional attributes can still be set afterwards if tests need
        to do so.
        """
        # Remember the initial values so reset() can restore them later.
        self._initial = kwargs
        self._apply_credentials(kwargs)

    def _apply_credentials(self, attr):
        # Only keys declared in ATTRIBUTES may be set; anything else is
        # rejected so typos do not silently create dead attributes.
        for key in attr.keys():
            if key in self.ATTRIBUTES:
                setattr(self, key, attr[key])
            else:
                raise exceptions.InvalidCredentials

    def __str__(self):
        """
        Represent only attributes included in self.ATTRIBUTES
        """
        _repr = dict((k, getattr(self, k)) for k in self.ATTRIBUTES)
        return str(_repr)

    def __eq__(self, other):
        """
        Credentials are equal if attributes in self.ATTRIBUTES are equal
        """
        return str(self) == str(other)

    def __getattr__(self, key):
        # If an attribute is set, __getattr__ is not invoked
        # If an attribute is not set, and it is a known one, return None
        if key in self.ATTRIBUTES:
            return None
        else:
            raise AttributeError

    def __delitem__(self, key):
        # For backwards compatibility, support dict behaviour
        if key in self.ATTRIBUTES:
            delattr(self, key)
        else:
            raise AttributeError

    def get(self, item, default=None):
        """
        Dict-style lookup of *item*.

        :param item: attribute name to look up
        :param default: value returned for unknown attributes. Defaults
            to None to match dict.get (previously this argument was
            required; passing it explicitly still works).
        """
        # In this patch act as dict for backward compatibility
        try:
            return getattr(self, item)
        except AttributeError:
            return default

    def get_init_attributes(self):
        """Return the attribute names supplied at construction time."""
        return self._initial.keys()

    def is_valid(self):
        # Subclasses define what a minimally valid credential set is.
        raise NotImplementedError

    def reset(self):
        """Drop every set attribute, then re-apply the initial values."""
        # First delete all known attributes
        for key in self.ATTRIBUTES:
            if getattr(self, key) is not None:
                delattr(self, key)
        # Then re-apply initial setup
        self._apply_credentials(self._initial)
|
||||
|
||||
|
||||
class KeystoneV2Credentials(Credentials):
    """Credentials for the Keystone Identity v2 API."""

    ATTRIBUTES = ['username', 'password', 'tenant_name', 'user_id',
                  'tenant_id']

    def is_valid(self):
        """
        The minimum valid credential set is a username plus a password;
        the tenant is optional.
        """
        return self.username is not None and self.password is not None
|
||||
|
||||
|
||||
class KeystoneV3Credentials(Credentials):
    """
    Credentials suitable for the Keystone Identity V3 API
    """

    # NOTE: 'tenant_name' was listed twice in the original; the duplicate
    # was removed. Membership tests and iteration behave identically.
    ATTRIBUTES = ['domain_name', 'password', 'tenant_name', 'username',
                  'project_domain_id', 'project_domain_name', 'project_id',
                  'project_name', 'tenant_id', 'user_domain_id',
                  'user_domain_name', 'user_id']

    def __setattr__(self, key, value):
        """Keep tenant_*/project_* aliases and the user/project domain
        fields in sync whenever one of them is assigned."""
        parent = super(KeystoneV3Credentials, self)
        # for tenant_* set both project and tenant
        if key == 'tenant_id':
            parent.__setattr__('project_id', value)
        elif key == 'tenant_name':
            parent.__setattr__('project_name', value)
        # for project_* set both project and tenant
        if key == 'project_id':
            parent.__setattr__('tenant_id', value)
        elif key == 'project_name':
            parent.__setattr__('tenant_name', value)
        # for *_domain_* set both user and project if not set yet
        if key == 'user_domain_id':
            if self.project_domain_id is None:
                parent.__setattr__('project_domain_id', value)
        if key == 'project_domain_id':
            if self.user_domain_id is None:
                parent.__setattr__('user_domain_id', value)
        if key == 'user_domain_name':
            if self.project_domain_name is None:
                parent.__setattr__('project_domain_name', value)
        if key == 'project_domain_name':
            if self.user_domain_name is None:
                parent.__setattr__('user_domain_name', value)
        # support domain_name coming from config
        if key == 'domain_name':
            parent.__setattr__('user_domain_name', value)
            parent.__setattr__('project_domain_name', value)
        # finally trigger default behaviour for all attributes
        parent.__setattr__(key, value)

    def is_valid(self):
        """
        Valid combinations of v3 credentials (excluding token, scope)
        - User id, password (optional domain)
        - User name, password and its domain id/name
        For the scope, valid combinations are:
        - None
        - Project id (optional domain)
        - Project name and its domain id/name
        """
        valid_user_domain = any(
            [self.user_domain_id is not None,
             self.user_domain_name is not None])
        valid_project_domain = any(
            [self.project_domain_id is not None,
             self.project_domain_name is not None])
        valid_user = any(
            [self.user_id is not None,
             self.username is not None and valid_user_domain])
        valid_project = any(
            [self.project_name is None and self.project_id is None,
             self.project_id is not None,
             self.project_name is not None and valid_project_domain])
        return all([self.password is not None, valid_user, valid_project])
|
||||
|
||||
|
||||
# Maps an identity API version to its (credentials class, auth provider
# class) pair; consumed by get_credentials() and
# is_identity_version_supported().
IDENTITY_VERSION = {'v2': (KeystoneV2Credentials, KeystoneV2AuthProvider),
                    'v3': (KeystoneV3Credentials, KeystoneV3AuthProvider)}
|
0
neutron/tests/tempest/common/__init__.py
Normal file
0
neutron/tests/tempest/common/__init__.py
Normal file
350
neutron/tests/tempest/common/accounts.py
Normal file
350
neutron/tests/tempest/common/accounts.py
Normal file
@ -0,0 +1,350 @@
|
||||
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import hashlib
|
||||
import os
|
||||
|
||||
import yaml
|
||||
|
||||
from neutron.tests.tempest.common import cred_provider
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import exceptions
|
||||
from oslo_concurrency import lockutils
|
||||
from neutron.openstack.common import log as logging
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def read_accounts_yaml(path):
    """
    Parse the test accounts file and return its content.

    :param path: filesystem path of the YAML accounts file
    :returns: the deserialized YAML document
    """
    # Use a context manager so the file handle is closed even when
    # parsing raises (the original left the handle open).
    with open(path, 'r') as yaml_file:
        # NOTE(review): yaml.load can construct arbitrary Python objects.
        # The accounts file is operator-provided, but consider
        # yaml.safe_load.
        accounts = yaml.load(yaml_file)
    return accounts
|
||||
|
||||
|
||||
class Accounts(cred_provider.CredentialProvider):
    """
    Pre-provisioned credential provider.

    Accounts are read from CONF.auth.test_accounts_file and allocated
    to test processes through lock files under the oslo.concurrency
    lock path, so concurrent runs never share a set of credentials.
    """

    def __init__(self, name):
        super(Accounts, self).__init__(name)
        self.name = name
        if os.path.isfile(CONF.auth.test_accounts_file):
            accounts = read_accounts_yaml(CONF.auth.test_accounts_file)
            self.use_default_creds = False
        else:
            # No accounts file: fall back to configured default creds
            # (most operations below reject this mode explicitly).
            accounts = {}
            self.use_default_creds = True
        self.hash_dict = self.get_hash_dict(accounts)
        self.accounts_dir = os.path.join(CONF.oslo_concurrency.lock_path, 'test_accounts')
        self.isolated_creds = {}

    @classmethod
    def _append_role(cls, role, account_hash, hash_dict):
        # Record account_hash under hash_dict['roles'][role]
        if role in hash_dict['roles']:
            hash_dict['roles'][role].append(account_hash)
        else:
            hash_dict['roles'][role] = [account_hash]
        return hash_dict

    @classmethod
    def get_hash_dict(cls, accounts):
        """
        Build {'creds': {hash: account}, 'roles': {role: [hashes]}} from
        the parsed accounts list. The hash is the md5 of the account's
        credential fields after 'roles'/'types' have been popped.
        """
        hash_dict = {'roles': {}, 'creds': {}}
        # Loop over the accounts read from the yaml file
        for account in accounts:
            roles = []
            types = []
            if 'roles' in account:
                roles = account.pop('roles')
            if 'types' in account:
                types = account.pop('types')
            temp_hash = hashlib.md5()
            temp_hash.update(str(account))
            temp_hash_key = temp_hash.hexdigest()
            hash_dict['creds'][temp_hash_key] = account
            for role in roles:
                hash_dict = cls._append_role(role, temp_hash_key,
                                             hash_dict)
            # If types are set for the account append the matching role
            # subdict with the hash
            # NOTE(review): 'type' shadows the builtin; kept as-is.
            for type in types:
                if type == 'admin':
                    hash_dict = cls._append_role(CONF.identity.admin_role,
                                                 temp_hash_key, hash_dict)
                elif type == 'operator':
                    hash_dict = cls._append_role(
                        CONF.object_storage.operator_role, temp_hash_key,
                        hash_dict)
                elif type == 'reseller_admin':
                    hash_dict = cls._append_role(
                        CONF.object_storage.reseller_admin_role,
                        temp_hash_key,
                        hash_dict)
        return hash_dict

    def is_multi_user(self):
        # Default credentials is not a valid option with locking Account
        if self.use_default_creds:
            raise exceptions.InvalidConfiguration(
                "Account file %s doesn't exist" % CONF.auth.test_accounts_file)
        else:
            return len(self.hash_dict['creds']) > 1

    def is_multi_tenant(self):
        return self.is_multi_user()

    def _create_hash_file(self, hash_string):
        """Create the lock file for hash_string; True if newly created."""
        path = os.path.join(os.path.join(self.accounts_dir, hash_string))
        if not os.path.isfile(path):
            with open(path, 'w') as fd:
                fd.write(self.name)
            return True
        return False

    @lockutils.synchronized('test_accounts_io', external=True)
    def _get_free_hash(self, hashes):
        """
        Claim and return the first hash in *hashes* that has no lock
        file yet; raise InvalidConfiguration when all are in use.
        """
        # Cast as a list because in some edge cases a set will be passed in
        hashes = list(hashes)
        if not os.path.isdir(self.accounts_dir):
            os.mkdir(self.accounts_dir)
            # Create File from first hash (since none are in use)
            self._create_hash_file(hashes[0])
            return hashes[0]
        names = []
        for _hash in hashes:
            res = self._create_hash_file(_hash)
            if res:
                return _hash
            else:
                # Lock exists: remember who holds it for the error message
                path = os.path.join(os.path.join(self.accounts_dir,
                                                 _hash))
                with open(path, 'r') as fd:
                    names.append(fd.read())
        msg = ('Insufficient number of users provided. %s have allocated all '
               'the credentials for this allocation request' % ','.join(names))
        raise exceptions.InvalidConfiguration(msg)

    def _get_match_hash_list(self, roles=None):
        """Return hashes matching all *roles*; admin accounts are
        excluded unless the admin role was explicitly requested."""
        hashes = []
        if roles:
            # Loop over all the creds for each role in the subdict and generate
            # a list of cred lists for each role
            for role in roles:
                temp_hashes = self.hash_dict['roles'].get(role, None)
                if not temp_hashes:
                    raise exceptions.InvalidConfiguration(
                        "No credentials with role: %s specified in the "
                        "accounts ""file" % role)
                hashes.append(temp_hashes)
            # Take the list of lists and do a boolean and between each list to
            # find the creds which fall under all the specified roles
            temp_list = set(hashes[0])
            for hash_list in hashes[1:]:
                temp_list = temp_list & set(hash_list)
            hashes = temp_list
        else:
            hashes = self.hash_dict['creds'].keys()
        # NOTE(mtreinish): admin is a special case because of the increased
        # privlege set which could potentially cause issues on tests where that
        # is not expected. So unless the admin role isn't specified do not
        # allocate admin.
        admin_hashes = self.hash_dict['roles'].get(CONF.identity.admin_role,
                                                   None)
        if ((not roles or CONF.identity.admin_role not in roles) and
                admin_hashes):
            useable_hashes = [x for x in hashes if x not in admin_hashes]
        else:
            useable_hashes = hashes
        return useable_hashes

    def _get_creds(self, roles=None):
        if self.use_default_creds:
            raise exceptions.InvalidConfiguration(
                "Account file %s doesn't exist" % CONF.auth.test_accounts_file)
        useable_hashes = self._get_match_hash_list(roles)
        free_hash = self._get_free_hash(useable_hashes)
        return self.hash_dict['creds'][free_hash]

    @lockutils.synchronized('test_accounts_io', external=True)
    def remove_hash(self, hash_string):
        """Release the lock file for hash_string; prune the lock dir
        once it is empty."""
        hash_path = os.path.join(self.accounts_dir, hash_string)
        if not os.path.isfile(hash_path):
            LOG.warning('Expected an account lock file %s to remove, but '
                        'one did not exist' % hash_path)
        else:
            os.remove(hash_path)
            if not os.listdir(self.accounts_dir):
                os.rmdir(self.accounts_dir)

    def get_hash(self, creds):
        """Map a credentials object back to its hash key."""
        for _hash in self.hash_dict['creds']:
            # Comparing on the attributes that are expected in the YAML
            if all([getattr(creds, k) == self.hash_dict['creds'][_hash][k] for
                    k in creds.get_init_attributes()]):
                return _hash
        raise AttributeError('Invalid credentials %s' % creds)

    def remove_credentials(self, creds):
        _hash = self.get_hash(creds)
        self.remove_hash(_hash)

    def get_primary_creds(self):
        # Memoized: one primary credential per provider instance
        if self.isolated_creds.get('primary'):
            return self.isolated_creds.get('primary')
        creds = self._get_creds()
        primary_credential = cred_provider.get_credentials(**creds)
        self.isolated_creds['primary'] = primary_credential
        return primary_credential

    def get_alt_creds(self):
        # Memoized: one alt credential per provider instance
        if self.isolated_creds.get('alt'):
            return self.isolated_creds.get('alt')
        creds = self._get_creds()
        alt_credential = cred_provider.get_credentials(**creds)
        self.isolated_creds['alt'] = alt_credential
        return alt_credential

    def get_creds_by_roles(self, roles, force_new=False):
        roles = list(set(roles))
        exist_creds = self.isolated_creds.get(str(roles), None)
        # The force kwarg is used to allocate an additional set of creds with
        # the same role list. The index used for the previously allocation
        # in the isolated_creds dict will be moved.
        if exist_creds and not force_new:
            return exist_creds
        elif exist_creds and force_new:
            new_index = str(roles) + '-' + str(len(self.isolated_creds))
            self.isolated_creds[new_index] = exist_creds
        creds = self._get_creds(roles=roles)
        role_credential = cred_provider.get_credentials(**creds)
        self.isolated_creds[str(roles)] = role_credential
        return role_credential

    def clear_isolated_creds(self):
        # Release every allocated account's lock file
        for creds in self.isolated_creds.values():
            self.remove_credentials(creds)

    def get_admin_creds(self):
        return self.get_creds_by_roles([CONF.identity.admin_role])

    def is_role_available(self, role):
        if self.use_default_creds:
            return False
        else:
            if self.hash_dict['roles'].get(role):
                return True
            return False

    def admin_available(self):
        return self.is_role_available(CONF.identity.admin_role)
|
||||
|
||||
|
||||
class NotLockingAccounts(Accounts):
|
||||
"""Credentials provider which always returns the first and second
|
||||
configured accounts as primary and alt users.
|
||||
This credential provider can be used in case of serial test execution
|
||||
to preserve the current behaviour of the serial tempest run.
|
||||
"""
|
||||
|
||||
    def _unique_creds(self, cred_arg=None):
        """Verify that the configured credentials are valid and distinct """
        if self.use_default_creds:
            try:
                user = self.get_primary_creds()
                alt_user = self.get_alt_creds()
                # Distinct when the compared field (e.g. username) differs
                return getattr(user, cred_arg) != getattr(alt_user, cred_arg)
            except exceptions.InvalidCredentials as ic:
                msg = "At least one of the configured credentials is " \
                      "not valid: %s" % ic.message
                raise exceptions.InvalidConfiguration(msg)
        else:
            # TODO(andreaf) Add a uniqueness check here
            return len(self.hash_dict['creds']) > 1
|
||||
|
||||
    def is_multi_user(self):
        # Multi-user when the two configured accounts differ by username
        return self._unique_creds('username')
|
||||
|
||||
    def is_multi_tenant(self):
        # Multi-tenant when the two configured accounts differ by tenant_id
        return self._unique_creds('tenant_id')
|
||||
|
||||
    def get_creds(self, id, roles=None):
        """
        Return the account dict at position *id* among the hashes that
        match *roles* (all hashes when roles is None).
        """
        # NOTE(review): 'id' shadows the builtin; kept for API compat.
        try:
            hashes = self._get_match_hash_list(roles)
            # No need to sort the dict as within the same python process
            # the HASH seed won't change, so subsequent calls to keys()
            # will return the same result
            _hash = hashes[id]
        except IndexError:
            msg = 'Insufficient number of users provided'
            raise exceptions.InvalidConfiguration(msg)
        return self.hash_dict['creds'][_hash]
|
||||
|
||||
def get_primary_creds(self):
|
||||
if self.isolated_creds.get('primary'):
|
||||
return self.isolated_creds.get('primary')
|
||||
if not self.use_default_creds:
|
||||
creds = self.get_creds(0)
|
||||
primary_credential = cred_provider.get_credentials(**creds)
|
||||
else:
|
||||
primary_credential = cred_provider.get_configured_credentials(
|
||||
'user')
|
||||
self.isolated_creds['primary'] = primary_credential
|
||||
return primary_credential
|
||||
|
||||
def get_alt_creds(self):
|
||||
if self.isolated_creds.get('alt'):
|
||||
return self.isolated_creds.get('alt')
|
||||
if not self.use_default_creds:
|
||||
creds = self.get_creds(1)
|
||||
alt_credential = cred_provider.get_credentials(**creds)
|
||||
else:
|
||||
alt_credential = cred_provider.get_configured_credentials(
|
||||
'alt_user')
|
||||
self.isolated_creds['alt'] = alt_credential
|
||||
return alt_credential
|
||||
|
||||
def clear_isolated_creds(self):
|
||||
self.isolated_creds = {}
|
||||
|
||||
def get_admin_creds(self):
|
||||
if not self.use_default_creds:
|
||||
return self.get_creds_by_roles([CONF.identity.admin_role])
|
||||
else:
|
||||
creds = cred_provider.get_configured_credentials(
|
||||
"identity_admin", fill_in=False)
|
||||
self.isolated_creds['admin'] = creds
|
||||
return creds
|
||||
|
||||
def get_creds_by_roles(self, roles, force_new=False):
|
||||
roles = list(set(roles))
|
||||
exist_creds = self.isolated_creds.get(str(roles), None)
|
||||
index = 0
|
||||
if exist_creds and not force_new:
|
||||
return exist_creds
|
||||
elif exist_creds and force_new:
|
||||
new_index = str(roles) + '-' + str(len(self.isolated_creds))
|
||||
self.isolated_creds[new_index] = exist_creds
|
||||
# Figure out how many existing creds for this roles set are present
|
||||
# use this as the index the returning hash list to ensure separate
|
||||
# creds are returned with force_new being True
|
||||
for creds_names in self.isolated_creds:
|
||||
if str(roles) in creds_names:
|
||||
index = index + 1
|
||||
if not self.use_default_creds:
|
||||
creds = self.get_creds(index, roles=roles)
|
||||
role_credential = cred_provider.get_credentials(**creds)
|
||||
self.isolated_creds[str(roles)] = role_credential
|
||||
else:
|
||||
msg = "Default credentials can not be used with specifying "\
|
||||
"credentials by roles"
|
||||
raise exceptions.InvalidConfiguration(msg)
|
||||
return role_credential
|
39
neutron/tests/tempest/common/commands.py
Normal file
39
neutron/tests/tempest/common/commands.py
Normal file
@ -0,0 +1,39 @@
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import shlex
|
||||
import subprocess
|
||||
|
||||
from neutron.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def copy_file_to_host(file_from, dest, host, username, pkey):
    """Copy a local file to ``dest`` on ``host`` via scp.

    :param file_from: path of the local source file
    :param dest: destination path on the remote host
    :param host: remote host name or IP address
    :param username: remote ssh user
    :param pkey: path to the private key used for authentication
    :returns: combined stdout/stderr output of the scp process
    """
    dest = "%s@%s:%s" % (username, host, dest)
    cmd = "scp -v -o UserKnownHostsFile=/dev/null " \
          "-o StrictHostKeyChecking=no " \
          "-i %(pkey)s %(file1)s %(dest)s" % {'pkey': pkey,
                                              'file1': file_from,
                                              'dest': dest}
    # NOTE(review): encoding before shlex.split is a py2 unicode idiom;
    # shlex.split() rejects bytes on py3 — revisit when porting.
    args = shlex.split(cmd.encode('utf-8'))
    subprocess_args = {'stdout': subprocess.PIPE,
                       'stderr': subprocess.STDOUT}
    proc = subprocess.Popen(args, **subprocess_args)
    # stderr is always None here since it is redirected into stdout above.
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        # Lazy %-style logging args instead of eager .format(); this also
        # restores the space that was missing after "exit status N,".
        LOG.error("Command %s returned with exit status %s, output %s, "
                  "error %s", cmd, proc.returncode, stdout, stderr)
    return stdout
|
123
neutron/tests/tempest/common/cred_provider.py
Normal file
123
neutron/tests/tempest/common/cred_provider.py
Normal file
@ -0,0 +1,123 @@
|
||||
# Copyright (c) 2014 Deutsche Telekom AG
|
||||
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import abc
|
||||
|
||||
import six
|
||||
|
||||
from neutron.tests.tempest import auth
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import exceptions
|
||||
from neutron.openstack.common import log as logging
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# Type of credentials available from configuration
|
||||
CREDENTIAL_TYPES = {
|
||||
'identity_admin': ('identity', 'admin'),
|
||||
'user': ('identity', None),
|
||||
'alt_user': ('identity', 'alt')
|
||||
}
|
||||
|
||||
|
||||
# Read credentials from configuration, builds a Credentials object
|
||||
# based on the specified or configured version
|
||||
def get_configured_credentials(credential_type, fill_in=True,
                               identity_version=None):
    """Build a Credentials object from values in the configuration file.

    :param credential_type: one of the keys of CREDENTIAL_TYPES
    :param fill_in: when True, credentials are completed (and validated)
        by the auth layer; when False they are only validated here
    :param identity_version: 'v2' or 'v3'; defaults to the configured one
    :raises exceptions.InvalidConfiguration: unsupported auth version or
        incomplete credentials when fill_in is False
    :raises exceptions.InvalidCredentials: unknown credential_type
    """
    version = identity_version or CONF.identity.auth_version
    if version not in ('v2', 'v3'):
        raise exceptions.InvalidConfiguration(
            'Unsupported auth version: %s' % version)
    if credential_type not in CREDENTIAL_TYPES:
        raise exceptions.InvalidCredentials()
    attributes = ['username', 'password', 'tenant_name']
    if version == 'v3':
        attributes.append('domain_name')
    # Pull each credential part out of the relevant config section.
    section_name, prefix = CREDENTIAL_TYPES[credential_type]
    section = getattr(CONF, section_name)
    params = {}
    for attr in attributes:
        option = attr if prefix is None else prefix + "_" + attr
        params[attr] = getattr(section, option)
    # Build and validate credentials. We are reading configured
    # credentials, so validate them even if fill_in is False.
    credentials = get_credentials(fill_in=fill_in, **params)
    if not fill_in and not credentials.is_valid():
        msg = ("The %s credentials are incorrectly set in the config file."
               " Double check that all required values are assigned" %
               credential_type)
        raise exceptions.InvalidConfiguration(msg)
    return credentials
|
||||
|
||||
|
||||
# Wrapper around auth.get_credentials to use the configured identity version
|
||||
# is none is specified
|
||||
def get_credentials(fill_in=True, identity_version=None, **kwargs):
    """Wrapper around auth.get_credentials that defaults the identity
    version to the configured one when none is specified.
    """
    version = identity_version or CONF.identity.auth_version
    if version == 'v3':
        auth_url = CONF.identity.uri_v3
        # Default the domain from config when the caller supplied none.
        domain_fields = set(f for f in auth.KeystoneV3Credentials.ATTRIBUTES
                            if 'domain' in f)
        if not domain_fields.intersection(kwargs.keys()):
            kwargs['user_domain_name'] = CONF.identity.admin_domain_name
    else:
        auth_url = CONF.identity.uri
    return auth.get_credentials(auth_url,
                                fill_in=fill_in,
                                identity_version=version,
                                **kwargs)
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
class CredentialProvider(object):
    """Abstract interface for objects handing out test credentials.

    Concrete implementations (pre-provisioned accounts, tenant
    isolation, ...) decide where primary/alt/admin credentials come from.
    """

    def __init__(self, name, password='pass', network_resources=None):
        # NOTE(review): only ``name`` is stored; ``password`` and
        # ``network_resources`` are accepted for interface compatibility
        # with implementations but are ignored here.
        self.name = name

    @abc.abstractmethod
    def get_primary_creds(self):
        """Return the credentials used as the primary test user."""
        return

    @abc.abstractmethod
    def get_admin_creds(self):
        """Return credentials carrying the admin role."""
        return

    @abc.abstractmethod
    def get_alt_creds(self):
        """Return the credentials used as the alternate test user."""
        return

    @abc.abstractmethod
    def clear_isolated_creds(self):
        """Release/forget any credentials produced by this provider."""
        return

    @abc.abstractmethod
    def is_multi_user(self):
        """Return True if more than one distinct user is available."""
        return

    @abc.abstractmethod
    def is_multi_tenant(self):
        """Return True if more than one distinct tenant is available."""
        return

    @abc.abstractmethod
    def get_creds_by_roles(self, roles, force_new=False):
        """Return credentials matching all of ``roles``."""
        return

    @abc.abstractmethod
    def is_role_available(self, role):
        """Return True if credentials with ``role`` can be provided."""
        return
|
64
neutron/tests/tempest/common/credentials.py
Normal file
64
neutron/tests/tempest/common/credentials.py
Normal file
@ -0,0 +1,64 @@
|
||||
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
|
||||
from neutron.tests.tempest.common import accounts
|
||||
from neutron.tests.tempest.common import cred_provider
|
||||
from neutron.tests.tempest.common import isolated_creds
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import exceptions
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
# Return the right implementation of CredentialProvider based on config
|
||||
# Dropping interface and password, as they are never used anyways
|
||||
# TODO(andreaf) Drop them from the CredentialsProvider interface completely
|
||||
def get_isolated_credentials(name, network_resources=None,
                             force_tenant_isolation=False):
    """Return the CredentialProvider implementation selected by config.

    If a test requires a new account to work, it can have it via forcing
    tenant isolation; a new account is then produced only for that test.
    In case admin credentials are not available for account creation, the
    test should be skipped, else it would fail.
    """
    if CONF.auth.allow_tenant_isolation or force_tenant_isolation:
        return isolated_creds.IsolatedCreds(
            name=name,
            network_resources=network_resources)
    # Most params are not relevant for pre-created accounts.
    if CONF.auth.locking_credentials_provider:
        return accounts.Accounts(name=name)
    return accounts.NotLockingAccounts(name=name)
|
||||
|
||||
|
||||
# We want a helper function here to check and see if admin credentials
|
||||
# are available so we can do a single call from skip_checks if admin
|
||||
# creds area vailable.
|
||||
def is_admin_available():
    """Single check for whether admin credentials are available.

    Intended for use from skip_checks.
    """
    # Tenant isolation creates accounts itself, so admin is available.
    if CONF.auth.allow_tenant_isolation:
        return True
    # With a test accounts file, ask the file-backed provider.
    if os.path.isfile(CONF.auth.test_accounts_file):
        return accounts.Accounts(name='check_admin').admin_available()
    # Otherwise fall back to the configured identity_admin credentials.
    try:
        cred_provider.get_configured_credentials('identity_admin')
    except exceptions.InvalidConfiguration:
        return False
    return True
|
226
neutron/tests/tempest/common/custom_matchers.py
Normal file
226
neutron/tests/tempest/common/custom_matchers.py
Normal file
@ -0,0 +1,226 @@
|
||||
# Copyright 2013 NTT Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import re
|
||||
|
||||
from testtools import helpers
|
||||
|
||||
|
||||
class ExistsAllResponseHeaders(object):
    """Matcher verifying the presence of Swift's common response headers.

    The set of required headers depends on the HTTP method and on the
    target (Account, Container or Object). 'Specific' headers such as
    X-Account-Meta-* or X-Object-Manifest must still be checked by each
    individual test.
    """

    def __init__(self, target, method):
        """
        param: target Account/Container/Object
        param: method PUT/GET/HEAD/DELETE/COPY/POST
        """
        self.target = target
        self.method = method

    def match(self, actual):
        """Return None on success, a NonExistentHeader mismatch otherwise.

        param: actual HTTP response headers
        """
        # Headers every response must carry, checked first (same order
        # as the checks are applied).
        required = ['content-length', 'content-type', 'x-trans-id', 'date']
        # Method/target specific headers follow.
        if self.method in ('GET', 'HEAD'):
            required += ['x-timestamp', 'accept-ranges']
            if self.target == 'Account':
                required += ['x-account-bytes-used',
                             'x-account-container-count',
                             'x-account-object-count']
            elif self.target == 'Container':
                required += ['x-container-bytes-used',
                             'x-container-object-count']
            elif self.target == 'Object':
                required += ['etag', 'last-modified']
        elif self.method == 'PUT':
            if self.target == 'Object':
                required += ['etag', 'last-modified']
        elif self.method == 'COPY':
            if self.target == 'Object':
                required += ['etag', 'last-modified',
                             'x-copied-from', 'x-copied-from-last-modified']
        for header in required:
            if header not in actual:
                return NonExistentHeader(header)
        return None
|
||||
|
||||
|
||||
class NonExistentHeader(object):
    """Mismatch object reporting a header missing from a Swift response."""

    def __init__(self, header):
        self.header = header

    def describe(self):
        """Human-readable description of the missing header."""
        return "%s header does not exist" % self.header

    def get_details(self):
        # No extra detail beyond the description.
        return {}
|
||||
|
||||
|
||||
class AreAllWellFormatted(object):
    """
    Specific matcher to check the correctness of formats of values of
    Swift's response headers.

    This matcher checks the format of values of response headers.
    When checking the format of values of 'specific' headers such as
    X-Account-Meta-* or X-Object-Manifest for example, those values must
    be checked in each test code.
    """

    def match(self, actual):
        """Return None when all headers are well formatted, otherwise an
        InvalidFormat mismatch for the first offending header.
        """
        # items() instead of iteritems() works on both py2 and py3.
        for key, value in actual.items():
            # Byte/object counters must be plain decimal integers.
            if key in ('content-length', 'x-account-bytes-used',
                       'x-account-container-count', 'x-account-object-count',
                       'x-container-bytes-used', 'x-container-object-count')\
                and not value.isdigit():
                return InvalidFormat(key, value)
            # These headers only need to be non-empty.
            elif key in ('content-type', 'date', 'last-modified',
                         'x-copied-from-last-modified') and not value:
                return InvalidFormat(key, value)
            # Raw strings so the regex escapes are explicit.
            elif key == 'x-timestamp' and not re.match(r"^\d+\.?\d*\Z", value):
                return InvalidFormat(key, value)
            elif key == 'x-copied-from' and not re.match(r"\S+/\S+", value):
                return InvalidFormat(key, value)
            elif key == 'x-trans-id' and \
                not re.match(r"^tx[0-9a-f]{21}-[0-9a-f]{10}.*", value):
                return InvalidFormat(key, value)
            elif key == 'accept-ranges' and not value == 'bytes':
                return InvalidFormat(key, value)
            elif key == 'etag' and not value.isalnum():
                return InvalidFormat(key, value)
            elif key == 'transfer-encoding' and not value == 'chunked':
                return InvalidFormat(key, value)

        return None
|
||||
|
||||
|
||||
class InvalidFormat(object):
    """Mismatch object reporting a badly formatted response header value."""

    def __init__(self, key, value):
        self.key = key
        self.value = value

    def describe(self):
        """Human-readable description of the offending header/value."""
        return "InvalidFormat (%s, %s)" % (self.key, self.value)

    def get_details(self):
        # No extra detail beyond the description.
        return {}
|
||||
|
||||
|
||||
class MatchesDictExceptForKeys(object):
    """Matches two dictionaries, ignoring the entries whose keys appear
    in ``excluded_keys``.
    """

    def __init__(self, expected, excluded_keys=None):
        self.expected = expected
        self.excluded_keys = excluded_keys if excluded_keys is not None else []

    def match(self, actual):
        """Return None on equality, a DictMismatch otherwise."""
        excluded = self.excluded_keys
        # Inline equivalent of testtools.helpers.dict_subtract: keep only
        # the entries whose keys are not excluded.
        want = dict((k, v) for k, v in self.expected.items()
                    if k not in excluded)
        got = dict((k, v) for k, v in actual.items()
                   if k not in excluded)
        if got != want:
            return DictMismatch(want, got)
|
||||
|
||||
|
||||
class DictMismatch(object):
    """Mismatch between two dicts describes deltas"""

    def __init__(self, expected, actual):
        self.expected = expected
        self.actual = actual
        self.intersect = set(self.expected) & set(self.actual)
        self.symmetric_diff = set(self.expected) ^ set(self.actual)

    def _format_dict(self, dict_to_format):
        # Render entries with keys in sorted order so the message is
        # deterministic regardless of dict iteration order.
        # NOTE(mtreinish): needed to ensure a deterministic error msg for
        # testing.
        parts = ["'%s': %s, " % (key, dict_to_format[key])
                 for key in sorted(dict_to_format)]
        return ("{" + "".join(parts))[:-2] + "}"

    def describe(self):
        """Summarize keys unique to either dict and differing values."""
        msg = ""
        if self.symmetric_diff:
            # Inline equivalent of testtools.helpers.dict_subtract.
            only_expected = dict((k, v) for k, v in self.expected.items()
                                 if k not in self.actual)
            only_actual = dict((k, v) for k, v in self.actual.items()
                               if k not in self.expected)
            if only_expected:
                msg += "Only in expected:\n %s\n" % self._format_dict(
                    only_expected)
            if only_actual:
                msg += "Only in actual:\n %s\n" % self._format_dict(
                    only_actual)
        diff_set = set(o for o in self.intersect if
                       self.expected[o] != self.actual[o])
        if diff_set:
            msg += "Differences:\n"
            for o in diff_set:
                msg += " %s: expected %s, actual %s\n" % (
                    o, self.expected[o], self.actual[o])
        return msg

    def get_details(self):
        return {}
|
0
neutron/tests/tempest/common/generator/__init__.py
Normal file
0
neutron/tests/tempest/common/generator/__init__.py
Normal file
182
neutron/tests/tempest/common/generator/base_generator.py
Normal file
182
neutron/tests/tempest/common/generator/base_generator.py
Normal file
@ -0,0 +1,182 @@
|
||||
# Copyright 2014 Deutsche Telekom AG
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import copy
|
||||
import functools
|
||||
|
||||
import jsonschema
|
||||
|
||||
from neutron.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _check_for_expected_result(name, schema):
|
||||
expected_result = None
|
||||
if "results" in schema:
|
||||
if name in schema["results"]:
|
||||
expected_result = schema["results"][name]
|
||||
return expected_result
|
||||
|
||||
|
||||
def generator_type(*args, **kwargs):
    """Decorator factory tagging a generator with the schema types it
    supports (positional args) plus any extra attributes (keyword args,
    e.g. needed_property).
    """
    def wrapper(func):
        func.types = args
        for key, value in kwargs.items():
            setattr(func, key, value)
        return func
    return wrapper
|
||||
|
||||
|
||||
def simple_generator(fn):
    """
    Decorator for simple generators that return one value.

    Wraps the generator so it returns a (name, value, expected_result)
    tuple, or None when the generator produced nothing.
    """
    @functools.wraps(fn)
    def wrapped(self, schema):
        value = fn(self, schema)
        if value is None:
            return None
        expected = _check_for_expected_result(fn.__name__, schema)
        return (fn.__name__, value, expected)
    return wrapped
|
||||
|
||||
|
||||
class BasicGeneratorSet(object):
    """Base class for schema-driven test data generators.

    Subclasses define generator methods tagged with the ``generator_type``
    decorator; ``__init__`` indexes those methods in ``types_dict`` keyed
    by the JSON-schema type(s) they handle.
    """

    _instance = None

    # Meta-schema that an API test description must conform to.
    schema = {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "http-method": {
                "enum": ["GET", "PUT", "HEAD",
                         "POST", "PATCH", "DELETE", 'COPY']
            },
            "admin_client": {"type": "boolean"},
            "url": {"type": "string"},
            "default_result_code": {"type": "integer"},
            "json-schema": {},
            "resources": {
                "type": "array",
                "items": {
                    "oneOf": [
                        {"type": "string"},
                        {
                            "type": "object",
                            "properties": {
                                "name": {"type": "string"},
                                "expected_result": {"type": "integer"}
                            }
                        }
                    ]
                }
            },
            "results": {
                "type": "object",
                "properties": {}
            }
        },
        "required": ["name", "http-method", "url"],
        "additionalProperties": False,
    }

    def __init__(self):
        # Map schema type name -> list of generator methods supporting it.
        self.types_dict = {}
        for name in dir(self):
            method = getattr(self, name)
            # Skip dunders; only methods tagged by generator_type carry
            # a 'types' attribute.  ('__' not in name replaces the
            # ambiguous "not'__' in m" spelling.)
            if callable(method) and '__' not in name:
                if hasattr(method, "types"):
                    for schema_type in method.types:
                        self.types_dict.setdefault(schema_type, [])
                        self.types_dict[schema_type].append(method)

    def validate_schema(self, schema):
        """Validate ``schema`` (and its embedded json-schema, if any)
        against the meta-schema.
        """
        if "json-schema" in schema:
            jsonschema.Draft4Validator.check_schema(schema['json-schema'])
        jsonschema.validate(schema, self.schema)

    def generate_scenarios(self, schema, path=None):
        """
        Generates the scenario (all possible test cases) out of the given
        schema.

        :param schema: a dict style schema (see ``BasicGeneratorSet.schema``)
        :param path: the schema path if the given schema is a subschema
        """
        schema_type = schema['type']
        scenarios = []

        if schema_type == 'object':
            # Recurse into every declared property, extending the path.
            # items() instead of iteritems() keeps py2/py3 compatibility.
            properties = schema["properties"]
            for attribute, definition in properties.items():
                current_path = copy.copy(path)
                if path is not None:
                    current_path.append(attribute)
                else:
                    current_path = [attribute]
                scenarios.extend(
                    self.generate_scenarios(definition, current_path))
        elif isinstance(schema_type, list):
            if "integer" in schema_type:
                schema_type = "integer"
            else:
                raise Exception("non-integer list types not supported")
        for generator in self.types_dict[schema_type]:
            # Generators with a needed_property tag only apply when the
            # schema actually constrains that property.
            if hasattr(generator, "needed_property"):
                prop = generator.needed_property
                if (prop not in schema or
                    schema[prop] is None or
                    schema[prop] is False):
                    continue

            name = generator.__name__
            if ("exclude_tests" in schema and
               name in schema["exclude_tests"]):
                continue
            if path is not None:
                name = "%s_%s" % ("_".join(path), name)
            scenarios.append({
                "_negtest_name": name,
                "_negtest_generator": generator,
                "_negtest_schema": schema,
                "_negtest_path": path})
        return scenarios

    def generate_payload(self, test, schema):
        """
        Generates one jsonschema out of the given test. It's mandatory to use
        generate_scenarios before to register all needed variables to the test.

        :param test: A test object (scenario) with all _negtest variables on it
        :param schema: schema for the test
        :returns: the expected result code for the mutated payload, or None
        """
        generator = test._negtest_generator
        ret = generator(test._negtest_schema)
        path = copy.copy(test._negtest_path)
        expected_result = None

        if ret is not None:
            # Reuse the first generator result instead of invoking the
            # generator a second time, as the original code did.
            invalid_snippet = ret[1]
            expected_result = ret[2]
            element = path.pop()
            if len(path) > 0:
                # functools.reduce works on both py2 and py3 (bare
                # ``reduce`` is a py2-only builtin).
                schema_snip = functools.reduce(dict.get, path, schema)
                schema_snip[element] = invalid_snippet
            else:
                schema[element] = invalid_snippet
        return expected_result
|
78
neutron/tests/tempest/common/generator/negative_generator.py
Normal file
78
neutron/tests/tempest/common/generator/negative_generator.py
Normal file
@ -0,0 +1,78 @@
|
||||
# Copyright 2014 Deutsche Telekom AG
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import copy
|
||||
|
||||
import neutron.tests.tempest.common.generator.base_generator as base
|
||||
import neutron.tests.tempest.common.generator.valid_generator as valid
|
||||
from neutron.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class NegativeTestGenerator(base.BasicGeneratorSet):
    """Produces values that deliberately violate a given JSON schema."""

    @base.generator_type("string")
    @base.simple_generator
    def gen_int(self, _):
        # An integer is invalid wherever a string is required.
        return 4

    @base.generator_type("integer")
    @base.simple_generator
    def gen_string(self, _):
        # A string is invalid wherever an integer is required.
        return "XXXXXX"

    @base.generator_type("integer", "string")
    def gen_none(self, schema):
        # Note(mkoderer): it's not using the decorator otherwise it'd be
        # filtered
        expected_result = base._check_for_expected_result('gen_none', schema)
        return ('gen_none', None, expected_result)

    @base.generator_type("string")
    @base.simple_generator
    def gen_str_min_length(self, schema):
        # One character shorter than the allowed minimum, if any.
        shortest = schema.get("minLength", 0)
        if shortest > 0:
            return "x" * (shortest - 1)

    @base.generator_type("string", needed_property="maxLength")
    @base.simple_generator
    def gen_str_max_length(self, schema):
        # One character longer than the allowed maximum.
        longest = schema.get("maxLength", -1)
        return "x" * (longest + 1)

    @base.generator_type("integer", needed_property="minimum")
    @base.simple_generator
    def gen_int_min(self, schema):
        # Just below the minimum (the minimum itself when exclusive).
        below = schema["minimum"]
        if "exclusiveMinimum" not in schema:
            below -= 1
        return below

    @base.generator_type("integer", needed_property="maximum")
    @base.simple_generator
    def gen_int_max(self, schema):
        # Just above the maximum (the maximum itself when exclusive).
        above = schema["maximum"]
        if "exclusiveMaximum" not in schema:
            above += 1
        return above

    @base.generator_type("object", needed_property="additionalProperties")
    @base.simple_generator
    def gen_obj_add_attr(self, schema):
        # A valid object plus one extra attribute violates
        # additionalProperties=False.
        extended = copy.deepcopy(
            valid.ValidTestGenerator().generate_valid(schema))
        extended["$$$$$$$$$$"] = "xxx"
        return extended
|
81
neutron/tests/tempest/common/generator/valid_generator.py
Normal file
81
neutron/tests/tempest/common/generator/valid_generator.py
Normal file
@ -0,0 +1,81 @@
|
||||
# Copyright 2014 Deutsche Telekom AG
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import neutron.tests.tempest.common.generator.base_generator as base
|
||||
from neutron.openstack.common import log as logging
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ValidTestGenerator(base.BasicGeneratorSet):
    """Generates values that satisfy a given JSON schema."""

    @base.generator_type("string")
    @base.simple_generator
    def generate_valid_string(self, schema):
        """Return the shortest acceptable string for the schema."""
        size = schema.get("minLength", 1)
        # TODO(dkr mko): handle format and pattern
        return "x" * size

    @base.generator_type("integer")
    @base.simple_generator
    def generate_valid_integer(self, schema):
        """Return an integer just inside the schema's declared bounds."""
        # TODO(dkr mko): handle multipleOf
        if "minimum" in schema:
            minimum = schema["minimum"]
            if "exclusiveMinimum" not in schema:
                return minimum
            else:
                return minimum + 1
        if "maximum" in schema:
            maximum = schema["maximum"]
            if "exclusiveMaximum" not in schema:
                return maximum
            else:
                return maximum - 1
        return 0

    @base.generator_type("object")
    @base.simple_generator
    def generate_valid_object(self, schema):
        """Return a dict with a valid value for every declared property."""
        obj = {}
        # items() instead of iteritems() keeps py2/py3 compatibility.
        for k, v in schema["properties"].items():
            obj[k] = self.generate_valid(v)
        return obj

    def generate(self, schema):
        """Run every registered generator for the schema's type.

        :returns: list of (generator name, value, expected result) tuples
        :raises TypeError: if no generator supports the schema's type
        """
        schema_type = schema["type"]
        if isinstance(schema_type, list):
            if "integer" in schema_type:
                schema_type = "integer"
            else:
                raise Exception("non-integer list types not supported")
        result = []
        if schema_type not in self.types_dict:
            raise TypeError("generator (%s) doesn't support type: %s"
                            % (self.__class__.__name__, schema_type))
        for generator in self.types_dict[schema_type]:
            ret = generator(schema)
            if ret is not None:
                if isinstance(ret, list):
                    result.extend(ret)
                elif isinstance(ret, tuple):
                    result.append(ret)
                else:
                    raise Exception("generator (%s) returns invalid result: %s"
                                    % (generator, ret))
        return result

    def generate_valid(self, schema):
        """Return the first valid value generated for ``schema``."""
        return self.generate(schema)[0][1]
|
377
neutron/tests/tempest/common/glance_http.py
Normal file
377
neutron/tests/tempest/common/glance_http.py
Normal file
@ -0,0 +1,377 @@
|
||||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# Originally copied from python-glanceclient
|
||||
|
||||
import copy
|
||||
import hashlib
|
||||
import httplib
|
||||
import json
|
||||
import posixpath
|
||||
import re
|
||||
import socket
|
||||
import StringIO
|
||||
import struct
|
||||
import urlparse
|
||||
|
||||
|
||||
import OpenSSL
|
||||
from six import moves
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron.tests.tempest import exceptions as exc
|
||||
from neutron.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
USER_AGENT = 'tempest'
|
||||
CHUNKSIZE = 1024 * 64 # 64kB
|
||||
TOKEN_CHARS_RE = re.compile('^[-A-Za-z0-9+/=]*$')
|
||||
|
||||
|
||||
class HTTPClient(object):
    """Glance-style HTTP client driven by a tempest auth provider.

    Resolves the service endpoint from *auth_provider* / *filters* and
    issues raw or JSON requests over http(s), with chunked-transfer
    support for streaming bodies.
    """

    def __init__(self, auth_provider, filters, **kwargs):
        self.auth_provider = auth_provider
        self.filters = filters
        self.endpoint = auth_provider.base_url(filters)
        endpoint_parts = urlparse.urlparse(self.endpoint)
        self.endpoint_scheme = endpoint_parts.scheme
        self.endpoint_hostname = endpoint_parts.hostname
        self.endpoint_port = endpoint_parts.port
        self.endpoint_path = endpoint_parts.path

        self.connection_class = self.get_connection_class(self.endpoint_scheme)
        self.connection_kwargs = self.get_connection_kwargs(
            self.endpoint_scheme, **kwargs)

    @staticmethod
    def get_connection_class(scheme):
        """Return the connection class matching the endpoint scheme."""
        if scheme == 'https':
            return VerifiedHTTPSConnection
        else:
            return httplib.HTTPConnection

    @staticmethod
    def get_connection_kwargs(scheme, **kwargs):
        """Build the kwargs for the connection class.

        Only https connections take the SSL-related options; timeout
        defaults to 600 seconds.
        """
        _kwargs = {'timeout': float(kwargs.get('timeout', 600))}

        if scheme == 'https':
            _kwargs['ca_certs'] = kwargs.get('ca_certs', None)
            _kwargs['cert_file'] = kwargs.get('cert_file', None)
            _kwargs['key_file'] = kwargs.get('key_file', None)
            _kwargs['insecure'] = kwargs.get('insecure', False)
            _kwargs['ssl_compression'] = kwargs.get('ssl_compression', True)

        return _kwargs

    def get_connection(self):
        """Instantiate a fresh connection to the endpoint."""
        _class = self.connection_class
        try:
            return _class(self.endpoint_hostname, self.endpoint_port,
                          **self.connection_kwargs)
        except httplib.InvalidURL:
            raise exc.EndpointNotFound

    def _http_request(self, url, method, **kwargs):
        """Send an http request with the specified characteristics.

        Wrapper around httplib.HTTP(S)Connection.request to handle tasks such
        as setting headers and error handling.
        """
        # Copy the kwargs so we can reuse the original in case of redirects
        kwargs['headers'] = copy.deepcopy(kwargs.get('headers', {}))
        kwargs['headers'].setdefault('User-Agent', USER_AGENT)

        self._log_request(method, url, kwargs['headers'])

        conn = self.get_connection()

        try:
            url_parts = urlparse.urlparse(url)
            conn_url = posixpath.normpath(url_parts.path)
            LOG.debug('Actual Path: {path}'.format(path=conn_url))
            if kwargs['headers'].get('Transfer-Encoding') == 'chunked':
                # Stream the body with chunked transfer encoding: the
                # total length may not be known in advance.
                conn.putrequest(method, conn_url)
                for header, value in kwargs['headers'].items():
                    conn.putheader(header, value)
                conn.endheaders()
                chunk = kwargs['body'].read(CHUNKSIZE)
                # Chunk it, baby...
                while chunk:
                    conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
                    chunk = kwargs['body'].read(CHUNKSIZE)
                conn.send('0\r\n\r\n')
            else:
                conn.request(method, conn_url, **kwargs)
            resp = conn.getresponse()
        except socket.gaierror as e:
            message = ("Error finding address for %(url)s: %(e)s" %
                       {'url': url, 'e': e})
            raise exc.EndpointNotFound(message)
        except (socket.error, socket.timeout) as e:
            message = ("Error communicating with %(endpoint)s %(e)s" %
                       {'endpoint': self.endpoint, 'e': e})
            raise exc.TimeoutException(message)

        body_iter = ResponseBodyIterator(resp)
        # Read body into string if it isn't obviously image data
        if resp.getheader('content-type', None) != 'application/octet-stream':
            body_str = ''.join([body_chunk for body_chunk in body_iter])
            body_iter = StringIO.StringIO(body_str)
            self._log_response(resp, None)
        else:
            self._log_response(resp, body_iter)

        return resp, body_iter

    def _log_request(self, method, url, headers):
        """Log the outgoing request, masking long auth tokens."""
        LOG.info('Request: ' + method + ' ' + url)
        if headers:
            headers_out = headers
            if 'X-Auth-Token' in headers and headers['X-Auth-Token']:
                token = headers['X-Auth-Token']
                if len(token) > 64 and TOKEN_CHARS_RE.match(token):
                    headers_out = headers.copy()
                    headers_out['X-Auth-Token'] = "<Token omitted>"
            LOG.info('Request Headers: ' + str(headers_out))

    def _log_response(self, resp, body):
        """Log response status/headers and up to 2048 chars of the body."""
        status = str(resp.status)
        LOG.info("Response Status: " + status)
        if resp.getheaders():
            LOG.info('Response Headers: ' + str(resp.getheaders()))
        if body:
            str_body = str(body)
            length = len(body)
            LOG.info('Response Body: ' + str_body[:2048])
            if length >= 2048:
                # Bug fix: LOG is the module-level logger; the original
                # ``self.LOG.debug`` raised AttributeError on large bodies.
                LOG.debug("Large body (%d) md5 summary: %s", length,
                          hashlib.md5(str_body).hexdigest())

    def json_request(self, method, url, **kwargs):
        """Send a JSON request and decode a JSON response.

        Serializes ``kwargs['body']`` to JSON; raises InvalidContentType
        for any non-JSON content type on either side.
        """
        kwargs.setdefault('headers', {})
        kwargs['headers'].setdefault('Content-Type', 'application/json')
        if kwargs['headers']['Content-Type'] != 'application/json':
            msg = "Only application/json content-type is supported."
            raise lib_exc.InvalidContentType(msg)

        if 'body' in kwargs:
            kwargs['body'] = json.dumps(kwargs['body'])

        resp, body_iter = self._http_request(url, method, **kwargs)

        if 'application/json' in resp.getheader('content-type', ''):
            body = ''.join([chunk for chunk in body_iter])
            try:
                body = json.loads(body)
            except ValueError:
                LOG.error('Could not decode response body as JSON')
        else:
            msg = "Only json/application content-type is supported."
            raise lib_exc.InvalidContentType(msg)

        return resp, body

    def raw_request(self, method, url, **kwargs):
        """Send a raw (octet-stream) request, streaming file-like bodies."""
        kwargs.setdefault('headers', {})
        kwargs['headers'].setdefault('Content-Type',
                                     'application/octet-stream')
        if 'body' in kwargs:
            if (hasattr(kwargs['body'], 'read')
                    and method.lower() in ('post', 'put')):
                # We use 'Transfer-Encoding: chunked' because
                # body size may not always be known in advance.
                kwargs['headers']['Transfer-Encoding'] = 'chunked'

        # Decorate the request with auth
        req_url, kwargs['headers'], kwargs['body'] = \
            self.auth_provider.auth_request(
                method=method, url=url, headers=kwargs['headers'],
                body=kwargs.get('body', None), filters=self.filters)
        return self._http_request(req_url, method, **kwargs)
|
||||
|
||||
|
||||
class OpenSSLConnectionDelegator(object):
    """
    An OpenSSL.SSL.Connection delegator.

    Supplies an additional 'makefile' method which httplib requires
    and is not present in OpenSSL.SSL.Connection.

    Note: Since it is not possible to inherit from OpenSSL.SSL.Connection
    a delegator must be used.
    """
    def __init__(self, *args, **kwargs):
        # Wrap the real connection; all arguments are forwarded verbatim.
        self.connection = OpenSSL.SSL.Connection(*args, **kwargs)

    def __getattr__(self, name):
        # Delegate every attribute we do not define to the wrapped
        # OpenSSL connection (only called when normal lookup fails).
        return getattr(self.connection, name)

    def makefile(self, *args, **kwargs):
        # Ensure the socket is closed when this file is closed
        kwargs['close'] = True
        return socket._fileobject(self.connection, *args, **kwargs)
|
||||
|
||||
|
||||
class VerifiedHTTPSConnection(httplib.HTTPSConnection):
    """
    Extended HTTPSConnection which uses the OpenSSL library
    for enhanced SSL support.
    Note: Much of this functionality can eventually be replaced
    with native Python 3.3 code.
    """
    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 ca_certs=None, timeout=None, insecure=False,
                 ssl_compression=True):
        httplib.HTTPSConnection.__init__(self, host, port,
                                         key_file=key_file,
                                         cert_file=cert_file)
        self.key_file = key_file
        self.cert_file = cert_file
        self.timeout = timeout
        self.insecure = insecure
        self.ssl_compression = ssl_compression
        self.ca_certs = ca_certs
        self.setcontext()

    @staticmethod
    def host_matches_cert(host, x509):
        """
        Verify that the x509 certificate we have received
        from 'host' correctly identifies the server we are
        connecting to, ie that the certificate's Common Name
        or a Subject Alternative Name matches 'host'.
        """
        # First see if we can match the CN
        if x509.get_subject().commonName == host:
            return True

        # Also try Subject Alternative Names for a match
        san_list = None
        for i in moves.xrange(x509.get_extension_count()):
            ext = x509.get_extension(i)
            if ext.get_short_name() == 'subjectAltName':
                san_list = str(ext)
                for san in ''.join(san_list.split()).split(','):
                    if san == "DNS:%s" % host:
                        return True

        # Server certificate does not match host
        msg = ('Host "%s" does not match x509 certificate contents: '
               'CommonName "%s"' % (host, x509.get_subject().commonName))
        if san_list is not None:
            msg = msg + ', subjectAltName "%s"' % san_list
        raise exc.SSLCertificateError(msg)

    def verify_callback(self, connection, x509, errnum,
                        depth, preverify_ok):
        """Per-certificate verify hook: reject expired certs and, for the
        leaf certificate (depth 0), check it matches the target host."""
        if x509.has_expired():
            msg = "SSL Certificate expired on '%s'" % x509.get_notAfter()
            raise exc.SSLCertificateError(msg)

        if depth == 0 and preverify_ok is True:
            # We verify that the host matches against the last
            # certificate in the chain
            return self.host_matches_cert(self.host, x509)
        else:
            # Pass through OpenSSL's default result
            return preverify_ok

    def setcontext(self):
        """
        Set up the OpenSSL context.

        Configures verification mode, client cert/key and the CA bundle;
        raises SSLConfigurationError when any material fails to load.
        """
        self.context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)

        if self.ssl_compression is False:
            self.context.set_options(0x20000)  # SSL_OP_NO_COMPRESSION

        if self.insecure is not True:
            self.context.set_verify(OpenSSL.SSL.VERIFY_PEER,
                                    self.verify_callback)
        else:
            self.context.set_verify(OpenSSL.SSL.VERIFY_NONE,
                                    self.verify_callback)

        if self.cert_file:
            try:
                self.context.use_certificate_file(self.cert_file)
            except Exception as e:
                msg = 'Unable to load cert from "%s" %s' % (self.cert_file, e)
                raise exc.SSLConfigurationError(msg)
            if self.key_file is None:
                # We support having key and cert in same file
                try:
                    self.context.use_privatekey_file(self.cert_file)
                except Exception as e:
                    msg = ('No key file specified and unable to load key '
                           'from "%s" %s' % (self.cert_file, e))
                    raise exc.SSLConfigurationError(msg)

        if self.key_file:
            try:
                self.context.use_privatekey_file(self.key_file)
            except Exception as e:
                msg = 'Unable to load key from "%s" %s' % (self.key_file, e)
                raise exc.SSLConfigurationError(msg)

        if self.ca_certs:
            try:
                self.context.load_verify_locations(self.ca_certs)
            except Exception as e:
                # Bug fix: the format string had a single "%s" but was given
                # a two-element tuple, raising TypeError instead of the
                # intended configuration error.
                msg = 'Unable to load CA from "%s" %s' % (self.ca_certs, e)
                raise exc.SSLConfigurationError(msg)
        else:
            self.context.set_default_verify_paths()

    def connect(self):
        """
        Connect to an SSL port using the OpenSSL library and apply
        per-connection parameters.
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if self.timeout is not None:
            # '0' microseconds
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO,
                            struct.pack('LL', self.timeout, 0))
        self.sock = OpenSSLConnectionDelegator(self.context, sock)
        self.sock.connect((self.host, self.port))

    def close(self):
        if self.sock:
            # Remove the reference to the socket but don't close it yet.
            # Response close will close both socket and associated
            # file. Closing socket too soon will cause response
            # reads to fail with socket IO error 'Bad file descriptor'.
            self.sock = None
        httplib.HTTPSConnection.close(self)
|
||||
|
||||
|
||||
class ResponseBodyIterator(object):
    """Iterate over an HTTP response body in CHUNKSIZE-sized pieces."""

    def __init__(self, resp):
        # Keep a handle on the live response so reads are lazy.
        self.resp = resp

    def __iter__(self):
        # Delegate to next() until the body is exhausted.
        while True:
            yield self.next()

    def next(self):
        """Return the next chunk, or raise StopIteration at end of body."""
        data = self.resp.read(CHUNKSIZE)
        if not data:
            raise StopIteration()
        return data
|
392
neutron/tests/tempest/common/isolated_creds.py
Normal file
392
neutron/tests/tempest/common/isolated_creds.py
Normal file
@ -0,0 +1,392 @@
|
||||
# Copyright 2013 IBM Corp.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import netaddr
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron.tests.api.contrib import clients
|
||||
from neutron.tests.tempest.common import cred_provider
|
||||
from neutron.tests.tempest.common.utils import data_utils
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import exceptions
|
||||
from neutron.openstack.common import log as logging
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class IsolatedCreds(cred_provider.CredentialProvider):
    """Credential provider that creates throwaway tenants/users per test.

    Each credential set ('primary', 'alt', 'admin' or a role list) gets its
    own keystone tenant and user and, when neutron is available, its own
    network, subnet and router.  Everything created here is torn down by
    clear_isolated_creds().
    """

    def __init__(self, name, password='pass', network_resources=None):
        super(IsolatedCreds, self).__init__(name, password, network_resources)
        # network_resources: optional dict with 'network'/'subnet'/'router'/
        # 'dhcp' flags controlling which network resources are created.
        self.network_resources = network_resources
        # Maps str(credential_type) -> created credentials.
        self.isolated_creds = {}
        # Maps str(credential_type) -> (network, subnet, router) tuple.
        self.isolated_net_resources = {}
        self.ports = []
        self.password = password
        self.identity_admin_client, self.network_admin_client = (
            self._get_admin_clients())

    def _get_admin_clients(self):
        """
        Returns a tuple with instances of the following admin clients (in this
        order):
            identity
            network
        """
        os = clients.AdminManager()
        return os.identity_client, os.network_client

    def _create_tenant(self, name, description):
        # Thin wrapper over the identity admin client.
        tenant = self.identity_admin_client.create_tenant(
            name=name, description=description)
        return tenant

    def _get_tenant_by_name(self, name):
        tenant = self.identity_admin_client.get_tenant_by_name(name)
        return tenant

    def _create_user(self, username, password, tenant, email):
        user = self.identity_admin_client.create_user(
            username, password, tenant['id'], email)
        return user

    def _get_user(self, tenant, username):
        user = self.identity_admin_client.get_user_by_username(
            tenant['id'], username)
        return user

    def _list_roles(self):
        roles = self.identity_admin_client.list_roles()
        return roles

    def _assign_user_role(self, tenant, user, role_name):
        """Grant *role_name* to *user* on *tenant*.

        Raises NotFound if the role does not exist; an already-granted
        role (Conflict) is only logged.
        """
        role = None
        try:
            roles = self._list_roles()
            role = next(r for r in roles if r['name'] == role_name)
        except StopIteration:
            msg = 'No "%s" role found' % role_name
            raise lib_exc.NotFound(msg)
        try:
            self.identity_admin_client.assign_user_role(tenant['id'],
                                                        user['id'],
                                                        role['id'])
        except lib_exc.Conflict:
            LOG.warning('Trying to add %s for user %s in tenant %s but they '
                        ' were already granted that role' % (role_name,
                                                             user['name'],
                                                             tenant['name']))

    def _delete_user(self, user):
        self.identity_admin_client.delete_user(user)

    def _delete_tenant(self, tenant):
        # Default security groups are neutron-owned; clean them before
        # removing the tenant itself.
        if CONF.service_available.neutron:
            self._cleanup_default_secgroup(tenant)
        self.identity_admin_client.delete_tenant(tenant)

    def _create_creds(self, suffix="", admin=False, roles=None):
        """Create random credentials under the following schema.

        If the name contains a '.' is the full class path of something, and
        we don't really care. If it isn't, it's probably a meaningful name,
        so use it.

        For logging purposes, -user and -tenant are long and redundant,
        don't use them. The user# will be sufficient to figure it out.
        """
        if '.' in self.name:
            root = ""
        else:
            root = self.name

        tenant_name = data_utils.rand_name(root) + suffix
        tenant_desc = tenant_name + "-desc"
        tenant = self._create_tenant(name=tenant_name,
                                     description=tenant_desc)

        username = data_utils.rand_name(root) + suffix
        email = data_utils.rand_name(root) + suffix + "@example.com"
        user = self._create_user(username, self.password,
                                 tenant, email)
        if admin:
            self._assign_user_role(tenant, user, CONF.identity.admin_role)
        # Add roles specified in config file
        for conf_role in CONF.auth.tempest_roles:
            self._assign_user_role(tenant, user, conf_role)
        # Add roles requested by caller
        if roles:
            for role in roles:
                self._assign_user_role(tenant, user, role)
        return self._get_credentials(user, tenant)

    def _get_credentials(self, user, tenant):
        # Package keystone objects into a cred_provider credentials object.
        return cred_provider.get_credentials(
            username=user['name'], user_id=user['id'],
            tenant_name=tenant['name'], tenant_id=tenant['id'],
            password=self.password)

    def _create_network_resources(self, tenant_id):
        """Create network/subnet/router for the tenant per the
        network_resources flags; on any failure, roll back whatever was
        already created before re-raising."""
        network = None
        subnet = None
        router = None
        # Make sure settings
        if self.network_resources:
            if self.network_resources['router']:
                if (not self.network_resources['subnet'] or
                    not self.network_resources['network']):
                    raise exceptions.InvalidConfiguration(
                        'A router requires a subnet and network')
            elif self.network_resources['subnet']:
                if not self.network_resources['network']:
                    raise exceptions.InvalidConfiguration(
                        'A subnet requires a network')
            elif self.network_resources['dhcp']:
                raise exceptions.InvalidConfiguration('DHCP requires a subnet')

        data_utils.rand_name_root = data_utils.rand_name(self.name)
        if not self.network_resources or self.network_resources['network']:
            network_name = data_utils.rand_name_root + "-network"
            network = self._create_network(network_name, tenant_id)
        try:
            if not self.network_resources or self.network_resources['subnet']:
                subnet_name = data_utils.rand_name_root + "-subnet"
                subnet = self._create_subnet(subnet_name, tenant_id,
                                             network['id'])
            if not self.network_resources or self.network_resources['router']:
                router_name = data_utils.rand_name_root + "-router"
                router = self._create_router(router_name, tenant_id)
                self._add_router_interface(router['id'], subnet['id'])
        except Exception:
            # Roll back in reverse creation order so dependencies are
            # removed before the resources they attach to.
            if router:
                self._clear_isolated_router(router['id'], router['name'])
            if subnet:
                self._clear_isolated_subnet(subnet['id'], subnet['name'])
            if network:
                self._clear_isolated_network(network['id'], network['name'])
            raise
        return network, subnet, router

    def _create_network(self, name, tenant_id):
        resp_body = self.network_admin_client.create_network(
            name=name, tenant_id=tenant_id)
        return resp_body['network']

    def _create_subnet(self, subnet_name, tenant_id, network_id):
        """Create a subnet in the first free CIDR carved out of the
        configured tenant network range; raises if every candidate CIDR
        overlaps an existing subnet."""
        base_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
        mask_bits = CONF.network.tenant_network_mask_bits
        for subnet_cidr in base_cidr.subnet(mask_bits):
            try:
                if self.network_resources:
                    resp_body = self.network_admin_client.\
                        create_subnet(
                            network_id=network_id, cidr=str(subnet_cidr),
                            name=subnet_name,
                            tenant_id=tenant_id,
                            enable_dhcp=self.network_resources['dhcp'],
                            ip_version=4)
                else:
                    resp_body = self.network_admin_client.\
                        create_subnet(network_id=network_id,
                                      cidr=str(subnet_cidr),
                                      name=subnet_name,
                                      tenant_id=tenant_id,
                                      ip_version=4)
                break
            except lib_exc.BadRequest as e:
                # Overlap means "try the next candidate CIDR"; anything
                # else is a genuine failure.
                if 'overlaps with another subnet' not in str(e):
                    raise
        else:
            message = 'Available CIDR for subnet creation could not be found'
            raise Exception(message)
        return resp_body['subnet']

    def _create_router(self, router_name, tenant_id):
        # Router is gatewayed to the configured public network.
        external_net_id = dict(
            network_id=CONF.network.public_network_id)
        resp_body = self.network_admin_client.create_router(
            router_name,
            external_gateway_info=external_net_id,
            tenant_id=tenant_id)
        return resp_body['router']

    def _add_router_interface(self, router_id, subnet_id):
        self.network_admin_client.add_router_interface_with_subnet_id(
            router_id, subnet_id)

    # Accessors for the (network, subnet, router) tuple of each cred set.
    def get_primary_network(self):
        return self.isolated_net_resources.get('primary')[0]

    def get_primary_subnet(self):
        return self.isolated_net_resources.get('primary')[1]

    def get_primary_router(self):
        return self.isolated_net_resources.get('primary')[2]

    def get_admin_network(self):
        return self.isolated_net_resources.get('admin')[0]

    def get_admin_subnet(self):
        return self.isolated_net_resources.get('admin')[1]

    def get_admin_router(self):
        return self.isolated_net_resources.get('admin')[2]

    def get_alt_network(self):
        return self.isolated_net_resources.get('alt')[0]

    def get_alt_subnet(self):
        return self.isolated_net_resources.get('alt')[1]

    def get_alt_router(self):
        return self.isolated_net_resources.get('alt')[2]

    def get_credentials(self, credential_type):
        """Return (creating on first use) the credentials for
        *credential_type* ('primary'/'alt'/'admin' or a list of roles),
        plus their isolated network resources when neutron is enabled."""
        if self.isolated_creds.get(str(credential_type)):
            credentials = self.isolated_creds[str(credential_type)]
        else:
            if credential_type in ['primary', 'alt', 'admin']:
                is_admin = (credential_type == 'admin')
                credentials = self._create_creds(admin=is_admin)
            else:
                credentials = self._create_creds(roles=credential_type)
            self.isolated_creds[str(credential_type)] = credentials
            # Maintained until tests are ported
            LOG.info("Acquired isolated creds:\n credentials: %s"
                     % credentials)
            if (CONF.service_available.neutron and
                    not CONF.baremetal.driver_enabled):
                network, subnet, router = self._create_network_resources(
                    credentials.tenant_id)
                self.isolated_net_resources[str(credential_type)] = (
                    network, subnet, router,)
                LOG.info("Created isolated network resources for : \n"
                         + " credentials: %s" % credentials)
        return credentials

    def get_primary_creds(self):
        return self.get_credentials('primary')

    def get_admin_creds(self):
        return self.get_credentials('admin')

    def get_alt_creds(self):
        return self.get_credentials('alt')

    def get_creds_by_roles(self, roles, force_new=False):
        """Return credentials carrying *roles*; with force_new, archive any
        existing set under a fresh key so a new one is created."""
        roles = list(set(roles))
        # The roles list as a str will become the index as the dict key for
        # the created credentials set in the isolated_creds dict.
        exist_creds = self.isolated_creds.get(str(roles))
        # If force_new flag is True 2 cred sets with the same roles are needed
        # handle this by creating a separate index for old one to store it
        # separately for cleanup
        if exist_creds and force_new:
            new_index = str(roles) + '-' + str(len(self.isolated_creds))
            self.isolated_creds[new_index] = exist_creds
            del self.isolated_creds[str(roles)]
            # Handle isolated neutron resources if they exist too
            if CONF.service_available.neutron:
                exist_net = self.isolated_net_resources.get(str(roles))
                if exist_net:
                    self.isolated_net_resources[new_index] = exist_net
                    del self.isolated_net_resources[str(roles)]
        return self.get_credentials(roles)

    def _clear_isolated_router(self, router_id, router_name):
        # Best-effort delete: a missing router is only logged.
        net_client = self.network_admin_client
        try:
            net_client.delete_router(router_id)
        except lib_exc.NotFound:
            LOG.warn('router with name: %s not found for delete' %
                     router_name)

    def _clear_isolated_subnet(self, subnet_id, subnet_name):
        # Best-effort delete: a missing subnet is only logged.
        net_client = self.network_admin_client
        try:
            net_client.delete_subnet(subnet_id)
        except lib_exc.NotFound:
            LOG.warn('subnet with name: %s not found for delete' %
                     subnet_name)

    def _clear_isolated_network(self, network_id, network_name):
        # Best-effort delete: a missing network is only logged.
        net_client = self.network_admin_client
        try:
            net_client.delete_network(network_id)
        except lib_exc.NotFound:
            LOG.warn('network with name: %s not found for delete' %
                     network_name)

    def _cleanup_default_secgroup(self, tenant):
        """Delete the tenant's auto-created 'default' security groups so
        the tenant itself can be removed."""
        net_client = self.network_admin_client
        resp_body = net_client.list_security_groups(tenant_id=tenant,
                                                    name="default")
        secgroups_to_delete = resp_body['security_groups']
        for secgroup in secgroups_to_delete:
            try:
                net_client.delete_security_group(secgroup['id'])
            except lib_exc.NotFound:
                LOG.warn('Security group %s, id %s not found for clean-up' %
                         (secgroup['name'], secgroup['id']))

    def _clear_isolated_net_resources(self):
        """Tear down every tracked (network, subnet, router) tuple,
        detaching router interfaces first, honoring network_resources
        flags for which pieces were created."""
        net_client = self.network_admin_client
        for cred in self.isolated_net_resources:
            network, subnet, router = self.isolated_net_resources.get(cred)
            LOG.debug("Clearing network: %(network)s, "
                      "subnet: %(subnet)s, router: %(router)s",
                      {'network': network, 'subnet': subnet, 'router': router})
            if (not self.network_resources or
                    self.network_resources.get('router')):
                try:
                    net_client.remove_router_interface_with_subnet_id(
                        router['id'], subnet['id'])
                except lib_exc.NotFound:
                    LOG.warn('router with name: %s not found for delete' %
                             router['name'])
                self._clear_isolated_router(router['id'], router['name'])
            if (not self.network_resources or
                    self.network_resources.get('subnet')):
                self._clear_isolated_subnet(subnet['id'], subnet['name'])
            if (not self.network_resources or
                    self.network_resources.get('network')):
                self._clear_isolated_network(network['id'], network['name'])
        self.isolated_net_resources = {}

    def clear_isolated_creds(self):
        """Delete all created users, tenants and network resources;
        resets the provider to its initial empty state."""
        if not self.isolated_creds:
            return
        self._clear_isolated_net_resources()
        for creds in self.isolated_creds.itervalues():
            try:
                self._delete_user(creds.user_id)
            except lib_exc.NotFound:
                LOG.warn("user with name: %s not found for delete" %
                         creds.username)
            try:
                self._delete_tenant(creds.tenant_id)
            except lib_exc.NotFound:
                LOG.warn("tenant with name: %s not found for delete" %
                         creds.tenant_name)
        self.isolated_creds = {}

    def is_multi_user(self):
        # Each credential set gets its own user.
        return True

    def is_multi_tenant(self):
        # Each credential set gets its own tenant.
        return True

    def is_role_available(self, role):
        # Roles are assigned on demand, so any role is considered available.
        return True
|
71
neutron/tests/tempest/common/negative_rest_client.py
Normal file
71
neutron/tests/tempest/common/negative_rest_client.py
Normal file
@ -0,0 +1,71 @@
|
||||
# (c) 2014 Deutsche Telekom AG
|
||||
# Copyright 2014 Red Hat, Inc.
|
||||
# Copyright 2014 NEC Corporation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.tests.tempest.common import service_client
|
||||
from neutron.tests.tempest import config
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class NegativeRestClient(service_client.ServiceClient):
    """
    Version of RestClient that does not raise exceptions.

    Negative tests need to inspect error responses directly, so the
    status-code error checker is disabled.
    """
    def __init__(self, auth_provider, service):
        region = self._get_region(service)
        super(NegativeRestClient, self).__init__(auth_provider,
                                                 service, region)

    def _get_region(self, service):
        """
        Returns the region for a specific service

        Scans every config section exposing a matching catalog_type and
        falls back to the identity region when none defines one.
        """
        service_region = None
        for cfgname in dir(CONF._config):
            # Find all config.FOO.catalog_type and assume FOO is a service.
            cfg = getattr(CONF, cfgname)
            catalog_type = getattr(cfg, 'catalog_type', None)
            if catalog_type == service:
                service_region = getattr(cfg, 'region', None)
        if not service_region:
            service_region = CONF.identity.region
        return service_region

    def _error_checker(self, method, url,
                       headers, body, resp, resp_body):
        # Intentionally a no-op: negative tests examine the raw response,
        # so error statuses must not raise here.
        pass

    def send_request(self, method, url_template, resources, body=None):
        """Fill *url_template* with *resources* and dispatch *method*.

        Returns the (response, body) pair from the underlying client.
        Raises AssertionError for an unsupported HTTP method.
        """
        url = url_template % tuple(resources)
        if method == "GET":
            resp, body = self.get(url)
        elif method == "POST":
            resp, body = self.post(url, body)
        elif method == "PUT":
            resp, body = self.put(url, body)
        elif method == "PATCH":
            resp, body = self.patch(url, body)
        elif method == "HEAD":
            resp, body = self.head(url)
        elif method == "DELETE":
            resp, body = self.delete(url)
        elif method == "COPY":
            resp, body = self.copy(url)
        else:
            # Raise explicitly instead of ``assert False`` so the guard is
            # not stripped when running under ``python -O``; the exception
            # type is unchanged for any caller catching AssertionError.
            raise AssertionError("unsupported HTTP method: %s" % method)

        return resp, body
|
93
neutron/tests/tempest/common/service_client.py
Normal file
93
neutron/tests/tempest/common/service_client.py
Normal file
@ -0,0 +1,93 @@
|
||||
# Copyright 2015 NEC Corporation. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest_lib.common import rest_client
|
||||
|
||||
from neutron.tests.tempest import config
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class ServiceClient(rest_client.RestClient):
    """Project base REST client that feeds CONF defaults into RestClient."""

    def __init__(self, auth_provider, service, region,
                 endpoint_type=None, build_interval=None, build_timeout=None,
                 disable_ssl_certificate_validation=None, ca_certs=None,
                 trace_requests=None):

        # TODO(oomichi): This params setting should be removed after all
        # service clients pass these values, and we can make ServiceClient
        # free from CONF values.
        params = {
            'disable_ssl_certificate_validation': (
                disable_ssl_certificate_validation or
                CONF.identity.disable_ssl_certificate_validation),
            'ca_certs': ca_certs or CONF.identity.ca_certificates_file,
            'trace_requests': trace_requests or CONF.debug.trace_requests,
        }

        # Forward only the tuning knobs that were explicitly provided so
        # RestClient keeps its own defaults for the rest.
        optional = (('endpoint_type', endpoint_type),
                    ('build_interval', build_interval),
                    ('build_timeout', build_timeout))
        for key, value in optional:
            if value is not None:
                params[key] = value
        super(ServiceClient, self).__init__(auth_provider, service, region,
                                            **params)
|
||||
|
||||
|
||||
class ResponseBody(dict):
    """Wrap an HTTP response and a dict body into a single value.

    Callers normally treat the object as a plain dict (the body), while
    the raw response stays reachable through ``self.response``.
    """

    def __init__(self, response, body=None):
        super(ResponseBody, self).__init__(body or {})
        self.response = response

    def __str__(self):
        contents = super(ResponseBody, self).__str__()
        return "response: %s\nBody: %s" % (self.response, contents)
|
||||
|
||||
|
||||
class ResponseBodyData(object):
    """Wrap an HTTP response and string data into a single value."""

    def __init__(self, response, data):
        self.data = data
        self.response = response

    def __str__(self):
        return "response: %s\nBody: %s" % (self.response, self.data)
|
||||
|
||||
|
||||
class ResponseBodyList(list):
    """Wrap an HTTP response and a list body into a single value.

    Callers normally treat the object as a plain list (the body), while
    the raw response stays reachable through ``self.response``.
    """

    def __init__(self, response, body=None):
        super(ResponseBodyList, self).__init__(body or [])
        self.response = response

    def __str__(self):
        contents = super(ResponseBodyList, self).__str__()
        return "response: %s\nBody: %s" % (self.response, contents)
|
152
neutron/tests/tempest/common/ssh.py
Normal file
152
neutron/tests/tempest/common/ssh.py
Normal file
@ -0,0 +1,152 @@
|
||||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import cStringIO
|
||||
import select
|
||||
import socket
|
||||
import time
|
||||
import warnings
|
||||
|
||||
import six
|
||||
|
||||
from neutron.tests.tempest import exceptions
|
||||
from neutron.openstack.common import log as logging
|
||||
|
||||
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter("ignore")
|
||||
import paramiko
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Client(object):
    """Thin wrapper around paramiko for executing commands over SSH."""

    def __init__(self, host, username, password=None, timeout=300, pkey=None,
                 channel_timeout=10, look_for_keys=False, key_filename=None):
        # Authentication may use a password, a private key object, the key
        # material as a string, or key files on disk
        # (look_for_keys/key_filename).
        self.host = host
        self.username = username
        self.password = password
        if isinstance(pkey, six.string_types):
            # Key material given as a string: turn it into a paramiko
            # RSA key object.
            pkey = paramiko.RSAKey.from_private_key(
                cStringIO.StringIO(str(pkey)))
        self.pkey = pkey
        self.look_for_keys = look_for_keys
        self.key_filename = key_filename
        # timeout: overall budget (seconds) for connecting/reading.
        # channel_timeout: per-attempt connect timeout and per-poll wait.
        self.timeout = int(timeout)
        self.channel_timeout = float(channel_timeout)
        self.buf_size = 1024

    def _get_ssh_connection(self, sleep=1.5, backoff=1):
        """Returns an ssh connection to the specified host."""
        bsleep = sleep
        ssh = paramiko.SSHClient()
        # Accept unknown host keys automatically; test environments do not
        # maintain a known_hosts file.
        ssh.set_missing_host_key_policy(
            paramiko.AutoAddPolicy())
        _start_time = time.time()
        if self.pkey is not None:
            LOG.info("Creating ssh connection to '%s' as '%s'"
                     " with public key authentication",
                     self.host, self.username)
        else:
            LOG.info("Creating ssh connection to '%s' as '%s'"
                     " with password %s",
                     self.host, self.username, str(self.password))
        attempts = 0
        while True:
            try:
                ssh.connect(self.host, username=self.username,
                            password=self.password,
                            look_for_keys=self.look_for_keys,
                            key_filename=self.key_filename,
                            timeout=self.channel_timeout, pkey=self.pkey)
                LOG.info("ssh connection to %s@%s successfuly created",
                         self.username, self.host)
                return ssh
            except (socket.error,
                    paramiko.SSHException) as e:
                # Retry with a linearly growing delay (sleep + n*backoff)
                # until self.timeout seconds have passed since the first
                # attempt, then give up with SSHTimeout.
                if self._is_timed_out(_start_time):
                    LOG.exception("Failed to establish authenticated ssh"
                                  " connection to %s@%s after %d attempts",
                                  self.username, self.host, attempts)
                    raise exceptions.SSHTimeout(host=self.host,
                                                user=self.username,
                                                password=self.password)
                bsleep += backoff
                attempts += 1
                LOG.warning("Failed to establish authenticated ssh"
                            " connection to %s@%s (%s). Number attempts: %s."
                            " Retry after %d seconds.",
                            self.username, self.host, e, attempts, bsleep)
                time.sleep(bsleep)

    def _is_timed_out(self, start_time):
        # Equivalent to: time.time() > start_time + self.timeout.
        return (time.time() - self.timeout) > start_time

    def exec_command(self, cmd):
        """
        Execute the specified command on the server.

        Note that this method is reading whole command outputs to memory, thus
        shouldn't be used for large outputs.

        :returns: data read from standard output of the command.
        :raises: SSHExecCommandFailed if command returns nonzero
                 status. The exception contains command status stderr content.
        """
        ssh = self._get_ssh_connection()
        transport = ssh.get_transport()
        channel = transport.open_session()
        channel.fileno()  # Register event pipe
        channel.exec_command(cmd)
        # No stdin is sent; close the write side so the remote command
        # sees EOF immediately.
        channel.shutdown_write()
        out_data = []
        err_data = []
        poll = select.poll()
        poll.register(channel, select.POLLIN)
        start_time = time.time()

        while True:
            ready = poll.poll(self.channel_timeout)
            if not any(ready):
                # Nothing readable within channel_timeout ms; keep polling
                # until the overall self.timeout budget is exhausted.
                if not self._is_timed_out(start_time):
                    continue
                raise exceptions.TimeoutException(
                    "Command: '{0}' executed on host '{1}'.".format(
                        cmd, self.host))
            if not ready[0]:  # If there is nothing to read.
                continue
            out_chunk = err_chunk = None
            if channel.recv_ready():
                out_chunk = channel.recv(self.buf_size)
                out_data += out_chunk,
            if channel.recv_stderr_ready():
                err_chunk = channel.recv_stderr(self.buf_size)
                err_data += err_chunk,
            # Drained: channel closed and neither stream produced data on
            # this iteration.
            if channel.closed and not err_chunk and not out_chunk:
                break
        exit_status = channel.recv_exit_status()
        if 0 != exit_status:
            raise exceptions.SSHExecCommandFailed(
                command=cmd, exit_status=exit_status,
                strerror=''.join(err_data))
        return ''.join(out_data)

    def test_connection_auth(self):
        """Raises an exception when we can not connect to server via ssh."""
        connection = self._get_ssh_connection()
        connection.close()
|
21
neutron/tests/tempest/common/tempest_fixtures.py
Normal file
21
neutron/tests/tempest/common/tempest_fixtures.py
Normal file
@ -0,0 +1,21 @@
|
||||
# Copyright 2013 IBM Corp.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.openstack.common.fixture import lockutils
|
||||
|
||||
|
||||
class LockFixture(lockutils.LockFixture):
    """Interprocess lock fixture namespaced with the 'tempest-' prefix.

    Thin wrapper over the common lockutils fixture so all tempest tests
    share one lock-file prefix.
    """
    def __init__(self, name):
        # 'tempest-' is passed as the lock_file_prefix of the base fixture.
        super(LockFixture, self).__init__(name, 'tempest-')
|
3
neutron/tests/tempest/common/utils/__init__.py
Normal file
3
neutron/tests/tempest/common/utils/__init__.py
Normal file
@ -0,0 +1,3 @@
|
||||
# Shell commands used to check connectivity from inside an instance.  The
# trailing space is significant: the target address is appended directly.
PING_IPV4_COMMAND = 'ping -c 3 '
PING_IPV6_COMMAND = 'ping6 -c 3 '
# Captures the integer part of the packet-loss percentage from ping output.
# Raw string so the backslashes are regex escapes rather than (invalid)
# Python string escape sequences.
PING_PACKET_LOSS_REGEX = r'(\d{1,3})\.?\d*\% packet loss'
|
101
neutron/tests/tempest/common/utils/data_utils.py
Normal file
101
neutron/tests/tempest/common/utils/data_utils.py
Normal file
@ -0,0 +1,101 @@
|
||||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import itertools
|
||||
import netaddr
|
||||
import random
|
||||
import uuid
|
||||
|
||||
|
||||
def rand_uuid():
    """Return a random UUID4 in its canonical 36-character string form."""
    return str(uuid.uuid4())
|
||||
|
||||
|
||||
def rand_uuid_hex():
    """Return a random UUID4 as a 32-character hex string (no dashes)."""
    return uuid.uuid4().hex
|
||||
|
||||
|
||||
def rand_name(name=''):
    """Return a random name: ``<name>-<digits>`` or bare digits if no name."""
    suffix = str(random.randint(1, 0x7fffffff))
    return '%s-%s' % (name, suffix) if name else suffix
|
||||
|
||||
|
||||
def rand_url():
    """Return a random URL of the form ``https://url-<digits>.com``."""
    return 'https://url-%d.com' % random.randint(1, 0x7fffffff)
|
||||
|
||||
|
||||
def rand_int_id(start=0, end=0x7fffffff):
    """Return a random integer in [start, end] (default 31-bit range)."""
    return random.randint(start, end)
|
||||
|
||||
|
||||
def rand_mac_address():
    """Generate an Ethernet MAC address with the fa:16:3e OUI."""
    # NOTE(vish): We would prefer to use 0xfe here to ensure that linux
    #             bridge mac addresses don't change, but it appears to
    #             conflict with libvirt, so we use the next highest octet
    #             that has the unicast and locally administered bits set
    #             properly: 0xfa.
    #             Discussion: https://bugs.launchpad.net/nova/+bug/921838
    octets = [0xfa, 0x16, 0x3e] + [random.randint(0x00, 0xff)
                                   for _ in range(3)]
    return ':'.join('%02x' % octet for octet in octets)
|
||||
|
||||
|
||||
def parse_image_id(image_ref):
    """Return the image id from a given image ref."""
    # The id is the last '/'-separated component of the ref URL.
    return image_ref.split('/')[-1]
|
||||
|
||||
|
||||
def arbitrary_string(size=4, base_text=None):
    """
    Return ``size`` characters drawn from ``base_text``, repeating the
    base text as many times as needed (default base text: 'test').
    """
    base_text = base_text or 'test'
    # Tile the base text until it is at least `size` long, then truncate.
    repeats = size // len(base_text) + 1
    return (base_text * repeats)[:size]
|
||||
|
||||
|
||||
def random_bytes(size=1024):
    """
    Return size randomly selected bytes as a string.
    """
    return ''.join(chr(random.randint(0, 255)) for _ in range(size))
|
||||
|
||||
|
||||
def get_ipv6_addr_by_EUI64(cidr, mac):
    """Generate an IPv6 address from ``cidr`` and ``mac`` via EUI-64.

    :param cidr: IPv6 prefix, e.g. '2001:db8::/64'.
    :param mac: MAC address string used to derive the interface id.
    :raises TypeError: for an IPv4 prefix or malformed prefix/mac.
    """
    # Check if the prefix is IPv4 address
    is_ipv4 = netaddr.valid_ipv4(cidr)
    if is_ipv4:
        msg = "Unable to generate IP address by EUI64 for IPv4 prefix"
        raise TypeError(msg)
    try:
        eui64 = int(netaddr.EUI(mac).eui64())
        prefix = netaddr.IPNetwork(cidr)
        # NOTE(review): '+' binds tighter than '^', so the xor with
        # (1 << 57) is applied to the combined prefix+EUI-64 value — this
        # looks like the EUI-64 universal/local bit flip; confirm before
        # changing the grouping.
        return netaddr.IPAddress(prefix.first + eui64 ^ (1 << 57))
    except (ValueError, netaddr.AddrFormatError):
        raise TypeError('Bad prefix or mac format for generating IPv6 '
                        'address by EUI-64: %(prefix)s, %(mac)s:'
                        % {'prefix': cidr, 'mac': mac})
    except TypeError:
        raise TypeError('Bad prefix type for generate IPv6 address by '
                        'EUI-64: %s' % cidr)
|
23
neutron/tests/tempest/common/utils/file_utils.py
Normal file
23
neutron/tests/tempest/common/utils/file_utils.py
Normal file
@ -0,0 +1,23 @@
|
||||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
def have_effective_read_access(path):
    """Return True if ``path`` can actually be opened for reading."""
    # EAFP: actually try to open the file rather than inspecting modes.
    try:
        with open(path, "rb"):
            pass
    except IOError:
        return False
    return True
|
87
neutron/tests/tempest/common/utils/misc.py
Normal file
87
neutron/tests/tempest/common/utils/misc.py
Normal file
@ -0,0 +1,87 @@
|
||||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import inspect
|
||||
import re
|
||||
|
||||
from neutron.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def singleton(cls):
    """Simple wrapper for classes that should only have a single instance."""
    instances = {}

    def getinstance():
        # Construct on first use, then always hand back the cached object.
        try:
            return instances[cls]
        except KeyError:
            return instances.setdefault(cls, cls())
    return getinstance
|
||||
|
||||
|
||||
def find_test_caller():
    """Find the caller class and test name.

    Because we know that the interesting things that call us are
    test_* methods, and various kinds of setUp / tearDown, we
    can look through the call stack to find appropriate methods,
    and the class we were in when those were called.

    :returns: "ClassName:method" style string, 'main', or None when no
        recognizable caller was found.
    """
    caller_name = None
    names = []
    frame = inspect.currentframe()
    is_cleanup = False
    # Start climbing the ladder until we hit a good method
    while True:
        try:
            # frame.f_back is None at the top of the stack; the resulting
            # AttributeError on .f_code is caught below and ends the walk.
            frame = frame.f_back
            name = frame.f_code.co_name
            names.append(name)
            if re.search("^(test_|setUp|tearDown)", name):
                cname = ""
                if 'self' in frame.f_locals:
                    cname = frame.f_locals['self'].__class__.__name__
                if 'cls' in frame.f_locals:
                    cname = frame.f_locals['cls'].__name__
                caller_name = cname + ":" + name
                break
            elif re.search("^_run_cleanup", name):
                is_cleanup = True
            elif name == 'main':
                caller_name = 'main'
                break
            else:
                cname = ""
                if 'self' in frame.f_locals:
                    cname = frame.f_locals['self'].__class__.__name__
                if 'cls' in frame.f_locals:
                    cname = frame.f_locals['cls'].__name__

                # the fact that we are running cleanups is indicated pretty
                # deep in the stack, so if we see that we want to just
                # start looking for a real class name, and declare victory
                # once we do.
                if is_cleanup and cname:
                    if not re.search("^RunTest", cname):
                        caller_name = cname + ":_run_cleanups"
                        break
        except Exception:
            break
    # prevents frame leaks
    del frame
    if caller_name is None:
        LOG.debug("Sane call name not found in %s" % names)
    return caller_name
|
160
neutron/tests/tempest/common/waiters.py
Normal file
160
neutron/tests/tempest/common/waiters.py
Normal file
@ -0,0 +1,160 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import time
|
||||
|
||||
from neutron.tests.tempest.common.utils import misc as misc_utils
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import exceptions
|
||||
from neutron.openstack.common import log as logging
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# NOTE(afazekas): This function needs to know a token and a subject.
|
||||
def wait_for_server_status(client, server_id, status, ready_wait=True,
                           extra_timeout=0, raise_on_error=True):
    """Waits for a server to reach a given status.

    :param client: client exposing get_server(), build_timeout and
        build_interval.
    :param status: target status string; 'BUILD' is treated specially.
    :param ready_wait: also wait for the task state to clear before
        returning.
    :param extra_timeout: seconds added to client.build_timeout.
    :param raise_on_error: raise BuildErrorException on ERROR status.
    :raises BuildErrorException: server entered ERROR (if raise_on_error).
    :raises TimeoutException: target status not reached in time.
    """

    def _get_task_state(body):
        return body.get('OS-EXT-STS:task_state', None)

    # NOTE(afazekas): UNKNOWN status possible on ERROR
    # or in a very early stage.
    body = client.get_server(server_id)
    old_status = server_status = body['status']
    old_task_state = task_state = _get_task_state(body)
    start_time = int(time.time())
    timeout = client.build_timeout + extra_timeout
    while True:
        # NOTE(afazekas): Now the BUILD status only reached
        # between the UNKNOWN->ACTIVE transition.
        # TODO(afazekas): enumerate and validate the stable status set
        if status == 'BUILD' and server_status != 'UNKNOWN':
            return
        if server_status == status:
            if ready_wait:
                if status == 'BUILD':
                    return
                # NOTE(afazekas): The instance is in "ready for action state"
                # when no task in progress
                # NOTE(afazekas): Converted to string because of the XML
                # responses
                if str(task_state) == "None":
                    # without state api extension 3 sec usually enough
                    time.sleep(CONF.compute.ready_wait)
                    return
            else:
                return

        time.sleep(client.build_interval)
        body = client.get_server(server_id)
        server_status = body['status']
        task_state = _get_task_state(body)
        if (server_status != old_status) or (task_state != old_task_state):
            LOG.info('State transition "%s" ==> "%s" after %d second wait',
                     '/'.join((old_status, str(old_task_state))),
                     '/'.join((server_status, str(task_state))),
                     time.time() - start_time)
        if (server_status == 'ERROR') and raise_on_error:
            # Include the server fault in the exception when available.
            if 'fault' in body:
                raise exceptions.BuildErrorException(body['fault'],
                                                     server_id=server_id)
            else:
                raise exceptions.BuildErrorException(server_id=server_id)

        timed_out = int(time.time()) - start_time >= timeout

        if timed_out:
            expected_task_state = 'None' if ready_wait else 'n/a'
            message = ('Server %(server_id)s failed to reach %(status)s '
                       'status and task state "%(expected_task_state)s" '
                       'within the required time (%(timeout)s s).' %
                       {'server_id': server_id,
                        'status': status,
                        'expected_task_state': expected_task_state,
                        'timeout': timeout})
            message += ' Current status: %s.' % server_status
            message += ' Current task state: %s.' % task_state
            caller = misc_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise exceptions.TimeoutException(message)
        old_status = server_status
        old_task_state = task_state
|
||||
|
||||
|
||||
def wait_for_image_status(client, image_id, status):
    """Waits for an image to reach a given status.

    The client should have a get_image(image_id) method to get the image.
    The client should also have build_interval and build_timeout attributes.

    :raises AddImageException: if the image goes to ERROR.
    :raises TimeoutException: if the status is not reached within
        client.build_timeout seconds.
    """
    image = client.get_image(image_id)
    start = int(time.time())

    while image['status'] != status:
        time.sleep(client.build_interval)
        image = client.get_image(image_id)
        status_curr = image['status']
        if status_curr == 'ERROR':
            raise exceptions.AddImageException(image_id=image_id)

        # check the status again to avoid a false negative where we hit
        # the timeout at the same time that the image reached the expected
        # status
        if status_curr == status:
            return

        if int(time.time()) - start >= client.build_timeout:
            message = ('Image %(image_id)s failed to reach %(status)s state'
                       '(current state %(status_curr)s) '
                       'within the required time (%(timeout)s s).' %
                       {'image_id': image_id,
                        'status': status,
                        'status_curr': status_curr,
                        'timeout': client.build_timeout})
            caller = misc_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise exceptions.TimeoutException(message)
|
||||
|
||||
|
||||
def wait_for_bm_node_status(client, node_id, attr, status):
    """Waits for a baremetal node attribute to reach given status.

    The client should have a show_node(node_uuid) method to get the node.

    :raises TimeoutException: if node[attr] does not reach ``status``
        within client.build_timeout seconds.
    """
    _, node = client.show_node(node_id)
    start = int(time.time())

    while node[attr] != status:
        time.sleep(client.build_interval)
        _, node = client.show_node(node_id)
        status_curr = node[attr]
        # Re-check after the refresh to avoid a false negative where we
        # hit the timeout at the same time the node reached the status.
        if status_curr == status:
            return

        if int(time.time()) - start >= client.build_timeout:
            message = ('Node %(node_id)s failed to reach %(attr)s=%(status)s '
                       'within the required time (%(timeout)s s).' %
                       {'node_id': node_id,
                        'attr': attr,
                        'status': status,
                        'timeout': client.build_timeout})
            message += ' Current state of %s: %s.' % (attr, status_curr)
            caller = misc_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise exceptions.TimeoutException(message)
|
1236
neutron/tests/tempest/config.py
Normal file
1236
neutron/tests/tempest/config.py
Normal file
File diff suppressed because it is too large
Load Diff
194
neutron/tests/tempest/exceptions.py
Normal file
194
neutron/tests/tempest/exceptions.py
Normal file
@ -0,0 +1,194 @@
|
||||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import testtools
|
||||
|
||||
|
||||
class TempestException(Exception):
    """
    Base Tempest Exception

    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    message = "An unknown exception occurred"

    def __init__(self, *args, **kwargs):
        super(TempestException, self).__init__()
        try:
            details = self.message % kwargs
        except Exception:
            # at least get the core message out if something happened
            details = self.message
        if args:
            # Positional args are treated as extra reason/description text
            # and appended to the formatted message.
            extras = ["%s" % arg for arg in args]
            details += "\nDetails: %s" % '\n'.join(extras)
        self._error_string = details

    def __str__(self):
        return self._error_string
|
||||
|
||||
|
||||
class RestClientException(TempestException,
                          testtools.TestCase.failureException):
    """Tempest exception that also counts as a testtools test failure."""
    pass
|
||||
|
||||
|
||||
# Configuration / credential validation errors.  Each `message` is a
# printf-style template filled from the constructor kwargs (see
# TempestException.__init__).


class InvalidConfiguration(TempestException):
    message = "Invalid Configuration"


class InvalidCredentials(TempestException):
    message = "Invalid Credentials"


class InvalidServiceTag(TempestException):
    message = "Invalid service tag"
|
||||
|
||||
|
||||
class InvalidIdentityVersion(TempestException):
    """Raised for an unsupported identity API version.

    Expects an ``identity_version`` kwarg.
    """
    # Fix: the template was missing the 's' conversion type
    # ('%(identity_version)'), so %-formatting failed and the version was
    # silently dropped from the rendered message.
    message = "Invalid version %(identity_version)s of the identity service"
|
||||
|
||||
|
||||
# Resource-wait failures: raised by the waiter helpers when a resource
# ends up in ERROR or never reaches the expected state.  Each `message`
# is a printf-style template filled from the constructor kwargs.


class TimeoutException(TempestException):
    message = "Request timed out"


class BuildErrorException(TempestException):
    message = "Server %(server_id)s failed to build and is in ERROR status"


class ImageKilledException(TempestException):
    message = "Image %(image_id)s 'killed' while waiting for '%(status)s'"


class AddImageException(TempestException):
    message = "Image %(image_id)s failed to become ACTIVE in the allotted time"


class EC2RegisterImageException(TempestException):
    message = ("Image %(image_id)s failed to become 'available' "
               "in the allotted time")


class VolumeBuildErrorException(TempestException):
    message = "Volume %(volume_id)s failed to build and is in ERROR status"


class SnapshotBuildErrorException(TempestException):
    message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status"


class VolumeBackupException(TempestException):
    message = "Volume backup %(backup_id)s failed and is in ERROR status"


class StackBuildErrorException(TempestException):
    message = ("Stack %(stack_identifier)s is in %(stack_status)s status "
               "due to '%(stack_status_reason)s'")


class StackResourceBuildErrorException(TempestException):
    message = ("Resource %(resource_name)s in stack %(stack_identifier)s is "
               "in %(resource_status)s status due to "
               "'%(resource_status_reason)s'")


class AuthenticationFailure(TempestException):
    message = ("Authentication with user %(user)s and password "
               "%(password)s failed auth using tenant %(tenant)s.")
|
||||
|
||||
|
||||
# Service / connectivity errors.  Each `message` is a printf-style
# template filled from the constructor kwargs.


class EndpointNotFound(TempestException):
    message = "Endpoint not found"


class ImageFault(TempestException):
    message = "Got image fault"


class IdentityError(TempestException):
    message = "Got identity error"


class SSHTimeout(TempestException):
    message = ("Connection to the %(host)s via SSH timed out.\n"
               "User: %(user)s, Password: %(password)s")


class SSHExecCommandFailed(TempestException):
    """Raised when remotely executed command returns nonzero status."""
    message = ("Command '%(command)s', exit status: %(exit_status)d, "
               "Error:\n%(strerror)s")


class ServerUnreachable(TempestException):
    message = "The server is not reachable via the configured network"


class TearDownException(TempestException):
    message = "%(num)d cleanUp operation failed"


class RFCViolation(RestClientException):
    message = "RFC Violation"
|
||||
|
||||
|
||||
class InvalidHttpSuccessCode(RestClientException):
|
||||
message = "The success code is different than the expected one"
|
||||
|
||||
|
||||
class BadRequest(RestClientException):
|
||||
message = "Bad request"
|
||||
|
||||
|
||||
class ResponseWithNonEmptyBody(RFCViolation):
    """Raised when a status code that forbids a body arrives with one."""
    message = ("RFC Violation! Response with %(status)d HTTP Status Code "
               "MUST NOT have a body")
|
||||
|
||||
|
||||
class ResponseWithEntity(RFCViolation):
    """Raised when a 205 (Reset Content) response carries an entity."""
    message = ("RFC Violation! Response with 205 HTTP Status Code "
               "MUST NOT have an entity")
|
||||
|
||||
|
||||
class InvalidHTTPResponseHeader(RestClientException):
    """Raised when a response header fails validation."""
    message = "HTTP response header is invalid"
|
||||
|
||||
|
||||
class InvalidStructure(TempestException):
    """Raised when a parsed details table does not have the expected shape."""
    message = "Invalid structure of table with details"
|
||||
|
||||
|
||||
class CommandFailed(Exception):
    """Raised when an external command exits with a nonzero status.

    Carries the exit code, the command itself, and its captured stdout
    and stderr so callers can log or assert on them.
    """

    def __init__(self, returncode, cmd, output, stderr):
        super(CommandFailed, self).__init__()
        self.returncode = returncode
        self.cmd = cmd
        self.stdout = output
        self.stderr = stderr

    def __str__(self):
        # Single template keeps the message format in one place.
        template = ("Command '%s' returned non-zero exit status %d.\n"
                    "stdout:\n%s\n"
                    "stderr:\n%s")
        return template % (self.cmd, self.returncode,
                           self.stdout, self.stderr)
|
74
neutron/tests/tempest/manager.py
Normal file
74
neutron/tests/tempest/manager.py
Normal file
@ -0,0 +1,74 @@
|
||||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.tests.tempest import auth
|
||||
from neutron.tests.tempest.common import cred_provider
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import exceptions
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class Manager(object):
    """Base manager class.

    Manager objects are responsible for providing a configuration object
    and a client object for a test case to use in performing actions.
    """

    def __init__(self, credentials=None):
        """Set up credentials and an auth provider for the test clients.

        :param credentials: Override of the credentials used by the
            managed clients.  When None, the standard
            username/password/tenant_name[/domain_name] configured
            credentials are used.
        :raises exceptions.InvalidCredentials: if the chosen credentials
            do not validate.
        """
        self.auth_version = CONF.identity.auth_version
        if credentials is not None:
            self.credentials = credentials
        else:
            self.credentials = cred_provider.get_configured_credentials('user')
        # Reject invalid credentials (passed-in or default) early.
        if not self.credentials.is_valid():
            raise exceptions.InvalidCredentials()
        # Build the auth provider that the clients will authenticate with.
        self.auth_provider = get_auth_provider(self.credentials)
        # FIXME(andreaf) unused
        self.client_attr_names = []
|
||||
|
||||
|
||||
def get_auth_provider_class(credentials):
    """Return the (auth provider class, auth URL) pair for *credentials*.

    Keystone v3 credentials map to the v3 provider and URI; anything else
    falls back to the Keystone v2 provider and URI.
    """
    if not isinstance(credentials, auth.KeystoneV3Credentials):
        return auth.KeystoneV2AuthProvider, CONF.identity.uri
    return auth.KeystoneV3AuthProvider, CONF.identity.uri_v3
|
||||
|
||||
|
||||
def get_auth_provider(credentials):
    """Build an auth provider for *credentials*.

    The provider is constructed with the TLS/tracing options taken from
    the configuration.

    :raises exceptions.InvalidCredentials: if *credentials* is None.
    """
    if credentials is None:
        raise exceptions.InvalidCredentials(
            'Credentials must be specified')
    # TLS and request-tracing options shared by both provider classes.
    provider_kwargs = {
        'disable_ssl_certificate_validation':
            CONF.identity.disable_ssl_certificate_validation,
        'ca_certs': CONF.identity.ca_certificates_file,
        'trace_requests': CONF.debug.trace_requests,
    }
    provider_cls, auth_url = get_auth_provider_class(credentials)
    return provider_cls(credentials, auth_url, **provider_kwargs)
|
0
neutron/tests/tempest/services/__init__.py
Normal file
0
neutron/tests/tempest/services/__init__.py
Normal file
235
neutron/tests/tempest/services/botoclients.py
Normal file
235
neutron/tests/tempest/services/botoclients.py
Normal file
@ -0,0 +1,235 @@
|
||||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ConfigParser
|
||||
import contextlib
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
import types
|
||||
import urlparse
|
||||
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import exceptions
|
||||
|
||||
import boto
|
||||
import boto.ec2
|
||||
import boto.s3.connection
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class BotoClientBase(object):
    """Base for boto-backed EC2/S3 clients.

    Subclasses define ``connect_method`` and ``connection_data`` and list
    the boto connection methods they expose in ``ALLOWED_METHODS``;
    ``__getattr__`` lazily generates a wrapper for each allowed method
    that opens a fresh connection per call.
    """

    # Names of boto connection methods exposed on this client.
    ALLOWED_METHODS = set()

    def __init__(self, username=None, password=None,
                 auth_url=None, tenant_name=None,
                 *args, **kwargs):
        # FIXME(andreaf) replace credentials and auth_url with auth_provider

        insecure_ssl = CONF.identity.disable_ssl_certificate_validation
        self.ca_cert = CONF.identity.ca_certificates_file

        # boto expects these as strings when written into its config.
        self.connection_timeout = str(CONF.boto.http_socket_timeout)
        self.num_retries = str(CONF.boto.num_retries)
        self.build_timeout = CONF.boto.build_timeout
        # Keystone credentials used to derive EC2 access/secret keys
        # when they are not configured explicitly.
        self.ks_cred = {"username": username,
                        "password": password,
                        "auth_url": auth_url,
                        "tenant_name": tenant_name,
                        "insecure": insecure_ssl,
                        "cacert": self.ca_cert}

    def _keystone_aws_get(self):
        """Fetch (or create) EC2 credentials for this user/tenant from keystone."""
        # FIXME(andreaf) Move EC2 credentials to AuthProvider
        import keystoneclient.v2_0.client

        keystone = keystoneclient.v2_0.client.Client(**self.ks_cred)
        ec2_cred_list = keystone.ec2.list(keystone.auth_user_id)
        ec2_cred = None
        # for/else: create a credential only when none matched the tenant.
        for cred in ec2_cred_list:
            if cred.tenant_id == keystone.auth_tenant_id:
                ec2_cred = cred
                break
        else:
            ec2_cred = keystone.ec2.create(keystone.auth_user_id,
                                           keystone.auth_tenant_id)
        if not all((ec2_cred, ec2_cred.access, ec2_cred.secret)):
            raise lib_exc.NotFound("Unable to get access and secret keys")
        return ec2_cred

    def _config_boto_timeout(self, timeout, retries):
        """Write socket timeout / retry count into boto's global config."""
        try:
            boto.config.add_section("Boto")
        except ConfigParser.DuplicateSectionError:
            # Section already exists; just overwrite the values below.
            pass
        boto.config.set("Boto", "http_socket_timeout", timeout)
        boto.config.set("Boto", "num_retries", retries)

    def _config_boto_ca_certificates_file(self, ca_cert):
        """Point boto's global config at the CA bundle, if one is set."""
        if ca_cert is None:
            return

        try:
            boto.config.add_section("Boto")
        except ConfigParser.DuplicateSectionError:
            pass
        boto.config.set("Boto", "ca_certificates_file", ca_cert)

    def __getattr__(self, name):
        """Automatically creates methods for the allowed methods set."""
        if name in self.ALLOWED_METHODS:
            def func(self, *args, **kwargs):
                # New connection per call, closed when the call returns.
                with contextlib.closing(self.get_connection()) as conn:
                    return getattr(conn, name)(*args, **kwargs)

            func.__name__ = name
            # Memoize the wrapper on both the instance and the class so
            # __getattr__ is only hit on the first access.
            # NOTE(review): the three-argument types.MethodType form is
            # Python 2 only — this module predates a Python 3 port.
            setattr(self, name, types.MethodType(func, self, self.__class__))
            setattr(self.__class__, name,
                    types.MethodType(func, None, self.__class__))
            return getattr(self, name)
        else:
            raise AttributeError(name)

    def get_connection(self):
        """Open a boto connection, deriving EC2 keys from keystone if needed."""
        self._config_boto_timeout(self.connection_timeout, self.num_retries)
        self._config_boto_ca_certificates_file(self.ca_cert)
        if not all((self.connection_data["aws_access_key_id"],
                    self.connection_data["aws_secret_access_key"])):
            # No static keys configured: try to derive them via keystone,
            # which requires a complete set of keystone credentials.
            if all([self.ks_cred.get('auth_url'),
                    self.ks_cred.get('username'),
                    self.ks_cred.get('tenant_name'),
                    self.ks_cred.get('password')]):
                ec2_cred = self._keystone_aws_get()
                self.connection_data["aws_access_key_id"] = \
                    ec2_cred.access
                self.connection_data["aws_secret_access_key"] = \
                    ec2_cred.secret
            else:
                raise exceptions.InvalidConfiguration(
                    "Unable to get access and secret keys")
        return self.connect_method(**self.connection_data)
|
||||
|
||||
|
||||
class APIClientEC2(BotoClientBase):
    """Boto-based client for the EC2 API.

    Builds ``connection_data`` from the configured ``boto.ec2_url`` and
    region; the actual EC2 calls are generated lazily from
    ``ALLOWED_METHODS`` by ``BotoClientBase.__getattr__``.
    """

    def connect_method(self, *args, **kwargs):
        return boto.connect_ec2(*args, **kwargs)

    def __init__(self, *args, **kwargs):
        super(APIClientEC2, self).__init__(*args, **kwargs)
        insecure_ssl = CONF.identity.disable_ssl_certificate_validation
        aws_access = CONF.boto.aws_access
        aws_secret = CONF.boto.aws_secret
        purl = urlparse.urlparse(CONF.boto.ec2_url)

        region_name = CONF.compute.region
        if not region_name:
            region_name = CONF.identity.region
        region = boto.ec2.regioninfo.RegionInfo(name=region_name,
                                                endpoint=purl.hostname)
        port = purl.port
        if port is None:
            # BUG FIX: was `purl.scheme is not "https"` — identity
            # comparison against a string literal, which only worked by
            # interning accident. Use equality.
            if purl.scheme != "https":
                port = 80
            else:
                port = 443
        else:
            port = int(port)
        self.connection_data = {"aws_access_key_id": aws_access,
                                "aws_secret_access_key": aws_secret,
                                "is_secure": purl.scheme == "https",
                                "validate_certs": not insecure_ssl,
                                "region": region,
                                "host": purl.hostname,
                                "port": port,
                                "path": purl.path}

    # BUG FIX: a missing comma after 'modify_volume_attribute' silently
    # concatenated it with 'bundle_instance' into one bogus name, hiding
    # both methods. Also fixed the 'confirm_product_instance' typo.
    ALLOWED_METHODS = set(('create_key_pair', 'get_key_pair',
                           'delete_key_pair', 'import_key_pair',
                           'get_all_key_pairs',
                           'get_all_tags',
                           'create_image', 'get_image',
                           'register_image', 'deregister_image',
                           'get_all_images', 'get_image_attribute',
                           'modify_image_attribute', 'reset_image_attribute',
                           'get_all_kernels',
                           'create_volume', 'delete_volume',
                           'get_all_volume_status', 'get_all_volumes',
                           'get_volume_attribute', 'modify_volume_attribute',
                           'bundle_instance', 'cancel_spot_instance_requests',
                           'confirm_product_instance',
                           'get_all_instance_status', 'get_all_instances',
                           'get_all_reserved_instances',
                           'get_all_spot_instance_requests',
                           'get_instance_attribute', 'monitor_instance',
                           'monitor_instances', 'unmonitor_instance',
                           'unmonitor_instances',
                           'purchase_reserved_instance_offering',
                           'reboot_instances', 'request_spot_instances',
                           'reset_instance_attribute', 'run_instances',
                           'start_instances', 'stop_instances',
                           'terminate_instances',
                           'attach_network_interface', 'attach_volume',
                           'detach_network_interface', 'detach_volume',
                           'get_console_output',
                           'delete_network_interface', 'create_subnet',
                           'create_network_interface', 'delete_subnet',
                           'get_all_network_interfaces',
                           'allocate_address', 'associate_address',
                           'disassociate_address', 'get_all_addresses',
                           'release_address',
                           'create_snapshot', 'delete_snapshot',
                           'get_all_snapshots', 'get_snapshot_attribute',
                           'modify_snapshot_attribute',
                           'reset_snapshot_attribute', 'trim_snapshots',
                           'get_all_regions', 'get_all_zones',
                           'get_all_security_groups', 'create_security_group',
                           'delete_security_group', 'authorize_security_group',
                           'authorize_security_group_egress',
                           'revoke_security_group',
                           'revoke_security_group_egress'))
|
||||
|
||||
|
||||
class ObjectClientS3(BotoClientBase):
    """Boto-based client for the S3 API.

    Builds ``connection_data`` from the configured ``boto.s3_url``; the
    actual S3 calls are generated lazily from ``ALLOWED_METHODS`` by
    ``BotoClientBase.__getattr__``.
    """

    def connect_method(self, *args, **kwargs):
        return boto.connect_s3(*args, **kwargs)

    def __init__(self, *args, **kwargs):
        super(ObjectClientS3, self).__init__(*args, **kwargs)
        insecure_ssl = CONF.identity.disable_ssl_certificate_validation
        aws_access = CONF.boto.aws_access
        aws_secret = CONF.boto.aws_secret
        purl = urlparse.urlparse(CONF.boto.s3_url)
        port = purl.port
        if port is None:
            # BUG FIX: was `purl.scheme is not "https"` — identity
            # comparison against a string literal, which only worked by
            # interning accident. Use equality.
            if purl.scheme != "https":
                port = 80
            else:
                port = 443
        else:
            port = int(port)
        self.connection_data = {"aws_access_key_id": aws_access,
                                "aws_secret_access_key": aws_secret,
                                "is_secure": purl.scheme == "https",
                                "validate_certs": not insecure_ssl,
                                "host": purl.hostname,
                                "port": port,
                                "calling_format": boto.s3.connection.
                                OrdinaryCallingFormat()}

    ALLOWED_METHODS = set(('create_bucket', 'delete_bucket', 'generate_url',
                           'get_all_buckets', 'get_bucket', 'delete_key',
                           'lookup'))
|
0
neutron/tests/tempest/services/identity/__init__.py
Normal file
0
neutron/tests/tempest/services/identity/__init__.py
Normal file
0
neutron/tests/tempest/services/identity/v2/__init__.py
Normal file
0
neutron/tests/tempest/services/identity/v2/__init__.py
Normal file
@ -0,0 +1,271 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron.tests.tempest.common import service_client
|
||||
|
||||
|
||||
class IdentityClientJSON(service_client.ServiceClient):
    """JSON client for the Identity v2 API.

    Covers tenants, users, roles, tokens and services, including the
    OS-KSADM admin extension endpoints.
    """

    def has_admin_extensions(self):
        """
        Returns True if the KSADM Admin Extensions are supported
        False otherwise
        """
        # Cached after the first probe.
        if hasattr(self, '_has_admin_extensions'):
            return self._has_admin_extensions
        # Try something that requires admin
        try:
            self.list_roles()
            self._has_admin_extensions = True
        except Exception:
            self._has_admin_extensions = False
        return self._has_admin_extensions

    def create_role(self, name):
        """Create a role."""
        post_body = {
            'name': name,
        }
        post_body = json.dumps({'role': post_body})
        resp, body = self.post('OS-KSADM/roles', post_body)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def get_role(self, role_id):
        """Get a role by its id."""
        resp, body = self.get('OS-KSADM/roles/%s' % role_id)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body['role'])

    def create_tenant(self, name, **kwargs):
        """
        Create a tenant
        name (required): New tenant name
        description: Description of new tenant (default is none)
        enabled <true|false>: Initial tenant status (default is true)
        """
        post_body = {
            'name': name,
            'description': kwargs.get('description', ''),
            'enabled': kwargs.get('enabled', True),
        }
        post_body = json.dumps({'tenant': post_body})
        resp, body = self.post('tenants', post_body)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def delete_role(self, role_id):
        """Delete a role."""
        resp, body = self.delete('OS-KSADM/roles/%s' % str(role_id))
        self.expected_success(204, resp.status)
        return resp, body

    def list_user_roles(self, tenant_id, user_id):
        """Returns a list of roles assigned to a user for a tenant."""
        url = '/tenants/%s/users/%s/roles' % (tenant_id, user_id)
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp, self._parse_resp(body))

    def assign_user_role(self, tenant_id, user_id, role_id):
        """Add roles to a user on a tenant."""
        resp, body = self.put('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
                              (tenant_id, user_id, role_id), "")
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def remove_user_role(self, tenant_id, user_id, role_id):
        """Removes a role assignment for a user on a tenant."""
        resp, body = self.delete('/tenants/%s/users/%s/roles/OS-KSADM/%s' %
                                 (tenant_id, user_id, role_id))
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)

    def delete_tenant(self, tenant_id):
        """Delete a tenant."""
        resp, body = self.delete('tenants/%s' % str(tenant_id))
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)

    def get_tenant(self, tenant_id):
        """Get tenant details."""
        resp, body = self.get('tenants/%s' % str(tenant_id))
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def list_roles(self):
        """Returns roles."""
        resp, body = self.get('OS-KSADM/roles')
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp, self._parse_resp(body))

    def list_tenants(self):
        """Returns tenants."""
        resp, body = self.get('tenants')
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBodyList(resp, body['tenants'])

    def get_tenant_by_name(self, tenant_name):
        """Look up a tenant by name; raises NotFound when absent."""
        tenants = self.list_tenants()
        for tenant in tenants:
            if tenant['name'] == tenant_name:
                return tenant
        raise lib_exc.NotFound('No such tenant')

    def update_tenant(self, tenant_id, **kwargs):
        """Updates a tenant."""
        # Read current values first so omitted kwargs keep their values.
        body = self.get_tenant(tenant_id)
        name = kwargs.get('name', body['name'])
        desc = kwargs.get('description', body['description'])
        en = kwargs.get('enabled', body['enabled'])
        post_body = {
            'id': tenant_id,
            'name': name,
            'description': desc,
            'enabled': en,
        }
        post_body = json.dumps({'tenant': post_body})
        resp, body = self.post('tenants/%s' % tenant_id, post_body)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def create_user(self, name, password, tenant_id, email, **kwargs):
        """Create a user."""
        post_body = {
            'name': name,
            'password': password,
            'email': email
        }
        if tenant_id is not None:
            post_body['tenantId'] = tenant_id
        if kwargs.get('enabled') is not None:
            post_body['enabled'] = kwargs.get('enabled')
        post_body = json.dumps({'user': post_body})
        resp, body = self.post('users', post_body)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def update_user(self, user_id, **kwargs):
        """Updates a user."""
        put_body = json.dumps({'user': kwargs})
        resp, body = self.put('users/%s' % user_id, put_body)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def get_user(self, user_id):
        """GET a user."""
        resp, body = self.get("users/%s" % user_id)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def delete_user(self, user_id):
        """Delete a user."""
        resp, body = self.delete("users/%s" % user_id)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)

    def get_users(self):
        """Get the list of users."""
        resp, body = self.get("users")
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp, self._parse_resp(body))

    def enable_disable_user(self, user_id, enabled):
        """Enables or disables a user."""
        put_body = {
            'enabled': enabled
        }
        put_body = json.dumps({'user': put_body})
        resp, body = self.put('users/%s/enabled' % user_id, put_body)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def get_token(self, token_id):
        """Get token details."""
        resp, body = self.get("tokens/%s" % token_id)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def delete_token(self, token_id):
        """Delete a token."""
        resp, body = self.delete("tokens/%s" % token_id)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)

    def list_users_for_tenant(self, tenant_id):
        """List users for a Tenant."""
        resp, body = self.get('/tenants/%s/users' % tenant_id)
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp, self._parse_resp(body))

    def get_user_by_username(self, tenant_id, username):
        """Look up a user by name within a tenant; raises NotFound when absent."""
        users = self.list_users_for_tenant(tenant_id)
        for user in users:
            if user['name'] == username:
                return user
        raise lib_exc.NotFound('No such user')

    def create_service(self, name, type, **kwargs):
        """Create a service."""
        post_body = {
            'name': name,
            'type': type,
            'description': kwargs.get('description')
        }
        post_body = json.dumps({'OS-KSADM:service': post_body})
        resp, body = self.post('/OS-KSADM/services', post_body)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def get_service(self, service_id):
        """Get Service."""
        url = '/OS-KSADM/services/%s' % service_id
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def list_services(self):
        """List Service - Returns Services."""
        resp, body = self.get('/OS-KSADM/services')
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp, self._parse_resp(body))

    def delete_service(self, service_id):
        """Delete Service."""
        url = '/OS-KSADM/services/%s' % service_id
        resp, body = self.delete(url)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)

    def update_user_password(self, user_id, new_pass):
        """Update User Password."""
        put_body = {
            'password': new_pass,
            'id': user_id
        }
        put_body = json.dumps({'user': put_body})
        resp, body = self.put('users/%s/OS-KSADM/password' % user_id, put_body)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, self._parse_resp(body))

    def list_extensions(self):
        """List all the extensions."""
        resp, body = self.get('/extensions')
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBodyList(resp,
                                               body['extensions']['values'])
|
110
neutron/tests/tempest/services/identity/v2/json/token_client.py
Normal file
110
neutron/tests/tempest/services/identity/v2/json/token_client.py
Normal file
@ -0,0 +1,110 @@
|
||||
# Copyright 2015 NEC Corporation. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
from tempest_lib.common import rest_client
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron.tests.tempest.common import service_client
|
||||
from neutron.tests.tempest import exceptions
|
||||
|
||||
|
||||
class TokenClientJSON(rest_client.RestClient):
    """Raw client for the Identity v2 token endpoint.

    Unlike the service clients, this client authenticates with a plain
    username/password (or token) POST and overrides ``request`` so no
    pre-existing auth provider is needed.
    """

    def __init__(self, auth_url, disable_ssl_certificate_validation=None,
                 ca_certs=None, trace_requests=None):
        dscv = disable_ssl_certificate_validation
        super(TokenClientJSON, self).__init__(
            None, None, None, disable_ssl_certificate_validation=dscv,
            ca_certs=ca_certs, trace_requests=trace_requests)

        # Normalize URI to ensure /tokens is in it.
        if 'tokens' not in auth_url:
            auth_url = auth_url.rstrip('/') + '/tokens'

        self.auth_url = auth_url

    def auth(self, user, password, tenant=None):
        """POST password credentials; returns the 'access' response body."""
        creds = {
            'auth': {
                'passwordCredentials': {
                    'username': user,
                    'password': password,
                },
            }
        }

        if tenant:
            creds['auth']['tenantName'] = tenant

        body = json.dumps(creds)
        resp, body = self.post(self.auth_url, body=body)
        self.expected_success(200, resp.status)

        return service_client.ResponseBody(resp, body['access'])

    def auth_token(self, token_id, tenant=None):
        """POST an existing token (re-scope); returns the 'access' body."""
        creds = {
            'auth': {
                'token': {
                    'id': token_id,
                },
            }
        }

        if tenant:
            creds['auth']['tenantName'] = tenant

        body = json.dumps(creds)
        resp, body = self.post(self.auth_url, body=body)
        self.expected_success(200, resp.status)

        return service_client.ResponseBody(resp, body['access'])

    def request(self, method, url, extra_headers=False, headers=None,
                body=None):
        """A simple HTTP request interface."""
        if headers is None:
            # Always accept 'json', for xml token client too.
            headers = self.get_headers(accept_type="json")
        elif extra_headers:
            try:
                headers.update(self.get_headers(accept_type="json"))
            except (ValueError, TypeError):
                headers = self.get_headers(accept_type="json")

        resp, resp_body = self.raw_request(url, method,
                                           headers=headers, body=body)
        self._log_request(method, url, resp)

        # Map auth failures to Unauthorized; anything else unexpected
        # becomes an IdentityError.
        if resp.status in [401, 403]:
            resp_body = json.loads(resp_body)
            raise lib_exc.Unauthorized(resp_body['error']['message'])
        elif resp.status not in [200, 201]:
            raise exceptions.IdentityError(
                'Unexpected status code {0}'.format(resp.status))

        # Decode JSON bodies so callers receive a dict.
        if isinstance(resp_body, str):
            resp_body = json.loads(resp_body)
        return resp, resp_body

    def get_token(self, user, password, tenant, auth_data=False):
        """
        Returns (token id, token data) for supplied credentials
        """
        body = self.auth(user, password, tenant)

        if auth_data:
            return body['token']['id'], body
        else:
            return body['token']['id']
|
0
neutron/tests/tempest/services/identity/v3/__init__.py
Normal file
0
neutron/tests/tempest/services/identity/v3/__init__.py
Normal file
@ -0,0 +1,83 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
|
||||
from neutron.tests.tempest.common import service_client
|
||||
|
||||
|
||||
class CredentialsClientJSON(service_client.ServiceClient):
    """JSON client for the Identity v3 credentials API."""

    api_version = "v3"

    @staticmethod
    def _encode_blob(access_key, secret_key):
        # BUG FIX: the blob used to be built with raw "%s" interpolation,
        # producing invalid JSON whenever a key contained quotes or
        # backslashes. json.dumps escapes them correctly.
        return json.dumps({"access": access_key, "secret": secret_key})

    def create_credential(self, access_key, secret_key, user_id, project_id):
        """Creates a credential."""
        blob = self._encode_blob(access_key, secret_key)
        post_body = {
            "blob": blob,
            "project_id": project_id,
            "type": "ec2",
            "user_id": user_id
        }
        post_body = json.dumps({'credential': post_body})
        resp, body = self.post('credentials', post_body)
        self.expected_success(201, resp.status)
        body = json.loads(body)
        # The server returns the blob as a JSON string; decode it for callers.
        body['credential']['blob'] = json.loads(body['credential']['blob'])
        return service_client.ResponseBody(resp, body['credential'])

    def update_credential(self, credential_id, **kwargs):
        """Updates a credential; omitted kwargs keep their current values."""
        body = self.get_credential(credential_id)
        cred_type = kwargs.get('type', body['type'])
        access_key = kwargs.get('access_key', body['blob']['access'])
        secret_key = kwargs.get('secret_key', body['blob']['secret'])
        project_id = kwargs.get('project_id', body['project_id'])
        user_id = kwargs.get('user_id', body['user_id'])
        blob = self._encode_blob(access_key, secret_key)
        post_body = {
            "blob": blob,
            "project_id": project_id,
            "type": cred_type,
            "user_id": user_id
        }
        post_body = json.dumps({'credential': post_body})
        resp, body = self.patch('credentials/%s' % credential_id, post_body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        body['credential']['blob'] = json.loads(body['credential']['blob'])
        return service_client.ResponseBody(resp, body['credential'])

    def get_credential(self, credential_id):
        """To GET Details of a credential."""
        resp, body = self.get('credentials/%s' % credential_id)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        body['credential']['blob'] = json.loads(body['credential']['blob'])
        return service_client.ResponseBody(resp, body['credential'])

    def list_credentials(self):
        """Lists out all the available credentials."""
        resp, body = self.get('credentials')
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBodyList(resp, body['credentials'])

    def delete_credential(self, credential_id):
        """Deletes a credential."""
        resp, body = self.delete('credentials/%s' % credential_id)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)
|
@ -0,0 +1,87 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
|
||||
from neutron.tests.tempest.common import service_client
|
||||
|
||||
|
||||
class EndPointClientJSON(service_client.ServiceClient):
    """Client for the Keystone v3 endpoint API."""

    api_version = "v3"

    def list_endpoints(self):
        """Return all registered endpoints."""
        resp, raw = self.get('endpoints')
        self.expected_success(200, resp.status)
        endpoints = json.loads(raw)['endpoints']
        return service_client.ResponseBodyList(resp, endpoints)

    def create_endpoint(self, service_id, interface, url, **kwargs):
        """Create an endpoint.

        Normally this function wouldn't allow setting values that are not
        allowed for 'enabled'. Use `force_enabled` to set a non-boolean.
        """
        if 'force_enabled' in kwargs:
            enabled = kwargs.get('force_enabled', None)
        else:
            enabled = kwargs.get('enabled', None)
        request = json.dumps({'endpoint': {
            'service_id': service_id,
            'interface': interface,
            'url': url,
            'region': kwargs.get('region', None),
            'enabled': enabled,
        }})
        resp, raw = self.post('endpoints', request)
        self.expected_success(201, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)['endpoint'])

    def update_endpoint(self, endpoint_id, service_id=None, interface=None,
                        url=None, region=None, enabled=None, **kwargs):
        """Update selected fields of an endpoint.

        Only fields that are actually supplied are sent in the PATCH.
        Normally this function wouldn't allow setting values that are not
        allowed for 'enabled'. Use `force_enabled` to set a non-boolean.
        """
        changes = {}
        for field, value in (('service_id', service_id),
                             ('interface', interface),
                             ('url', url),
                             ('region', region)):
            if value is not None:
                changes[field] = value
        if 'force_enabled' in kwargs:
            changes['enabled'] = kwargs['force_enabled']
        elif enabled is not None:
            changes['enabled'] = enabled
        resp, raw = self.patch('endpoints/%s' % endpoint_id,
                               json.dumps({'endpoint': changes}))
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)['endpoint'])

    def delete_endpoint(self, endpoint_id):
        """Delete an endpoint."""
        resp_header, resp_body = self.delete('endpoints/%s' % endpoint_id)
        self.expected_success(204, resp_header.status)
        return service_client.ResponseBody(resp_header, resp_body)
|
@ -0,0 +1,523 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
import urllib
|
||||
|
||||
from neutron.tests.tempest.common import service_client
|
||||
|
||||
|
||||
class IdentityV3ClientJSON(service_client.ServiceClient):
    """Client for the Keystone v3 identity API (users and more)."""

    api_version = "v3"

    def create_user(self, user_name, password=None, project_id=None,
                    email=None, domain_id='default', **kwargs):
        """Create a user.

        Optional kwargs: enabled (default True), description,
        default_project_id.
        """
        request = json.dumps({'user': {
            'project_id': project_id,
            'default_project_id': kwargs.get('default_project_id'),
            'description': kwargs.get('description', None),
            'domain_id': domain_id,
            'email': email,
            'enabled': kwargs.get('enabled', True),
            'name': user_name,
            'password': password,
        }})
        resp, raw = self.post('users', request)
        self.expected_success(201, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)['user'])

    def update_user(self, user_id, name, **kwargs):
        """Update a user; unspecified fields keep their current values."""
        current = self.get_user(user_id)
        # Older responses may lack 'default_project_id' entirely.
        if 'default_project_id' in current.keys():
            default_project_id = kwargs.get('default_project_id',
                                            current['default_project_id'])
        else:
            default_project_id = kwargs.get('default_project_id')
        request = json.dumps({'user': {
            'name': name,
            'email': kwargs.get('email', current['email']),
            'enabled': kwargs.get('enabled', current['enabled']),
            'project_id': kwargs.get('project_id', current['project_id']),
            'default_project_id': default_project_id,
            'id': user_id,
            'domain_id': kwargs.get('domain_id', current['domain_id']),
            'description': kwargs.get('description', current['description']),
        }})
        resp, raw = self.patch('users/%s' % user_id, request)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)['user'])

    def update_user_password(self, user_id, password, original_password):
        """Change a user's password; the old password must be supplied."""
        request = json.dumps({'user': {
            'password': password,
            'original_password': original_password,
        }})
        resp, _ = self.post('users/%s/password' % user_id, request)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp)

    def list_user_projects(self, user_id):
        """List the projects on which a user has roles assigned."""
        resp, raw = self.get('users/%s/projects' % user_id)
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp,
                                               json.loads(raw)['projects'])

    def get_users(self, params=None):
        """List users, optionally filtered by query parameters."""
        url = 'users'
        if params:
            url = '%s?%s' % (url, urllib.urlencode(params))
        resp, raw = self.get(url)
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp, json.loads(raw)['users'])

    def get_user(self, user_id):
        """Show a single user."""
        resp, raw = self.get("users/%s" % user_id)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)['user'])

    def delete_user(self, user_id):
        """Delete a user."""
        resp, raw = self.delete("users/%s" % user_id)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, raw)
|
||||
|
||||
def create_project(self, name, **kwargs):
    """Create a project.

    Optional kwargs: description, enabled (default True),
    domain_id (default 'default').
    """
    request = json.dumps({'project': {
        'description': kwargs.get('description', None),
        'domain_id': kwargs.get('domain_id', 'default'),
        'enabled': kwargs.get('enabled', True),
        'name': name,
    }})
    resp, raw = self.post('projects', request)
    self.expected_success(201, resp.status)
    return service_client.ResponseBody(resp, json.loads(raw)['project'])

def list_projects(self, params=None):
    """List projects, optionally filtered by query parameters."""
    url = "projects"
    if params:
        url = '%s?%s' % (url, urllib.urlencode(params))
    resp, raw = self.get(url)
    self.expected_success(200, resp.status)
    return service_client.ResponseBodyList(resp, json.loads(raw)['projects'])

def update_project(self, project_id, **kwargs):
    """Update a project; unspecified fields keep their current values."""
    current = self.get_project(project_id)
    request = json.dumps({'project': {
        'id': project_id,
        'name': kwargs.get('name', current['name']),
        'description': kwargs.get('description', current['description']),
        'enabled': kwargs.get('enabled', current['enabled']),
        'domain_id': kwargs.get('domain_id', current['domain_id']),
    }})
    resp, raw = self.patch('projects/%s' % project_id, request)
    self.expected_success(200, resp.status)
    return service_client.ResponseBody(resp, json.loads(raw)['project'])

def get_project(self, project_id):
    """Show a single project."""
    resp, raw = self.get("projects/%s" % project_id)
    self.expected_success(200, resp.status)
    return service_client.ResponseBody(resp, json.loads(raw)['project'])

def delete_project(self, project_id):
    """Delete a project."""
    resp, raw = self.delete('projects/%s' % str(project_id))
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)
|
||||
|
||||
def create_role(self, name):
    """Create a role with the given name."""
    request = json.dumps({'role': {'name': name}})
    resp, raw = self.post('roles', request)
    self.expected_success(201, resp.status)
    return service_client.ResponseBody(resp, json.loads(raw)['role'])

def get_role(self, role_id):
    """Show a single role."""
    resp, raw = self.get('roles/%s' % str(role_id))
    self.expected_success(200, resp.status)
    return service_client.ResponseBody(resp, json.loads(raw)['role'])

def list_roles(self):
    """List all roles."""
    resp, raw = self.get("roles")
    self.expected_success(200, resp.status)
    return service_client.ResponseBodyList(resp, json.loads(raw)['roles'])

def update_role(self, name, role_id):
    """Rename an existing role."""
    request = json.dumps({'role': {'name': name}})
    resp, raw = self.patch('roles/%s' % str(role_id), request)
    self.expected_success(200, resp.status)
    return service_client.ResponseBody(resp, json.loads(raw)['role'])

def delete_role(self, role_id):
    """Delete a role."""
    resp, raw = self.delete('roles/%s' % str(role_id))
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)

def assign_user_role(self, project_id, user_id, role_id):
    """Grant a role to a user on a project."""
    resp, raw = self.put('projects/%s/users/%s/roles/%s' %
                         (project_id, user_id, role_id), None)
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)
|
||||
|
||||
def create_domain(self, name, **kwargs):
    """Create a domain; optional kwargs: description, enabled."""
    request = json.dumps({'domain': {
        'description': kwargs.get('description', None),
        'enabled': kwargs.get('enabled', True),
        'name': name,
    }})
    resp, raw = self.post('domains', request)
    self.expected_success(201, resp.status)
    return service_client.ResponseBody(resp, json.loads(raw)['domain'])

def delete_domain(self, domain_id):
    """Delete a domain."""
    resp, raw = self.delete('domains/%s' % str(domain_id))
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)

def list_domains(self):
    """List all domains."""
    resp, raw = self.get('domains')
    self.expected_success(200, resp.status)
    return service_client.ResponseBodyList(resp, json.loads(raw)['domains'])

def update_domain(self, domain_id, **kwargs):
    """Update a domain; unspecified fields keep their current values."""
    current = self.get_domain(domain_id)
    request = json.dumps({'domain': {
        'description': kwargs.get('description', current['description']),
        'enabled': kwargs.get('enabled', current['enabled']),
        'name': kwargs.get('name', current['name']),
    }})
    resp, raw = self.patch('domains/%s' % domain_id, request)
    self.expected_success(200, resp.status)
    return service_client.ResponseBody(resp, json.loads(raw)['domain'])

def get_domain(self, domain_id):
    """Show a single domain."""
    resp, raw = self.get('domains/%s' % domain_id)
    self.expected_success(200, resp.status)
    return service_client.ResponseBody(resp, json.loads(raw)['domain'])

def get_token(self, resp_token):
    """Validate a token and return its details."""
    resp, raw = self.get("auth/tokens",
                         headers={'X-Subject-Token': resp_token})
    self.expected_success(200, resp.status)
    return service_client.ResponseBody(resp, json.loads(raw)['token'])

def delete_token(self, resp_token):
    """Revoke a token."""
    resp, raw = self.delete("auth/tokens",
                            headers={'X-Subject-Token': resp_token})
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)
|
||||
|
||||
def create_group(self, name, **kwargs):
    """Create a group.

    Optional kwargs: description, domain_id (default 'default'),
    project_id.
    """
    request = json.dumps({'group': {
        'description': kwargs.get('description', None),
        'domain_id': kwargs.get('domain_id', 'default'),
        'project_id': kwargs.get('project_id', None),
        'name': name,
    }})
    resp, raw = self.post('groups', request)
    self.expected_success(201, resp.status)
    return service_client.ResponseBody(resp, json.loads(raw)['group'])

def get_group(self, group_id):
    """Show a single group."""
    resp, raw = self.get('groups/%s' % group_id)
    self.expected_success(200, resp.status)
    return service_client.ResponseBody(resp, json.loads(raw)['group'])

def list_groups(self):
    """List all groups."""
    resp, raw = self.get('groups')
    self.expected_success(200, resp.status)
    return service_client.ResponseBodyList(resp, json.loads(raw)['groups'])

def update_group(self, group_id, **kwargs):
    """Update a group; unspecified fields keep their current values."""
    current = self.get_group(group_id)
    request = json.dumps({'group': {
        'name': kwargs.get('name', current['name']),
        'description': kwargs.get('description', current['description']),
    }})
    resp, raw = self.patch('groups/%s' % group_id, request)
    self.expected_success(200, resp.status)
    return service_client.ResponseBody(resp, json.loads(raw)['group'])

def delete_group(self, group_id):
    """Delete a group."""
    resp, raw = self.delete('groups/%s' % str(group_id))
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)

def add_group_user(self, group_id, user_id):
    """Add a user to a group."""
    resp, raw = self.put('groups/%s/users/%s' % (group_id, user_id), None)
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)

def list_group_users(self, group_id):
    """List the users that belong to a group."""
    resp, raw = self.get('groups/%s/users' % group_id)
    self.expected_success(200, resp.status)
    return service_client.ResponseBodyList(resp, json.loads(raw)['users'])

def list_user_groups(self, user_id):
    """List the groups a user belongs to."""
    resp, raw = self.get('users/%s/groups' % user_id)
    self.expected_success(200, resp.status)
    return service_client.ResponseBodyList(resp, json.loads(raw)['groups'])

def delete_group_user(self, group_id, user_id):
    """Remove a user from a group."""
    resp, raw = self.delete('groups/%s/users/%s' % (group_id, user_id))
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)
|
||||
|
||||
def assign_user_role_on_project(self, project_id, user_id, role_id):
    """Grant a role to a user on a project."""
    resp, raw = self.put('projects/%s/users/%s/roles/%s' %
                         (project_id, user_id, role_id), None)
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)

def assign_user_role_on_domain(self, domain_id, user_id, role_id):
    """Grant a role to a user on a domain."""
    resp, raw = self.put('domains/%s/users/%s/roles/%s' %
                         (domain_id, user_id, role_id), None)
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)

def list_user_roles_on_project(self, project_id, user_id):
    """List a user's roles on a project."""
    resp, raw = self.get('projects/%s/users/%s/roles' %
                         (project_id, user_id))
    self.expected_success(200, resp.status)
    return service_client.ResponseBodyList(resp, json.loads(raw)['roles'])

def list_user_roles_on_domain(self, domain_id, user_id):
    """List a user's roles on a domain."""
    resp, raw = self.get('domains/%s/users/%s/roles' %
                         (domain_id, user_id))
    self.expected_success(200, resp.status)
    return service_client.ResponseBodyList(resp, json.loads(raw)['roles'])

def revoke_role_from_user_on_project(self, project_id, user_id, role_id):
    """Revoke a user's role on a project."""
    resp, raw = self.delete('projects/%s/users/%s/roles/%s' %
                            (project_id, user_id, role_id))
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)

def revoke_role_from_user_on_domain(self, domain_id, user_id, role_id):
    """Revoke a user's role on a domain."""
    resp, raw = self.delete('domains/%s/users/%s/roles/%s' %
                            (domain_id, user_id, role_id))
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)

def assign_group_role_on_project(self, project_id, group_id, role_id):
    """Grant a role to a group on a project."""
    resp, raw = self.put('projects/%s/groups/%s/roles/%s' %
                         (project_id, group_id, role_id), None)
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)

def assign_group_role_on_domain(self, domain_id, group_id, role_id):
    """Grant a role to a group on a domain."""
    resp, raw = self.put('domains/%s/groups/%s/roles/%s' %
                         (domain_id, group_id, role_id), None)
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)

def list_group_roles_on_project(self, project_id, group_id):
    """List a group's roles on a project."""
    resp, raw = self.get('projects/%s/groups/%s/roles' %
                         (project_id, group_id))
    self.expected_success(200, resp.status)
    return service_client.ResponseBodyList(resp, json.loads(raw)['roles'])

def list_group_roles_on_domain(self, domain_id, group_id):
    """List a group's roles on a domain."""
    resp, raw = self.get('domains/%s/groups/%s/roles' %
                         (domain_id, group_id))
    self.expected_success(200, resp.status)
    return service_client.ResponseBodyList(resp, json.loads(raw)['roles'])

def revoke_role_from_group_on_project(self, project_id, group_id, role_id):
    """Revoke a group's role on a project."""
    resp, raw = self.delete('projects/%s/groups/%s/roles/%s' %
                            (project_id, group_id, role_id))
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)

def revoke_role_from_group_on_domain(self, domain_id, group_id, role_id):
    """Revoke a group's role on a domain."""
    resp, raw = self.delete('domains/%s/groups/%s/roles/%s' %
                            (domain_id, group_id, role_id))
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)
|
||||
|
||||
def create_trust(self, trustor_user_id, trustee_user_id, project_id,
                 role_names, impersonation, expires_at):
    """Create a trust delegating the named roles on a project."""
    request = json.dumps({'trust': {
        'trustor_user_id': trustor_user_id,
        'trustee_user_id': trustee_user_id,
        'project_id': project_id,
        'impersonation': impersonation,
        'roles': [{'name': role} for role in role_names],
        'expires_at': expires_at,
    }})
    resp, raw = self.post('OS-TRUST/trusts', request)
    self.expected_success(201, resp.status)
    return service_client.ResponseBody(resp, json.loads(raw)['trust'])

def delete_trust(self, trust_id):
    """Delete a trust."""
    resp, raw = self.delete("OS-TRUST/trusts/%s" % trust_id)
    self.expected_success(204, resp.status)
    return service_client.ResponseBody(resp, raw)

def get_trusts(self, trustor_user_id=None, trustee_user_id=None):
    """List trusts, optionally filtered by trustor or trustee."""
    url = "OS-TRUST/trusts"
    # Trustor filter wins when both are supplied, matching the
    # original if/elif precedence.
    if trustor_user_id:
        url = "%s?trustor_user_id=%s" % (url, trustor_user_id)
    elif trustee_user_id:
        url = "%s?trustee_user_id=%s" % (url, trustee_user_id)
    resp, raw = self.get(url)
    self.expected_success(200, resp.status)
    return service_client.ResponseBodyList(resp, json.loads(raw)['trusts'])

def get_trust(self, trust_id):
    """Show a single trust."""
    resp, raw = self.get("OS-TRUST/trusts/%s" % trust_id)
    self.expected_success(200, resp.status)
    return service_client.ResponseBody(resp, json.loads(raw)['trust'])

def get_trust_roles(self, trust_id):
    """List the roles delegated by a trust."""
    resp, raw = self.get("OS-TRUST/trusts/%s/roles" % trust_id)
    self.expected_success(200, resp.status)
    return service_client.ResponseBodyList(resp, json.loads(raw)['roles'])

def get_trust_role(self, trust_id, role_id):
    """Show one role delegated by a trust."""
    resp, raw = self.get("OS-TRUST/trusts/%s/roles/%s"
                         % (trust_id, role_id))
    self.expected_success(200, resp.status)
    return service_client.ResponseBody(resp, json.loads(raw)['role'])

def check_trust_role(self, trust_id, role_id):
    """HEAD-check whether a role is delegated by a trust."""
    resp, raw = self.head("OS-TRUST/trusts/%s/roles/%s"
                          % (trust_id, role_id))
    self.expected_success(200, resp.status)
    return service_client.ResponseBody(resp, raw)
|
@ -0,0 +1,69 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
|
||||
from neutron.tests.tempest.common import service_client
|
||||
|
||||
|
||||
class PolicyClientJSON(service_client.ServiceClient):
    """Client for the Keystone v3 policy API."""

    api_version = "v3"

    def create_policy(self, blob, type):
        """Create a policy from a serialized blob and its MIME type."""
        # 'type' shadows the builtin, but the parameter name is part of
        # the public interface and is kept for caller compatibility.
        request = json.dumps({'policy': {'blob': blob, 'type': type}})
        resp, raw = self.post('policies', request)
        self.expected_success(201, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)['policy'])

    def list_policies(self):
        """List all policies."""
        resp, raw = self.get('policies')
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp,
                                               json.loads(raw)['policies'])

    def get_policy(self, policy_id):
        """Show a single policy."""
        resp, raw = self.get('policies/%s' % policy_id)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)['policy'])

    def update_policy(self, policy_id, **kwargs):
        """Update a policy; only the 'type' field is supported."""
        policy_type = kwargs.get('type')
        request = json.dumps({'policy': {'type': policy_type}})
        resp, raw = self.patch('policies/%s' % policy_id, request)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)['policy'])

    def delete_policy(self, policy_id):
        """Delete a policy."""
        resp, raw = self.delete("policies/%s" % policy_id)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, raw)
|
@ -0,0 +1,77 @@
|
||||
# Copyright 2014 Hewlett-Packard Development Company, L.P
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
import urllib
|
||||
|
||||
from neutron.tests.tempest.common import service_client
|
||||
|
||||
|
||||
class RegionClientJSON(service_client.ServiceClient):
    """Client for the Keystone v3 region API."""

    api_version = "v3"

    def create_region(self, description, **kwargs):
        """Create a region.

        When 'unique_region_id' is supplied the region is created via
        PUT with that ID; otherwise the server assigns one via POST.
        An optional 'parent_region_id' may also be given.
        """
        region = {'description': description}
        if kwargs.get('parent_region_id'):
            region['parent_region_id'] = kwargs.get('parent_region_id')
        request = json.dumps({'region': region})
        region_id = kwargs.get('unique_region_id')
        if region_id:
            resp, raw = self.put('regions/%s' % region_id, request)
        else:
            resp, raw = self.post('regions', request)
        self.expected_success(201, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)['region'])

    def update_region(self, region_id, **kwargs):
        """Update a region's description and/or parent_region_id."""
        changes = {}
        if 'description' in kwargs:
            changes['description'] = kwargs.get('description')
        if 'parent_region_id' in kwargs:
            changes['parent_region_id'] = kwargs.get('parent_region_id')
        resp, raw = self.patch('regions/%s' % region_id,
                               json.dumps({'region': changes}))
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)['region'])

    def get_region(self, region_id):
        """Show a single region."""
        resp, raw = self.get('regions/%s' % region_id)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)['region'])

    def list_regions(self, params=None):
        """List regions, optionally filtered by query parameters."""
        url = 'regions'
        if params:
            url = '%s?%s' % (url, urllib.urlencode(params))
        resp, raw = self.get(url)
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp,
                                               json.loads(raw)['regions'])

    def delete_region(self, region_id):
        """Delete a region."""
        resp, raw = self.delete('regions/%s' % region_id)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, raw)
|
@ -0,0 +1,73 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
|
||||
from neutron.tests.tempest.common import service_client
|
||||
|
||||
|
||||
class ServiceClientJSON(service_client.ServiceClient):
    """Client for the Keystone v3 service catalog API."""

    api_version = "v3"

    def update_service(self, service_id, **kwargs):
        """Update a service; unspecified fields keep their current values."""
        current = self.get_service(service_id)
        request = json.dumps({'service': {
            'description': kwargs.get('description', current['description']),
            'type': kwargs.get('type', current['type']),
            'name': kwargs.get('name', current['name']),
        }})
        resp, raw = self.patch('services/%s' % service_id, request)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)['service'])

    def get_service(self, service_id):
        """Show a single service."""
        resp, raw = self.get('services/%s' % service_id)
        self.expected_success(200, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)['service'])

    def create_service(self, serv_type, name=None, description=None,
                       enabled=True):
        """Register a service of the given type."""
        request = json.dumps({'service': {
            'name': name,
            'type': serv_type,
            'enabled': enabled,
            'description': description,
        }})
        resp, raw = self.post("services", request)
        self.expected_success(201, resp.status)
        return service_client.ResponseBody(resp, json.loads(raw)["service"])

    def delete_service(self, serv_id):
        """Delete a service."""
        resp, raw = self.delete("services/" + serv_id)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, raw)

    def list_services(self):
        """List all services."""
        resp, raw = self.get('services')
        self.expected_success(200, resp.status)
        return service_client.ResponseBodyList(resp,
                                               json.loads(raw)['services'])
|
137
neutron/tests/tempest/services/identity/v3/json/token_client.py
Normal file
137
neutron/tests/tempest/services/identity/v3/json/token_client.py
Normal file
@ -0,0 +1,137 @@
|
||||
# Copyright 2015 NEC Corporation. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
from tempest_lib.common import rest_client
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron.tests.tempest.common import service_client
|
||||
from neutron.tests.tempest import exceptions
|
||||
|
||||
|
||||
class V3TokenClientJSON(rest_client.RestClient):
    """Keystone v3 token client: POSTs credentials to ``/auth/tokens``."""

    def __init__(self, auth_url, disable_ssl_certificate_validation=None,
                 ca_certs=None, trace_requests=None):
        # This client performs authentication itself, so the base class is
        # given no auth provider / service / region (the three leading Nones).
        dscv = disable_ssl_certificate_validation
        super(V3TokenClientJSON, self).__init__(
            None, None, None, disable_ssl_certificate_validation=dscv,
            ca_certs=ca_certs, trace_requests=trace_requests)
        if not auth_url:
            raise exceptions.InvalidConfiguration('you must specify a v3 uri '
                                                  'if using the v3 identity '
                                                  'api')
        # Normalize the endpoint so it always targets the token resource.
        if 'auth/tokens' not in auth_url:
            auth_url = auth_url.rstrip('/') + '/auth/tokens'

        self.auth_url = auth_url

    def auth(self, user=None, password=None, project=None, user_type='id',
             user_domain=None, project_domain=None, token=None):
        """
        :param user: user id or name, as specified in user_type
        :param user_domain: the user domain
        :param project_domain: the project domain
        :param token: a token to re-scope.

        Accepts different combinations of credentials. Restrictions:
        - project and domain are only name (no id)
        Sample valid combinations:
        - token
        - token, project, project_domain
        - user_id, password
        - username, password, user_domain
        - username, password, project, user_domain, project_domain
        Validation is left to the server side.
        """
        # Build the v3 auth payload incrementally: methods and credential
        # sections are appended depending on which inputs were supplied.
        creds = {
            'auth': {
                'identity': {
                    'methods': [],
                }
            }
        }
        id_obj = creds['auth']['identity']
        if token:
            id_obj['methods'].append('token')
            id_obj['token'] = {
                'id': token
            }
        if user and password:
            id_obj['methods'].append('password')
            id_obj['password'] = {
                'user': {
                    'password': password,
                }
            }
            # user_type selects whether *user* is treated as an id or a name.
            if user_type == 'id':
                id_obj['password']['user']['id'] = user
            else:
                id_obj['password']['user']['name'] = user
            if user_domain is not None:
                _domain = dict(name=user_domain)
                id_obj['password']['user']['domain'] = _domain
        if project is not None:
            # NOTE(review): project_domain may be None here, producing
            # ``{"name": null}`` — validation is delegated to the server.
            _domain = dict(name=project_domain)
            _project = dict(name=project, domain=_domain)
            scope = dict(project=_project)
            creds['auth']['scope'] = scope

        body = json.dumps(creds)
        resp, body = self.post(self.auth_url, body=body)
        # Keystone answers 201 Created for a newly issued token.
        self.expected_success(201, resp.status)
        return service_client.ResponseBody(resp, body)

    def request(self, method, url, extra_headers=False, headers=None,
                body=None):
        """A simple HTTP request interface."""
        if headers is None:
            # Always accept 'json', for xml token client too.
            # Because XML response is not easily
            # converted to the corresponding JSON one
            headers = self.get_headers(accept_type="json")
        elif extra_headers:
            try:
                headers.update(self.get_headers(accept_type="json"))
            except (ValueError, TypeError):
                headers = self.get_headers(accept_type="json")

        resp, resp_body = self.raw_request(url, method,
                                           headers=headers, body=body)
        self._log_request(method, url, resp)

        # Map auth failures to the library exception; anything other than
        # a plain success is surfaced as an identity error.
        if resp.status in [401, 403]:
            resp_body = json.loads(resp_body)
            raise lib_exc.Unauthorized(resp_body['error']['message'])
        elif resp.status not in [200, 201, 204]:
            raise exceptions.IdentityError(
                'Unexpected status code {0}'.format(resp.status))

        return resp, json.loads(resp_body)

    def get_token(self, user, password, project=None, project_domain='Default',
                  user_domain='Default', auth_data=False):
        """
        :param user: username
        Returns (token id, token data) for supplied credentials
        """
        body = self.auth(user, password, project, user_type='name',
                         user_domain=user_domain,
                         project_domain=project_domain)

        # The token id travels in the X-Subject-Token response header,
        # not in the JSON body.
        token = body.response.get('x-subject-token')
        if auth_data:
            return token, body['token']
        else:
            return token
|
0
neutron/tests/tempest/services/network/__init__.py
Normal file
0
neutron/tests/tempest/services/network/__init__.py
Normal file
0
neutron/tests/tempest/services/network/json/__init__.py
Normal file
0
neutron/tests/tempest/services/network/json/__init__.py
Normal file
581
neutron/tests/tempest/services/network/json/network_client.py
Normal file
581
neutron/tests/tempest/services/network/json/network_client.py
Normal file
@ -0,0 +1,581 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
import time
|
||||
import urllib
|
||||
|
||||
from tempest_lib import exceptions as lib_exc
|
||||
|
||||
from neutron.tests.tempest.common import service_client
|
||||
from neutron.tests.tempest.common.utils import misc
|
||||
from neutron.tests.tempest import exceptions
|
||||
|
||||
|
||||
class NetworkClientJSON(service_client.ServiceClient):

    """
    Tempest REST client for Neutron. Uses v2 of the Neutron API, since the
    V1 API has been removed from the code base.

    Implements create, delete, update, list and show for the basic Neutron
    abstractions (networks, sub-networks, routers, ports and floating IP):

    Implements add/remove interface to router using subnet ID / port ID

    It also implements list, show, update and reset for OpenStack Networking
    quotas
    """

    version = '2.0'
    uri_prefix = "v2.0"

    def get_uri(self, plural_name):
        """Build the request URI for a plural resource name."""
        # get service prefix from resource name

        # The following list represents resource names that do not require
        # changing underscore to a hyphen
        hyphen_exceptions = ["health_monitors", "firewall_rules",
                             "firewall_policies"]
        # the following map is used to construct proper URI
        # for the given neutron resource
        service_resource_prefix_map = {
            'networks': '',
            'subnets': '',
            'ports': '',
            'pools': 'lb',
            'vips': 'lb',
            'health_monitors': 'lb',
            'members': 'lb',
            'ipsecpolicies': 'vpn',
            'vpnservices': 'vpn',
            'ikepolicies': 'vpn',
            'ipsec-site-connections': 'vpn',
            'metering_labels': 'metering',
            'metering_label_rules': 'metering',
            'firewall_rules': 'fw',
            'firewall_policies': 'fw',
            'firewalls': 'fw'
        }
        service_prefix = service_resource_prefix_map.get(
            plural_name)
        if plural_name not in hyphen_exceptions:
            plural_name = plural_name.replace("_", "-")
        if service_prefix:
            uri = '%s/%s/%s' % (self.uri_prefix, service_prefix,
                                plural_name)
        else:
            uri = '%s/%s' % (self.uri_prefix, plural_name)
        return uri

    def pluralize(self, resource_name):
        """Return the plural form used in URIs for *resource_name*."""
        # get plural from map or just add 's'

        # map from resource name to a plural name
        # needed only for those which can't be constructed as name + 's'
        resource_plural_map = {
            'security_groups': 'security_groups',
            'security_group_rules': 'security_group_rules',
            'ipsecpolicy': 'ipsecpolicies',
            'ikepolicy': 'ikepolicies',
            'ipsec_site_connection': 'ipsec-site-connections',
            'quotas': 'quotas',
            'firewall_policy': 'firewall_policies'
        }
        return resource_plural_map.get(resource_name, resource_name + 's')

    # The five "_xxxer" factories below return closures used by __getattr__
    # to synthesize list_/delete_/show_/create_/update_<resource> methods.

    def _lister(self, plural_name):
        def _list(**filters):
            uri = self.get_uri(plural_name)
            if filters:
                uri += '?' + urllib.urlencode(filters, doseq=1)
            resp, body = self.get(uri)
            result = {plural_name: self.deserialize_list(body)}
            self.expected_success(200, resp.status)
            return service_client.ResponseBody(resp, result)

        return _list

    def _deleter(self, resource_name):
        def _delete(resource_id):
            plural = self.pluralize(resource_name)
            uri = '%s/%s' % (self.get_uri(plural), resource_id)
            resp, body = self.delete(uri)
            self.expected_success(204, resp.status)
            return service_client.ResponseBody(resp, body)

        return _delete

    def _shower(self, resource_name):
        def _show(resource_id, **fields):
            # fields is a dict which key is 'fields' and value is a
            # list of field's name. An example:
            # {'fields': ['id', 'name']}
            plural = self.pluralize(resource_name)
            uri = '%s/%s' % (self.get_uri(plural), resource_id)
            if fields:
                uri += '?' + urllib.urlencode(fields, doseq=1)
            resp, body = self.get(uri)
            body = self.deserialize_single(body)
            self.expected_success(200, resp.status)
            return service_client.ResponseBody(resp, body)

        return _show

    def _creater(self, resource_name):
        def _create(**kwargs):
            plural = self.pluralize(resource_name)
            uri = self.get_uri(plural)
            post_data = self.serialize({resource_name: kwargs})
            resp, body = self.post(uri, post_data)
            body = self.deserialize_single(body)
            self.expected_success(201, resp.status)
            return service_client.ResponseBody(resp, body)

        return _create

    def _updater(self, resource_name):
        def _update(res_id, **kwargs):
            plural = self.pluralize(resource_name)
            uri = '%s/%s' % (self.get_uri(plural), res_id)
            post_data = self.serialize({resource_name: kwargs})
            resp, body = self.put(uri, post_data)
            body = self.deserialize_single(body)
            self.expected_success(200, resp.status)
            return service_client.ResponseBody(resp, body)

        return _update

    def __getattr__(self, name):
        # Dispatch list_/delete_/show_/create_/update_<resource> to the
        # matching generated closure; anything else is a genuine miss.
        method_prefixes = ["list_", "delete_", "show_", "create_", "update_"]
        method_functors = [self._lister,
                           self._deleter,
                           self._shower,
                           self._creater,
                           self._updater]
        for index, prefix in enumerate(method_prefixes):
            prefix_len = len(prefix)
            if name[:prefix_len] == prefix:
                return method_functors[index](name[prefix_len:])
        raise AttributeError(name)

    # Common methods that are hard to automate
    def create_bulk_network(self, names):
        """Create several networks in one request, one per name in *names*."""
        network_list = [{'name': name} for name in names]
        post_data = {'networks': network_list}
        body = self.serialize_list(post_data, "networks", "network")
        uri = self.get_uri("networks")
        resp, body = self.post(uri, body)
        body = {'networks': self.deserialize_list(body)}
        self.expected_success(201, resp.status)
        return service_client.ResponseBody(resp, body)

    def create_bulk_subnet(self, subnet_list):
        """Create several subnets in one request."""
        post_data = {'subnets': subnet_list}
        body = self.serialize_list(post_data, 'subnets', 'subnet')
        uri = self.get_uri('subnets')
        resp, body = self.post(uri, body)
        body = {'subnets': self.deserialize_list(body)}
        self.expected_success(201, resp.status)
        return service_client.ResponseBody(resp, body)

    def create_bulk_port(self, port_list):
        """Create several ports in one request."""
        post_data = {'ports': port_list}
        body = self.serialize_list(post_data, 'ports', 'port')
        uri = self.get_uri('ports')
        resp, body = self.post(uri, body)
        body = {'ports': self.deserialize_list(body)}
        self.expected_success(201, resp.status)
        return service_client.ResponseBody(resp, body)

    def wait_for_resource_deletion(self, resource_type, id):
        """Waits for a resource to be deleted."""
        start_time = int(time.time())
        while True:
            if self.is_resource_deleted(resource_type, id):
                return
            if int(time.time()) - start_time >= self.build_timeout:
                raise exceptions.TimeoutException
            time.sleep(self.build_interval)

    def is_resource_deleted(self, resource_type, id):
        """Return True when showing the resource raises NotFound."""
        method = 'show_' + resource_type
        try:
            getattr(self, method)(id)
        except AttributeError:
            raise Exception("Unknown resource type %s " % resource_type)
        except lib_exc.NotFound:
            return True
        return False

    def wait_for_resource_status(self, fetch, status, interval=None,
                                 timeout=None):
        """
        @summary: Waits for a network resource to reach a status
        @param fetch: the callable to be used to query the resource status
        @type fecth: callable that takes no parameters and returns the resource
        @param status: the status that the resource has to reach
        @type status: String
        @param interval: the number of seconds to wait between each status
          query
        @type interval: Integer
        @param timeout: the maximum number of seconds to wait for the resource
          to reach the desired status
        @type timeout: Integer
        """
        if not interval:
            interval = self.build_interval
        if not timeout:
            timeout = self.build_timeout
        start_time = time.time()

        while time.time() - start_time <= timeout:
            resource = fetch()
            if resource['status'] == status:
                return
            time.sleep(interval)

        # At this point, the wait has timed out
        message = 'Resource %s' % (str(resource))
        message += ' failed to reach status %s' % status
        message += ' (current: %s)' % resource['status']
        message += ' within the required time %s' % timeout
        caller = misc.find_test_caller()
        if caller:
            message = '(%s) %s' % (caller, message)
        raise exceptions.TimeoutException(message)

    def deserialize_single(self, body):
        return json.loads(body)

    def deserialize_list(self, body):
        res = json.loads(body)
        # expecting response in form
        # {'resources': [ res1, res2] } => when pagination disabled
        # {'resources': [..], 'resources_links': {}} => if pagination enabled
        for k in res.keys():
            if k.endswith("_links"):
                continue
            return res[k]

    def serialize(self, data):
        return json.dumps(data)

    def serialize_list(self, data, root=None, item=None):
        # root/item are kept for interface compatibility with non-JSON
        # serializers; JSON does not need them.
        return self.serialize(data)

    def update_quotas(self, tenant_id, **kwargs):
        """Update networking quotas for *tenant_id*."""
        put_body = {'quota': kwargs}
        body = json.dumps(put_body)
        uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
        resp, body = self.put(uri, body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body['quota'])

    def reset_quotas(self, tenant_id):
        """Reset networking quotas for *tenant_id* to defaults."""
        uri = '%s/quotas/%s' % (self.uri_prefix, tenant_id)
        resp, body = self.delete(uri)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)

    def create_router(self, name, admin_state_up=True, **kwargs):
        """Create a router; extra attributes go through **kwargs."""
        post_body = {'router': kwargs}
        post_body['router']['name'] = name
        post_body['router']['admin_state_up'] = admin_state_up
        body = json.dumps(post_body)
        uri = '%s/routers' % (self.uri_prefix)
        resp, body = self.post(uri, body)
        self.expected_success(201, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def _update_router(self, router_id, set_enable_snat, **kwargs):
        # Read-modify-write: fetch the current router so unspecified
        # attributes keep their existing values.
        uri = '%s/routers/%s' % (self.uri_prefix, router_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        update_body = {}
        update_body['name'] = kwargs.get('name', body['router']['name'])
        update_body['admin_state_up'] = kwargs.get(
            'admin_state_up', body['router']['admin_state_up'])
        cur_gw_info = body['router']['external_gateway_info']
        if cur_gw_info:
            # TODO(kevinbenton): setting the external gateway info is not
            # allowed for a regular tenant. If the ability to update is also
            # merged, a test case for this will need to be added similar to
            # the SNAT case.
            cur_gw_info.pop('external_fixed_ips', None)
            if not set_enable_snat:
                cur_gw_info.pop('enable_snat', None)
        update_body['external_gateway_info'] = kwargs.get(
            'external_gateway_info', body['router']['external_gateway_info'])
        if 'distributed' in kwargs:
            update_body['distributed'] = kwargs['distributed']
        update_body = dict(router=update_body)
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def update_router(self, router_id, **kwargs):
        """Update a router leaving enable_snat to its default value."""
        # If external_gateway_info contains enable_snat the request will fail
        # with 404 unless executed with admin client, and therefore we instruct
        # _update_router to not set this attribute
        # NOTE(salv-orlando): The above applies as long as Neutron's default
        # policy is to restrict enable_snat usage to admins only.
        return self._update_router(router_id, set_enable_snat=False, **kwargs)

    def update_router_with_snat_gw_info(self, router_id, **kwargs):
        """Update a router passing also the enable_snat attribute.

        This method must be execute with admin credentials, otherwise the API
        call will return a 404 error.
        """
        return self._update_router(router_id, set_enable_snat=True, **kwargs)

    def add_router_interface_with_subnet_id(self, router_id, subnet_id):
        """Attach the given subnet to a router."""
        uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
                                                      router_id)
        update_body = {"subnet_id": subnet_id}
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def add_router_interface_with_port_id(self, router_id, port_id):
        """Attach the given port to a router."""
        uri = '%s/routers/%s/add_router_interface' % (self.uri_prefix,
                                                      router_id)
        update_body = {"port_id": port_id}
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def remove_router_interface_with_subnet_id(self, router_id, subnet_id):
        """Detach the given subnet from a router."""
        uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
                                                         router_id)
        update_body = {"subnet_id": subnet_id}
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def remove_router_interface_with_port_id(self, router_id, port_id):
        """Detach the given port from a router."""
        uri = '%s/routers/%s/remove_router_interface' % (self.uri_prefix,
                                                         router_id)
        update_body = {"port_id": port_id}
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def associate_health_monitor_with_pool(self, health_monitor_id,
                                           pool_id):
        """Associate a LBaaS health monitor with a pool."""
        post_body = {
            "health_monitor": {
                "id": health_monitor_id,
            }
        }
        body = json.dumps(post_body)
        uri = '%s/lb/pools/%s/health_monitors' % (self.uri_prefix,
                                                  pool_id)
        resp, body = self.post(uri, body)
        self.expected_success(201, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def disassociate_health_monitor_with_pool(self, health_monitor_id,
                                              pool_id):
        """Remove a LBaaS health monitor association from a pool."""
        uri = '%s/lb/pools/%s/health_monitors/%s' % (self.uri_prefix, pool_id,
                                                     health_monitor_id)
        resp, body = self.delete(uri)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)

    def list_router_interfaces(self, uuid):
        """List ports whose device_id matches the given router uuid."""
        uri = '%s/ports?device_id=%s' % (self.uri_prefix, uuid)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def update_agent(self, agent_id, agent_info):
        """
        :param agent_info: Agent update information.
        E.g {"admin_state_up": True}
        """
        uri = '%s/agents/%s' % (self.uri_prefix, agent_id)
        agent = {"agent": agent_info}
        body = json.dumps(agent)
        resp, body = self.put(uri, body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def list_pools_hosted_by_one_lbaas_agent(self, agent_id):
        """List LBaaS pools scheduled on the given agent."""
        uri = '%s/agents/%s/loadbalancer-pools' % (self.uri_prefix, agent_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def show_lbaas_agent_hosting_pool(self, pool_id):
        """Show the LBaaS agent hosting the given pool."""
        uri = ('%s/lb/pools/%s/loadbalancer-agent' %
               (self.uri_prefix, pool_id))
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def list_routers_on_l3_agent(self, agent_id):
        """List routers scheduled on the given L3 agent."""
        uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def list_l3_agents_hosting_router(self, router_id):
        """List L3 agents hosting the given router."""
        uri = '%s/routers/%s/l3-agents' % (self.uri_prefix, router_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def add_router_to_l3_agent(self, agent_id, router_id):
        """Schedule a router onto the given L3 agent."""
        uri = '%s/agents/%s/l3-routers' % (self.uri_prefix, agent_id)
        post_body = {"router_id": router_id}
        body = json.dumps(post_body)
        resp, body = self.post(uri, body)
        self.expected_success(201, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def remove_router_from_l3_agent(self, agent_id, router_id):
        """Unschedule a router from the given L3 agent."""
        uri = '%s/agents/%s/l3-routers/%s' % (
            self.uri_prefix, agent_id, router_id)
        resp, body = self.delete(uri)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)

    def list_dhcp_agent_hosting_network(self, network_id):
        """List DHCP agents hosting the given network."""
        uri = '%s/networks/%s/dhcp-agents' % (self.uri_prefix, network_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def list_networks_hosted_by_one_dhcp_agent(self, agent_id):
        """List networks scheduled on the given DHCP agent."""
        uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def remove_network_from_dhcp_agent(self, agent_id, network_id):
        """Unschedule a network from the given DHCP agent."""
        uri = '%s/agents/%s/dhcp-networks/%s' % (self.uri_prefix, agent_id,
                                                 network_id)
        resp, body = self.delete(uri)
        self.expected_success(204, resp.status)
        return service_client.ResponseBody(resp, body)

    def create_ikepolicy(self, name, **kwargs):
        """Create a VPN IKE policy; extra attributes go through **kwargs."""
        post_body = {
            "ikepolicy": {
                "name": name,
            }
        }
        for key, val in kwargs.items():
            post_body['ikepolicy'][key] = val
        body = json.dumps(post_body)
        uri = '%s/vpn/ikepolicies' % (self.uri_prefix)
        resp, body = self.post(uri, body)
        self.expected_success(201, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def update_extra_routes(self, router_id, nexthop, destination):
        """Replace a router's extra routes with a single nexthop route."""
        uri = '%s/routers/%s' % (self.uri_prefix, router_id)
        put_body = {
            'router': {
                'routes': [{'nexthop': nexthop,
                            "destination": destination}]
            }
        }
        body = json.dumps(put_body)
        resp, body = self.put(uri, body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def delete_extra_routes(self, router_id):
        """Clear a router's extra routes by PUTting a null routes value."""
        uri = '%s/routers/%s' % (self.uri_prefix, router_id)
        null_routes = None
        put_body = {
            'router': {
                'routes': null_routes
            }
        }
        body = json.dumps(put_body)
        resp, body = self.put(uri, body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def list_lb_pool_stats(self, pool_id):
        """Fetch traffic statistics for a LBaaS pool."""
        uri = '%s/lb/pools/%s/stats' % (self.uri_prefix, pool_id)
        resp, body = self.get(uri)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def add_dhcp_agent_to_network(self, agent_id, network_id):
        """Schedule a network onto the given DHCP agent."""
        post_body = {'network_id': network_id}
        body = json.dumps(post_body)
        uri = '%s/agents/%s/dhcp-networks' % (self.uri_prefix, agent_id)
        resp, body = self.post(uri, body)
        self.expected_success(201, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def insert_firewall_rule_in_policy(self, firewall_policy_id,
                                       firewall_rule_id, insert_after="",
                                       insert_before=""):
        """Insert a firewall rule into a policy, optionally positioned."""
        uri = '%s/fw/firewall_policies/%s/insert_rule' % (self.uri_prefix,
                                                          firewall_policy_id)
        body = {
            "firewall_rule_id": firewall_rule_id,
            "insert_after": insert_after,
            "insert_before": insert_before
        }
        body = json.dumps(body)
        resp, body = self.put(uri, body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)

    def remove_firewall_rule_from_policy(self, firewall_policy_id,
                                         firewall_rule_id):
        """Remove a firewall rule from a policy."""
        uri = '%s/fw/firewall_policies/%s/remove_rule' % (self.uri_prefix,
                                                          firewall_policy_id)
        update_body = {"firewall_rule_id": firewall_rule_id}
        update_body = json.dumps(update_body)
        resp, body = self.put(uri, update_body)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return service_client.ResponseBody(resp, body)
|
189
neutron/tests/tempest/services/network/resources.py
Normal file
189
neutron/tests/tempest/services/network/resources.py
Normal file
@ -0,0 +1,189 @@
|
||||
# Copyright 2013 Hewlett-Packard Development Company, L.P.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import abc
|
||||
|
||||
import six
|
||||
|
||||
|
||||
class AttributeDict(dict):
    """Dictionary whose keys can also be read as attributes (d.key)."""

    def __getattr__(self, name):
        """Resolve *name* as a dict key first, then as a real attribute."""
        try:
            return self[name]
        except KeyError:
            # Not a stored key: defer to normal attribute lookup so dict
            # methods and missing-attribute errors behave as usual.
            return super(AttributeDict, self).__getattribute__(name)
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
class DeletableResource(AttributeDict):

    """
    Support deletion of neutron resources (networks, subnets) via a
    delete() method, as is supported by keystone and nova resources.
    """

    def __init__(self, *args, **kwargs):
        # Plain attribute assignment: the client is stored on the instance,
        # not as a dict key, so it never shows up in the resource's data.
        self.client = kwargs.pop('client', None)
        super(DeletableResource, self).__init__(*args, **kwargs)

    def __str__(self):
        # Relies on AttributeDict lookup for the 'id' and 'name' keys.
        return '<%s id="%s" name="%s">' % (self.__class__.__name__,
                                           self.id, self.name)

    @abc.abstractmethod
    def delete(self):
        return

    @abc.abstractmethod
    def refresh(self):
        return

    def __hash__(self):
        # Hash by resource id so instances are usable in sets/dict keys
        # even though dict itself is unhashable.
        return hash(self.id)

    def wait_for_status(self, status):
        # Resources without a 'status' key (e.g. routers) are a no-op.
        if not hasattr(self, 'status'):
            return

        def helper_get():
            self.refresh()
            return self

        return self.client.wait_for_resource_status(helper_get, status)
|
||||
|
||||
|
||||
class DeletableNetwork(DeletableResource):

    def delete(self):
        """Delete this network via the neutron client."""
        self.client.delete_network(self.id)
|
||||
|
||||
|
||||
class DeletableSubnet(DeletableResource):

    def __init__(self, *args, **kwargs):
        super(DeletableSubnet, self).__init__(*args, **kwargs)
        # Router ids this subnet has been attached to; delete() removes
        # those interfaces again before deleting the subnet itself.
        self._router_ids = set()

    def update(self, *args, **kwargs):
        """Update the subnet server-side and mirror the result locally."""
        result = self.client.update_subnet(self.id,
                                           *args,
                                           **kwargs)
        return super(DeletableSubnet, self).update(**result['subnet'])

    def add_to_router(self, router_id):
        """Attach this subnet to a router, remembering it for cleanup."""
        self._router_ids.add(router_id)
        self.client.add_router_interface_with_subnet_id(router_id,
                                                        subnet_id=self.id)

    def delete(self):
        """Detach from all recorded routers, then delete the subnet."""
        # Iterate a copy because the set is mutated inside the loop.
        for router_id in self._router_ids.copy():
            self.client.remove_router_interface_with_subnet_id(
                router_id,
                subnet_id=self.id)
            self._router_ids.remove(router_id)
        self.client.delete_subnet(self.id)
|
||||
|
||||
|
||||
class DeletableRouter(DeletableResource):

    def set_gateway(self, network_id):
        """Point the router's external gateway at the given network."""
        return self.update(external_gateway_info=dict(network_id=network_id))

    def unset_gateway(self):
        """Clear the router's external gateway."""
        return self.update(external_gateway_info=dict())

    def update(self, *args, **kwargs):
        """Update the router server-side and mirror the result locally."""
        result = self.client.update_router(self.id,
                                           *args,
                                           **kwargs)
        return super(DeletableRouter, self).update(**result['router'])

    def delete(self):
        # The gateway is cleared first so the router can be removed.
        self.unset_gateway()
        self.client.delete_router(self.id)
|
||||
|
||||
|
||||
class DeletableFloatingIp(DeletableResource):

    def refresh(self, *args, **kwargs):
        """Re-read the floating IP from the API into this object."""
        result = self.client.show_floatingip(self.id,
                                             *args,
                                             **kwargs)
        super(DeletableFloatingIp, self).update(**result['floatingip'])

    def update(self, *args, **kwargs):
        """Update the floating IP server-side and mirror the result."""
        result = self.client.update_floatingip(self.id,
                                               *args,
                                               **kwargs)
        super(DeletableFloatingIp, self).update(**result['floatingip'])

    def __repr__(self):
        return '<%s addr="%s">' % (self.__class__.__name__,
                                   self.floating_ip_address)

    def __str__(self):
        return '<"FloatingIP" addr="%s" id="%s">' % (self.floating_ip_address,
                                                     self.id)

    def delete(self):
        """Delete this floating IP via the neutron client."""
        self.client.delete_floatingip(self.id)
|
||||
|
||||
|
||||
class DeletablePort(DeletableResource):

    def delete(self):
        """Delete this port via the neutron client."""
        self.client.delete_port(self.id)
|
||||
|
||||
|
||||
class DeletableSecurityGroup(DeletableResource):

    def delete(self):
        """Delete this security group via the neutron client."""
        self.client.delete_security_group(self.id)
|
||||
|
||||
|
||||
class DeletableSecurityGroupRule(DeletableResource):

    def __repr__(self):
        # Rules have no 'name', so only the id is shown.
        return '<%s id="%s">' % (self.__class__.__name__, self.id)

    def delete(self):
        """Delete this security group rule via the neutron client."""
        self.client.delete_security_group_rule(self.id)
|
||||
|
||||
|
||||
class DeletablePool(DeletableResource):

    def delete(self):
        """Delete this load-balancer pool via the client."""
        self.client.delete_pool(self.id)
|
||||
|
||||
|
||||
class DeletableMember(DeletableResource):

    def delete(self):
        """Delete this load-balancer member via the client."""
        self.client.delete_member(self.id)
|
||||
|
||||
|
||||
class DeletableVip(DeletableResource):

    def delete(self):
        """Delete this load-balancer VIP via the client."""
        self.client.delete_vip(self.id)

    def refresh(self):
        """Re-read the VIP from the API into this object."""
        result = self.client.show_vip(self.id)
        super(DeletableVip, self).update(**result['vip'])
|
674
neutron/tests/tempest/test.py
Normal file
674
neutron/tests/tempest/test.py
Normal file
@ -0,0 +1,674 @@
|
||||
# Copyright 2012 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import atexit
|
||||
import functools
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import urllib
|
||||
import uuid
|
||||
|
||||
import fixtures
|
||||
import six
|
||||
import testscenarios
|
||||
import testtools
|
||||
|
||||
from neutron.tests.api.contrib import clients
|
||||
from neutron.tests.tempest.common import credentials
|
||||
import neutron.tests.tempest.common.generator.valid_generator as valid
|
||||
from neutron.tests.tempest import config
|
||||
from neutron.tests.tempest import exceptions
|
||||
from oslo_utils import importutils
|
||||
from neutron.openstack.common import log as logging
|
||||
|
||||
# Module-level logger for this test framework module.
LOG = logging.getLogger(__name__)

# Shortcut to the global tempest configuration object.
CONF = config.CONF
|
||||
|
||||
|
||||
def attr(*args, **kwargs):
    """A decorator which applies the testtools attr decorator

    This decorator applies the testtools.testcase.attr if it is in the list of
    attributes to testtools we want to apply.

    :param type: either a single attribute name (str) or a list of attribute
        names to tag the decorated test with. The 'smoke' attribute
        implicitly adds the 'gate' attribute as well.
    """

    def decorator(f):
        if 'type' in kwargs and isinstance(kwargs['type'], str):
            f = testtools.testcase.attr(kwargs['type'])(f)
            if kwargs['type'] == 'smoke':
                f = testtools.testcase.attr('gate')(f)
        elif 'type' in kwargs and isinstance(kwargs['type'], list):
            # NOTE: the loop variable is named attr_ so it does not shadow
            # this module-level attr() function (the original code shadowed
            # it, which works only by accident of evaluation order).
            for attr_ in kwargs['type']:
                f = testtools.testcase.attr(attr_)(f)
                if attr_ == 'smoke':
                    f = testtools.testcase.attr('gate')(f)
        return f

    return decorator
|
||||
|
||||
|
||||
def idempotent_id(id):
    """Stub for metadata decorator

    Validates that *id* is a well-formed UUID string and records it both as
    a testtools attribute ('id-<uuid>') and in the test docstring.
    """
    if not isinstance(id, six.string_types):
        raise TypeError('Test idempotent_id must be string not %s'
                        '' % type(id).__name__)
    # Raises ValueError early when the id is not a valid UUID.
    uuid.UUID(id)

    def decorator(f):
        f = testtools.testcase.attr('id-%s' % id)(f)
        if f.__doc__:
            # Prepend the id, keeping the original docstring.
            f.__doc__ = 'Test idempotent id: %s\n%s' % (id, f.__doc__)
        else:
            f.__doc__ = 'Test idempotent id: %s' % id
        return f
    return decorator
|
||||
|
||||
|
||||
def get_service_list():
    """Map each service tag to its configured availability flag.

    Returns a dict keyed by the service names accepted by the services()
    decorator; values come from the CONF.service_available options.
    """
    service_list = {
        'compute': CONF.service_available.nova,
        'image': CONF.service_available.glance,
        'baremetal': CONF.service_available.ironic,
        'volume': CONF.service_available.cinder,
        'orchestration': CONF.service_available.heat,
        # NOTE(mtreinish) nova-network will provide networking functionality
        # if neutron isn't available, so always set to True.
        'network': True,
        'identity': True,
        'object_storage': CONF.service_available.swift,
        'dashboard': CONF.service_available.horizon,
        'telemetry': CONF.service_available.ceilometer,
        'data_processing': CONF.service_available.sahara
    }
    return service_list
|
||||
|
||||
|
||||
def services(*args, **kwargs):
    """A decorator used to set an attr for each service used in a test case

    This decorator applies a testtools attr for each service that gets
    exercised by a test case.

    Raises InvalidServiceTag at decoration time for unknown service names;
    at run time the wrapped test is skipped when any required service is
    not available per get_service_list().
    """
    def decorator(f):
        # The closed set of valid service tags; anything else is a typo.
        services = ['compute', 'image', 'baremetal', 'volume', 'orchestration',
                    'network', 'identity', 'object_storage', 'dashboard',
                    'telemetry', 'data_processing']
        for service in args:
            if service not in services:
                raise exceptions.InvalidServiceTag('%s is not a valid '
                                                   'service' % service)
        attr(type=list(args))(f)

        @functools.wraps(f)
        def wrapper(self, *func_args, **func_kwargs):
            service_list = get_service_list()

            # Skip (not fail) when a required service is not deployed.
            for service in args:
                if not service_list[service]:
                    msg = 'Skipped because the %s service is not available' % (
                        service)
                    raise testtools.TestCase.skipException(msg)
            return f(self, *func_args, **func_kwargs)
        return wrapper
    return decorator
|
||||
|
||||
|
||||
def stresstest(*args, **kwargs):
    """Add stress test decorator

    For all functions with this decorator a attr stress will be
    set automatically.

    @param class_setup_per: allowed values are application, process, action
        ``application``: once in the stress job lifetime
        ``process``: once in the worker process lifetime
        ``action``: on each action
    @param allow_inheritance: allows inheritance of this attribute
    """
    def decorator(f):
        # Fall back to the documented defaults when the keyword arguments
        # are not given explicitly.
        f.st_class_setup_per = kwargs.get('class_setup_per', 'process')
        f.st_allow_inheritance = kwargs.get('allow_inheritance', False)
        attr(type='stress')(f)
        return f
    return decorator
|
||||
|
||||
|
||||
def requires_ext(*args, **kwargs):
    """A decorator to skip tests if an extension is not enabled

    @param extension: name of the API extension the test requires
    @param service: service the extension belongs to (e.g. 'network')
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*func_args, **func_kwargs):
            # Evaluated at call time so configuration is already loaded.
            if not is_extension_enabled(kwargs['extension'],
                                        kwargs['service']):
                msg = "Skipped because %s extension: %s is not enabled" % (
                    kwargs['service'], kwargs['extension'])
                raise testtools.TestCase.skipException(msg)
            return func(*func_args, **func_kwargs)
        return wrapper
    return decorator
|
||||
|
||||
|
||||
def is_extension_enabled(extension_name, service):
    """Check a service's configured extension list for an extension.

    :param extension_name: name of the extension to look up
    :param service: one of 'compute', 'volume', 'network' or 'object'
    :returns: True when the extension is listed, or when the list's first
        entry is the wildcard 'all'; False for an empty list or a miss.
    """
    config_dict = {
        'compute': CONF.compute_feature_enabled.api_extensions,
        'volume': CONF.volume_feature_enabled.api_extensions,
        'network': CONF.network_feature_enabled.api_extensions,
        'object': CONF.object_storage_feature_enabled.discoverable_apis,
    }
    enabled_extensions = config_dict[service]
    if not enabled_extensions:
        return False
    return (enabled_extensions[0] == 'all' or
            extension_name in enabled_extensions)
|
||||
|
||||
|
||||
# Classes that entered setUp() but whose base tearDownClass has not yet
# run; used to detect subclasses that fail to call super's tearDownClass.
at_exit_set = set()


def validate_tearDownClass():
    """At interpreter exit, report classes whose tearDownClass never ran."""
    if at_exit_set:
        LOG.error(
            "tearDownClass does not call the super's "
            "tearDownClass in these classes: \n"
            + str(at_exit_set))


atexit.register(validate_tearDownClass)
|
||||
|
||||
|
||||
class BaseTestCase(testtools.testcase.WithAttributes,
                   testtools.TestCase):
    """The test base class defines Tempest framework for class level fixtures.
    `setUpClass` and `tearDownClass` are defined here and cannot be overwritten
    by subclasses (enforced via hacking rule T105).

    Set-up is split in a series of steps (setup stages), which can be
    overwritten by test classes. Set-up stages are:
    - skip_checks
    - setup_credentials
    - setup_clients
    - resource_setup

    Tear-down is also split in a series of steps (teardown stages), which are
    stacked for execution only if the corresponding setup stage had been
    reached during the setup phase. Tear-down stages are:
    - clear_isolated_creds (defined in the base test class)
    - resource_cleanup
    """

    # Flipped in setUpClass; setUp() refuses to run when it is still False.
    setUpClassCalled = False
    _service = None

    network_resources = {}

    # NOTE(sdague): log_format is defined inline here instead of using the oslo
    # default because going through the config path recouples config to the
    # stress tests too early, and depending on testr order will fail unit tests
    log_format = ('%(asctime)s %(process)d %(levelname)-8s '
                  '[%(name)s] %(message)s')

    @classmethod
    def setUpClass(cls):
        # It should never be overridden by descendants
        if hasattr(super(BaseTestCase, cls), 'setUpClass'):
            super(BaseTestCase, cls).setUpClass()
        cls.setUpClassCalled = True
        # Stack of (name, callable) to be invoked in reverse order at teardown
        cls.teardowns = []
        # All the configuration checks that may generate a skip
        cls.skip_checks()
        try:
            # Allocation of all required credentials and client managers
            cls.teardowns.append(('credentials', cls.clear_isolated_creds))
            cls.setup_credentials()
            # Shortcuts to clients
            cls.setup_clients()
            # Additional class-wide test resources
            cls.teardowns.append(('resources', cls.resource_cleanup))
            cls.resource_setup()
        except Exception:
            etype, value, trace = sys.exc_info()
            LOG.info("%s raised in %s.setUpClass. Invoking tearDownClass." % (
                     etype, cls.__name__))
            # Run teardown for whatever stages completed before the failure,
            # then re-raise the original setup exception (Python 2 syntax).
            cls.tearDownClass()
            try:
                raise etype, value, trace
            finally:
                del trace  # to avoid circular refs

    @classmethod
    def tearDownClass(cls):
        at_exit_set.discard(cls)
        # It should never be overridden by descendants
        if hasattr(super(BaseTestCase, cls), 'tearDownClass'):
            super(BaseTestCase, cls).tearDownClass()
        # Save any existing exception, we always want to re-raise the original
        # exception only
        etype, value, trace = sys.exc_info()
        # If there was no exception during setup we shall re-raise the first
        # exception in teardown
        re_raise = (etype is None)
        while cls.teardowns:
            name, teardown = cls.teardowns.pop()
            # Catch any exception in tearDown so we can re-raise the original
            # exception at the end
            try:
                teardown()
            except Exception as te:
                sys_exec_info = sys.exc_info()
                tetype = sys_exec_info[0]
                # TODO(andreaf): Till we have the ability to cleanup only
                # resources that were successfully setup in resource_cleanup,
                # log AttributeError as info instead of exception.
                if tetype is AttributeError and name == 'resources':
                    LOG.info("tearDownClass of %s failed: %s" % (name, te))
                else:
                    LOG.exception("teardown of %s failed: %s" % (name, te))
                if not etype:
                    etype, value, trace = sys_exec_info
        # If exceptions were raised during teardown, an not before, re-raise
        # the first one
        if re_raise and etype is not None:
            try:
                raise etype, value, trace
            finally:
                del trace  # to avoid circular refs

    @classmethod
    def skip_checks(cls):
        """Class level skip checks. Subclasses verify in here all
        conditions that might prevent the execution of the entire test class.
        Checks implemented here may not make use API calls, and should rely on
        configuration alone.
        In general skip checks that require an API call are discouraged.
        If one is really needed it may be implemented either in the
        resource_setup or at test level.
        """
        pass

    @classmethod
    def setup_credentials(cls):
        """Allocate credentials and the client managers from them."""
        # TODO(andreaf) There is a fair amount of code that could me moved from
        # base / test classes in here. Ideally tests should be able to only
        # specify a list of (additional) credentials the need to use.
        pass

    @classmethod
    def setup_clients(cls):
        """Create links to the clients into the test object."""
        # TODO(andreaf) There is a fair amount of code that could me moved from
        # base / test classes in here. Ideally tests should be able to only
        # specify which client is `client` and nothing else.
        pass

    @classmethod
    def resource_setup(cls):
        """Class level resource setup for test cases.
        """
        pass

    @classmethod
    def resource_cleanup(cls):
        """Class level resource cleanup for test cases.
        Resource cleanup must be able to handle the case of partially setup
        resources, in case a failure during `resource_setup` should happen.
        """
        pass

    def setUp(self):
        """Per-test setup: timeout and optional stdout/stderr/log capture.

        Behavior is driven by the OS_TEST_TIMEOUT, OS_STDOUT_CAPTURE,
        OS_STDERR_CAPTURE and OS_LOG_CAPTURE environment variables.
        """
        super(BaseTestCase, self).setUp()
        if not self.setUpClassCalled:
            raise RuntimeError("setUpClass does not calls the super's"
                               "setUpClass in the "
                               + self.__class__.__name__)
        at_exit_set.add(self.__class__)
        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            # Non-numeric value: disable the timeout rather than crash.
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))

        if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
                os.environ.get('OS_STDOUT_CAPTURE') == '1'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
                os.environ.get('OS_STDERR_CAPTURE') == '1'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
        if (os.environ.get('OS_LOG_CAPTURE') != 'False' and
                os.environ.get('OS_LOG_CAPTURE') != '0'):
            self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
                                                   format=self.log_format,
                                                   level=None))

    @classmethod
    def get_client_manager(cls):
        """
        Returns an OpenStack client manager
        """
        force_tenant_isolation = getattr(cls, 'force_tenant_isolation', None)

        # Re-allocate credentials only when none exist yet or they belong
        # to a different test class.
        if (not hasattr(cls, 'isolated_creds') or
                not cls.isolated_creds.name == cls.__name__):
            cls.isolated_creds = credentials.get_isolated_credentials(
                name=cls.__name__, network_resources=cls.network_resources,
                force_tenant_isolation=force_tenant_isolation,
            )

        creds = cls.isolated_creds.get_primary_creds()
        os = clients.Manager(credentials=creds, service=cls._service)
        return os

    @classmethod
    def clear_isolated_creds(cls):
        """
        Clears isolated creds if set
        """
        if hasattr(cls, 'isolated_creds'):
            cls.isolated_creds.clear_isolated_creds()

    @classmethod
    def _get_identity_admin_client(cls):
        """
        Returns an instance of the Identity Admin API client
        """
        os = clients.AdminManager(service=cls._service)
        admin_client = os.identity_client
        return admin_client

    @classmethod
    def set_network_resources(cls, network=False, router=False, subnet=False,
                              dhcp=False):
        """Specify which network resources should be created

        @param network
        @param router
        @param subnet
        @param dhcp
        """
        # network resources should be set only once from callers
        # in order to ensure that even if it's called multiple times in
        # a chain of overloaded methods, the attribute is set only
        # in the leaf class
        if not cls.network_resources:
            cls.network_resources = {
                'network': network,
                'router': router,
                'subnet': subnet,
                'dhcp': dhcp}

    def assertEmpty(self, list, msg=None):
        """Assert that *list* (any sized container) has no elements."""
        self.assertTrue(len(list) == 0, msg)

    def assertNotEmpty(self, list, msg=None):
        """Assert that *list* (any sized container) has at least one element."""
        self.assertTrue(len(list) > 0, msg)
|
||||
|
||||
|
||||
class NegativeAutoTest(BaseTestCase):
    """Base class for auto-generated negative API tests.

    Scenarios are generated from a JSON description (see generate_scenario)
    and applied via the testscenarios load_tests mechanism.
    """

    # Shared registry of known-valid resource ids, keyed by resource kind.
    _resources = {}

    @classmethod
    def setUpClass(cls):
        super(NegativeAutoTest, cls).setUpClass()
        os = cls.get_client_manager()
        cls.client = os.negative_client
        os_admin = clients.AdminManager(service=cls._service)
        cls.admin_client = os_admin.negative_client

    @staticmethod
    def load_tests(*args):
        """
        Wrapper for testscenarios to set the mandatory scenarios variable
        only in case a real test loader is in place. Will be automatically
        called in case the variable "load_tests" is set.
        """
        # The two supported load_tests protocols pass arguments in
        # different orders; detect which one is in use.
        if getattr(args[0], 'suiteClass', None) is not None:
            loader, standard_tests, pattern = args
        else:
            standard_tests, module, loader = args
        for test in testtools.iterate_tests(standard_tests):
            schema = getattr(test, '_schema', None)
            if schema is not None:
                setattr(test, 'scenarios',
                        NegativeAutoTest.generate_scenario(schema))
        return testscenarios.load_tests_apply_scenarios(*args)

    @staticmethod
    def generate_scenario(description):
        """
        Generates the test scenario list for a given description.

        :param description: A file or dictionary with the following entries:
            name (required) name for the api
            http-method (required) one of HEAD,GET,PUT,POST,PATCH,DELETE
            url (required) the url to be appended to the catalog url with '%s'
                for each resource mentioned
            resources: (optional) A list of resource names such as "server",
                "flavor", etc. with an element for each '%s' in the url. This
                method will call self.get_resource for each element when
                constructing the positive test case template so negative
                subclasses are expected to return valid resource ids when
                appropriate.
            json-schema (optional) A valid json schema that will be used to
                create invalid data for the api calls. For "GET" and "HEAD",
                the data is used to generate query strings appended to the url,
                otherwise for the body of the http call.
        """
        LOG.debug(description)
        generator = importutils.import_class(
            CONF.negative.test_generator)()
        generator.validate_schema(description)
        schema = description.get("json-schema", None)
        resources = description.get("resources", [])
        scenario_list = []
        expected_result = None
        for resource in resources:
            if isinstance(resource, dict):
                # A dict entry carries its own expected result code.
                expected_result = resource['expected_result']
                resource = resource['name']
            LOG.debug("Add resource to test %s" % resource)
            scn_name = "inv_res_%s" % (resource)
            # One scenario per resource, each with a random (invalid) uuid.
            scenario_list.append((scn_name, {"resource": (resource,
                                                          str(uuid.uuid4())),
                                             "expected_result": expected_result
                                             }))
        if schema is not None:
            for scenario in generator.generate_scenarios(schema):
                scenario_list.append((scenario['_negtest_name'],
                                      scenario))
        LOG.debug(scenario_list)
        return scenario_list

    def execute(self, description):
        """
        Execute a http call on an api that are expected to
        result in client errors. First it uses invalid resources that are part
        of the url, and then invalid data for queries and http request bodies.

        :param description: A json file or dictionary with the following
        entries:
            name (required) name for the api
            http-method (required) one of HEAD,GET,PUT,POST,PATCH,DELETE
            url (required) the url to be appended to the catalog url with '%s'
                for each resource mentioned
            resources: (optional) A list of resource names such as "server",
                "flavor", etc. with an element for each '%s' in the url. This
                method will call self.get_resource for each element when
                constructing the positive test case template so negative
                subclasses are expected to return valid resource ids when
                appropriate.
            json-schema (optional) A valid json schema that will be used to
                create invalid data for the api calls. For "GET" and "HEAD",
                the data is used to generate query strings appended to the url,
                otherwise for the body of the http call.

        """
        LOG.info("Executing %s" % description["name"])
        LOG.debug(description)
        generator = importutils.import_class(
            CONF.negative.test_generator)()
        schema = description.get("json-schema", None)
        method = description["http-method"]
        url = description["url"]
        expected_result = None
        if "default_result_code" in description:
            expected_result = description["default_result_code"]

        resources = [self.get_resource(r) for
                     r in description.get("resources", [])]

        if hasattr(self, "resource"):
            # Note(mkoderer): The resources list already contains an invalid
            # entry (see get_resource).
            # We just send a valid json-schema with it
            valid_schema = None
            if schema:
                valid_schema = \
                    valid.ValidTestGenerator().generate_valid(schema)
            new_url, body = self._http_arguments(valid_schema, url, method)
        elif hasattr(self, "_negtest_name"):
            schema_under_test = \
                valid.ValidTestGenerator().generate_valid(schema)
            local_expected_result = \
                generator.generate_payload(self, schema_under_test)
            if local_expected_result is not None:
                expected_result = local_expected_result
            new_url, body = \
                self._http_arguments(schema_under_test, url, method)
        else:
            raise Exception("testscenarios are not active. Please make sure "
                            "that your test runner supports the load_tests "
                            "mechanism")

        if "admin_client" in description and description["admin_client"]:
            client = self.admin_client
        else:
            client = self.client
        resp, resp_body = client.send_request(method, new_url,
                                              resources, body=body)
        self._check_negative_response(expected_result, resp.status, resp_body)

    def _http_arguments(self, json_dict, url, method):
        """Turn json_dict into a query string or request body per method."""
        LOG.debug("dict: %s url: %s method: %s" % (json_dict, url, method))
        if not json_dict:
            return url, None
        elif method in ["GET", "HEAD", "PUT", "DELETE"]:
            return "%s?%s" % (url, urllib.urlencode(json_dict)), None
        else:
            return url, json.dumps(json_dict)

    def _check_negative_response(self, expected_result, result, body):
        # Any 4xx except 413 counts as the expected client error; when a
        # specific code was requested it must match exactly.
        self.assertTrue(result >= 400 and result < 500 and result != 413,
                        "Expected client error, got %s:%s" %
                        (result, body))
        self.assertTrue(expected_result is None or expected_result == result,
                        "Expected %s, got %s:%s" %
                        (expected_result, result, body))

    @classmethod
    def set_resource(cls, name, resource):
        """
        This function can be used in setUpClass context to register a resoruce
        for a test.

        :param name: The name of the kind of resource such as "flavor", "role",
            etc.
        :resource: The id of the resource
        """
        cls._resources[name] = resource

    def get_resource(self, name):
        """
        Return a valid uuid for a type of resource. If a real resource is
        needed as part of a url then this method should return one. Otherwise
        it can return None.

        :param name: The name of the kind of resource such as "flavor", "role",
            etc.
        """
        if isinstance(name, dict):
            name = name['name']
        # The scenario may have injected an invalid id for this resource
        # kind; prefer it over the registered valid one.
        if hasattr(self, "resource") and self.resource[0] == name:
            LOG.debug("Return invalid resource (%s) value: %s" %
                      (self.resource[0], self.resource[1]))
            return self.resource[1]
        if name in self._resources:
            return self._resources[name]
        return None
|
||||
|
||||
|
||||
def SimpleNegativeAutoTest(klass):
    """
    This decorator registers a test function on basis of the class name.
    """
    @attr(type=['negative', 'gate'])
    def generic_test(self):
        # Execute only when the class defines a negative-test schema.
        if hasattr(self, '_schema'):
            self.execute(self._schema)

    # Derive the test name from the class name, e.g.
    # "FlavorsListTestJSON" -> "test_flavors_list".
    cn = klass.__name__
    cn = cn.replace('JSON', '')
    cn = cn.replace('Test', '')
    # NOTE(mkoderer): replaces uppercase chars inside the class name with '_'
    lower_cn = re.sub('(?<!^)(?=[A-Z])', '_', cn).lower()
    func_name = 'test_%s' % lower_cn
    setattr(klass, func_name, generic_test)
    return klass
|
||||
|
||||
|
||||
def call_until_true(func, duration, sleep_for):
    """
    Call the given function until it returns True (and return True) or
    until the specified duration (in seconds) elapses (and return
    False).

    :param func: A zero argument callable that returns True on success.
    :param duration: The number of seconds for which to attempt a
        successful call of the function.
    :param sleep_for: The number of seconds to sleep after an unsuccessful
        invocation of the function.
    """
    deadline = time.time() + duration
    while time.time() < deadline:
        if func():
            return True
        # Back off before the next attempt.
        time.sleep(sleep_for)
    return False
|
Loading…
x
Reference in New Issue
Block a user