remove unnecessary neutron files under neutron/tests
Change-Id: Id1f4ed4a5752a6622cb59a4836cd4189d7ee615c
This commit is contained in:
parent
a310e46077
commit
ff34dae544
|
@ -1,74 +0,0 @@
|
||||||
# Copyright 2014 Cisco Systems, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import random
|
|
||||||
|
|
||||||
from neutron.agent.linux import ovs_lib
|
|
||||||
from neutron.agent.linux import utils
|
|
||||||
from neutron.plugins.common import constants as q_const
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
BR_PREFIX = 'test-br'
|
|
||||||
|
|
||||||
|
|
||||||
class BaseLinuxTestCase(base.BaseTestCase):
|
|
||||||
def setUp(self, root_helper='sudo'):
|
|
||||||
super(BaseLinuxTestCase, self).setUp()
|
|
||||||
|
|
||||||
self.root_helper = root_helper
|
|
||||||
|
|
||||||
def check_command(self, cmd, error_text, skip_msg):
|
|
||||||
try:
|
|
||||||
utils.execute(cmd)
|
|
||||||
except RuntimeError as e:
|
|
||||||
if error_text in str(e):
|
|
||||||
self.skipTest(skip_msg)
|
|
||||||
raise
|
|
||||||
|
|
||||||
def check_sudo_enabled(self):
|
|
||||||
if os.environ.get('OS_SUDO_TESTING') not in base.TRUE_STRING:
|
|
||||||
self.skipTest('testing with sudo is not enabled')
|
|
||||||
|
|
||||||
def get_rand_name(self, max_length, prefix='test'):
|
|
||||||
name = prefix + str(random.randint(1, 0x7fffffff))
|
|
||||||
return name[:max_length]
|
|
||||||
|
|
||||||
def create_resource(self, name_prefix, creation_func, *args, **kwargs):
|
|
||||||
"""Create a new resource that does not already exist.
|
|
||||||
|
|
||||||
:param name_prefix: The prefix for a randomly generated name
|
|
||||||
:param creation_func: A function taking the name of the resource
|
|
||||||
to be created as it's first argument. An error is assumed
|
|
||||||
to indicate a name collision.
|
|
||||||
:param *args *kwargs: These will be passed to the create function.
|
|
||||||
"""
|
|
||||||
while True:
|
|
||||||
name = self.get_rand_name(q_const.MAX_DEV_NAME_LEN, name_prefix)
|
|
||||||
try:
|
|
||||||
return creation_func(name, *args, **kwargs)
|
|
||||||
except RuntimeError:
|
|
||||||
continue
|
|
||||||
|
|
||||||
|
|
||||||
class BaseOVSLinuxTestCase(BaseLinuxTestCase):
|
|
||||||
def setUp(self, root_helper='sudo'):
|
|
||||||
super(BaseOVSLinuxTestCase, self).setUp(root_helper)
|
|
||||||
self.ovs = ovs_lib.BaseOVS(self.root_helper)
|
|
||||||
|
|
||||||
def create_ovs_bridge(self, br_prefix=BR_PREFIX):
|
|
||||||
br = self.create_resource(br_prefix, self.ovs.add_bridge)
|
|
||||||
self.addCleanup(br.destroy)
|
|
||||||
return br
|
|
|
@ -1,71 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 Red Hat, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import eventlet
|
|
||||||
import fixtures
|
|
||||||
|
|
||||||
from six import moves
|
|
||||||
|
|
||||||
from neutron.agent.linux import async_process
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
class TestAsyncProcess(base.BaseTestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestAsyncProcess, self).setUp()
|
|
||||||
self.test_file_path = self.useFixture(
|
|
||||||
fixtures.TempDir()).join("test_async_process.tmp")
|
|
||||||
self.data = [str(x) for x in moves.xrange(4)]
|
|
||||||
with file(self.test_file_path, 'w') as f:
|
|
||||||
f.writelines('%s\n' % item for item in self.data)
|
|
||||||
|
|
||||||
def _check_stdout(self, proc):
|
|
||||||
# Ensure that all the output from the file is read
|
|
||||||
output = []
|
|
||||||
while output != self.data:
|
|
||||||
new_output = list(proc.iter_stdout())
|
|
||||||
if new_output:
|
|
||||||
output += new_output
|
|
||||||
eventlet.sleep(0.01)
|
|
||||||
|
|
||||||
def test_stopping_async_process_lifecycle(self):
|
|
||||||
with self.assert_max_execution_time():
|
|
||||||
proc = async_process.AsyncProcess(['tail', '-f',
|
|
||||||
self.test_file_path])
|
|
||||||
proc.start()
|
|
||||||
self._check_stdout(proc)
|
|
||||||
proc.stop()
|
|
||||||
|
|
||||||
# Ensure that the process and greenthreads have stopped
|
|
||||||
proc._process.wait()
|
|
||||||
self.assertEqual(proc._process.returncode, -9)
|
|
||||||
for watcher in proc._watchers:
|
|
||||||
watcher.wait()
|
|
||||||
|
|
||||||
def test_async_process_respawns(self):
|
|
||||||
with self.assert_max_execution_time():
|
|
||||||
proc = async_process.AsyncProcess(['tail', '-f',
|
|
||||||
self.test_file_path],
|
|
||||||
respawn_interval=0)
|
|
||||||
proc.start()
|
|
||||||
|
|
||||||
# Ensure that the same output is read twice
|
|
||||||
self._check_stdout(proc)
|
|
||||||
pid = proc._get_pid_to_kill()
|
|
||||||
proc._kill_process(pid)
|
|
||||||
self._check_stdout(proc)
|
|
||||||
proc.stop()
|
|
|
@ -1,108 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 Red Hat, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
Tests in this module will be skipped unless:
|
|
||||||
|
|
||||||
- ovsdb-client is installed
|
|
||||||
|
|
||||||
- ovsdb-client can be invoked via password-less sudo
|
|
||||||
|
|
||||||
- OS_SUDO_TESTING is set to '1' or 'True' in the test execution
|
|
||||||
environment
|
|
||||||
|
|
||||||
|
|
||||||
The jenkins gate does not allow direct sudo invocation during test
|
|
||||||
runs, but configuring OS_SUDO_TESTING ensures that developers are
|
|
||||||
still able to execute tests that require the capability.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import eventlet
|
|
||||||
|
|
||||||
from neutron.agent.linux import ovsdb_monitor
|
|
||||||
from neutron.tests.functional.agent.linux import base as base_agent
|
|
||||||
|
|
||||||
|
|
||||||
class BaseMonitorTest(base_agent.BaseOVSLinuxTestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
# Emulate using a rootwrap script with sudo
|
|
||||||
super(BaseMonitorTest, self).setUp(root_helper='sudo sudo')
|
|
||||||
|
|
||||||
self._check_test_requirements()
|
|
||||||
self.bridge = self.create_ovs_bridge()
|
|
||||||
|
|
||||||
def _check_test_requirements(self):
|
|
||||||
self.check_sudo_enabled()
|
|
||||||
self.check_command(['which', 'ovsdb-client'],
|
|
||||||
'Exit code: 1', 'ovsdb-client is not installed')
|
|
||||||
self.check_command(['sudo', '-n', 'ovsdb-client', 'list-dbs'],
|
|
||||||
'Exit code: 1',
|
|
||||||
'password-less sudo not granted for ovsdb-client')
|
|
||||||
|
|
||||||
|
|
||||||
class TestOvsdbMonitor(BaseMonitorTest):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestOvsdbMonitor, self).setUp()
|
|
||||||
|
|
||||||
self.monitor = ovsdb_monitor.OvsdbMonitor('Bridge',
|
|
||||||
root_helper=self.root_helper)
|
|
||||||
self.addCleanup(self.monitor.stop)
|
|
||||||
self.monitor.start()
|
|
||||||
|
|
||||||
def collect_initial_output(self):
|
|
||||||
while True:
|
|
||||||
output = list(self.monitor.iter_stdout())
|
|
||||||
if output:
|
|
||||||
return output[0]
|
|
||||||
eventlet.sleep(0.01)
|
|
||||||
|
|
||||||
def test_killed_monitor_respawns(self):
|
|
||||||
with self.assert_max_execution_time():
|
|
||||||
self.monitor.respawn_interval = 0
|
|
||||||
old_pid = self.monitor._process.pid
|
|
||||||
output1 = self.collect_initial_output()
|
|
||||||
pid = self.monitor._get_pid_to_kill()
|
|
||||||
self.monitor._kill_process(pid)
|
|
||||||
self.monitor._reset_queues()
|
|
||||||
while (self.monitor._process.pid == old_pid):
|
|
||||||
eventlet.sleep(0.01)
|
|
||||||
output2 = self.collect_initial_output()
|
|
||||||
# Initial output should appear twice
|
|
||||||
self.assertEqual(output1, output2)
|
|
||||||
|
|
||||||
|
|
||||||
class TestSimpleInterfaceMonitor(BaseMonitorTest):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestSimpleInterfaceMonitor, self).setUp()
|
|
||||||
|
|
||||||
self.monitor = ovsdb_monitor.SimpleInterfaceMonitor(
|
|
||||||
root_helper=self.root_helper)
|
|
||||||
self.addCleanup(self.monitor.stop)
|
|
||||||
self.monitor.start(block=True)
|
|
||||||
|
|
||||||
def test_has_updates(self):
|
|
||||||
self.assertTrue(self.monitor.has_updates,
|
|
||||||
'Initial call should always be true')
|
|
||||||
self.assertFalse(self.monitor.has_updates,
|
|
||||||
'has_updates without port addition should be False')
|
|
||||||
self.create_resource('test-port-', self.bridge.add_port)
|
|
||||||
with self.assert_max_execution_time():
|
|
||||||
# has_updates after port addition should become True
|
|
||||||
while not self.monitor.has_updates:
|
|
||||||
eventlet.sleep(0.01)
|
|
|
@ -1,46 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright (c) 2014 OpenStack Foundation.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import os
|
|
||||||
|
|
||||||
from neutron.cmd.sanity import checks
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
class OVSSanityTestCase(base.BaseTestCase):
|
|
||||||
def setUp(self):
|
|
||||||
super(OVSSanityTestCase, self).setUp()
|
|
||||||
|
|
||||||
self.root_helper = 'sudo'
|
|
||||||
|
|
||||||
def check_sudo_enabled(self):
|
|
||||||
if os.environ.get('OS_SUDO_TESTING') not in base.TRUE_STRING:
|
|
||||||
self.skipTest('testing with sudo is not enabled')
|
|
||||||
|
|
||||||
def test_ovs_vxlan_support_runs(self):
|
|
||||||
"""This test just ensures that the test in neutron-sanity-check
|
|
||||||
can run through without error, without mocking anything out
|
|
||||||
"""
|
|
||||||
self.check_sudo_enabled()
|
|
||||||
checks.vxlan_supported(self.root_helper)
|
|
||||||
|
|
||||||
def test_ovs_patch_support_runs(self):
|
|
||||||
"""This test just ensures that the test in neutron-sanity-check
|
|
||||||
can run through without error, without mocking anything out
|
|
||||||
"""
|
|
||||||
self.check_sudo_enabled()
|
|
||||||
checks.patch_supported(self.root_helper)
|
|
|
@ -1,377 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 NEC Corporation
|
|
||||||
# All rights reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Akihiro Motoki, NEC Corporation
|
|
||||||
#
|
|
||||||
|
|
||||||
import contextlib
|
|
||||||
import httplib
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
from webob import exc
|
|
||||||
|
|
||||||
from neutron import context
|
|
||||||
from neutron.extensions import portbindings
|
|
||||||
from neutron import manager
|
|
||||||
from neutron.tests.unit import test_db_plugin
|
|
||||||
|
|
||||||
|
|
||||||
class PortBindingsTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
|
|
||||||
|
|
||||||
# VIF_TYPE must be overridden according to plugin vif_type
|
|
||||||
VIF_TYPE = portbindings.VIF_TYPE_OTHER
|
|
||||||
# The plugin supports the port security feature such as
|
|
||||||
# security groups and anti spoofing.
|
|
||||||
HAS_PORT_FILTER = False
|
|
||||||
|
|
||||||
def _check_response_portbindings(self, port):
|
|
||||||
self.assertEqual(port[portbindings.VIF_TYPE], self.VIF_TYPE)
|
|
||||||
vif_details = port[portbindings.VIF_DETAILS]
|
|
||||||
# REVISIT(rkukura): Consider reworking tests to enable ML2 to bind
|
|
||||||
if self.VIF_TYPE not in [portbindings.VIF_TYPE_UNBOUND,
|
|
||||||
portbindings.VIF_TYPE_BINDING_FAILED]:
|
|
||||||
# TODO(rkukura): Replace with new VIF security details
|
|
||||||
self.assertEqual(vif_details[portbindings.CAP_PORT_FILTER],
|
|
||||||
self.HAS_PORT_FILTER)
|
|
||||||
|
|
||||||
def _check_response_no_portbindings(self, port):
|
|
||||||
self.assertIn('status', port)
|
|
||||||
self.assertNotIn(portbindings.VIF_TYPE, port)
|
|
||||||
self.assertNotIn(portbindings.VIF_DETAILS, port)
|
|
||||||
|
|
||||||
def _get_non_admin_context(self):
|
|
||||||
return context.Context(user_id=None,
|
|
||||||
tenant_id=self._tenant_id,
|
|
||||||
is_admin=False,
|
|
||||||
read_deleted="no")
|
|
||||||
|
|
||||||
def test_port_vif_details(self):
|
|
||||||
with self.port(name='name') as port:
|
|
||||||
port_id = port['port']['id']
|
|
||||||
# Check a response of create_port
|
|
||||||
self._check_response_portbindings(port['port'])
|
|
||||||
# Check a response of get_port
|
|
||||||
ctx = context.get_admin_context()
|
|
||||||
port = self._show('ports', port_id, neutron_context=ctx)['port']
|
|
||||||
self._check_response_portbindings(port)
|
|
||||||
# By default user is admin - now test non admin user
|
|
||||||
ctx = self._get_non_admin_context()
|
|
||||||
non_admin_port = self._show(
|
|
||||||
'ports', port_id, neutron_context=ctx)['port']
|
|
||||||
self._check_response_no_portbindings(non_admin_port)
|
|
||||||
|
|
||||||
def test_ports_vif_details(self):
|
|
||||||
plugin = manager.NeutronManager.get_plugin()
|
|
||||||
cfg.CONF.set_default('allow_overlapping_ips', True)
|
|
||||||
with contextlib.nested(self.port(), self.port()):
|
|
||||||
ctx = context.get_admin_context()
|
|
||||||
ports = plugin.get_ports(ctx)
|
|
||||||
self.assertEqual(len(ports), 2)
|
|
||||||
for port in ports:
|
|
||||||
self._check_response_portbindings(port)
|
|
||||||
# By default user is admin - now test non admin user
|
|
||||||
ctx = self._get_non_admin_context()
|
|
||||||
ports = self._list('ports', neutron_context=ctx)['ports']
|
|
||||||
self.assertEqual(len(ports), 2)
|
|
||||||
for non_admin_port in ports:
|
|
||||||
self._check_response_no_portbindings(non_admin_port)
|
|
||||||
|
|
||||||
def _check_port_binding_profile(self, port, profile=None):
|
|
||||||
# For plugins which does not use binding:profile attr
|
|
||||||
# we just check an operation for the port succeed.
|
|
||||||
self.assertIn('id', port)
|
|
||||||
|
|
||||||
def _test_create_port_binding_profile(self, profile):
|
|
||||||
profile_arg = {portbindings.PROFILE: profile}
|
|
||||||
with self.port(arg_list=(portbindings.PROFILE,),
|
|
||||||
**profile_arg) as port:
|
|
||||||
port_id = port['port']['id']
|
|
||||||
self._check_port_binding_profile(port['port'], profile)
|
|
||||||
port = self._show('ports', port_id)
|
|
||||||
self._check_port_binding_profile(port['port'], profile)
|
|
||||||
|
|
||||||
def test_create_port_binding_profile_none(self):
|
|
||||||
self._test_create_port_binding_profile(None)
|
|
||||||
|
|
||||||
def test_create_port_binding_profile_with_empty_dict(self):
|
|
||||||
self._test_create_port_binding_profile({})
|
|
||||||
|
|
||||||
def _test_update_port_binding_profile(self, profile):
|
|
||||||
profile_arg = {portbindings.PROFILE: profile}
|
|
||||||
with self.port() as port:
|
|
||||||
# print "(1) %s" % port
|
|
||||||
self._check_port_binding_profile(port['port'])
|
|
||||||
port_id = port['port']['id']
|
|
||||||
ctx = context.get_admin_context()
|
|
||||||
port = self._update('ports', port_id, {'port': profile_arg},
|
|
||||||
neutron_context=ctx)['port']
|
|
||||||
self._check_port_binding_profile(port, profile)
|
|
||||||
port = self._show('ports', port_id)['port']
|
|
||||||
self._check_port_binding_profile(port, profile)
|
|
||||||
|
|
||||||
def test_update_port_binding_profile_none(self):
|
|
||||||
self._test_update_port_binding_profile(None)
|
|
||||||
|
|
||||||
def test_update_port_binding_profile_with_empty_dict(self):
|
|
||||||
self._test_update_port_binding_profile({})
|
|
||||||
|
|
||||||
def test_port_create_portinfo_non_admin(self):
|
|
||||||
profile_arg = {portbindings.PROFILE: {'dummy': 'dummy'}}
|
|
||||||
with self.network(set_context=True, tenant_id='test') as net1:
|
|
||||||
with self.subnet(network=net1) as subnet1:
|
|
||||||
# succeed without binding:profile
|
|
||||||
with self.port(subnet=subnet1,
|
|
||||||
set_context=True, tenant_id='test'):
|
|
||||||
pass
|
|
||||||
# fail with binding:profile
|
|
||||||
try:
|
|
||||||
with self.port(subnet=subnet1,
|
|
||||||
expected_res_status=403,
|
|
||||||
arg_list=(portbindings.PROFILE,),
|
|
||||||
set_context=True, tenant_id='test',
|
|
||||||
**profile_arg):
|
|
||||||
pass
|
|
||||||
except exc.HTTPClientError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
def test_port_update_portinfo_non_admin(self):
|
|
||||||
profile_arg = {portbindings.PROFILE: {'dummy': 'dummy'}}
|
|
||||||
with self.network() as net1:
|
|
||||||
with self.subnet(network=net1) as subnet1:
|
|
||||||
with self.port(subnet=subnet1) as port:
|
|
||||||
# By default user is admin - now test non admin user
|
|
||||||
# Note that 404 is returned when prohibit by policy.
|
|
||||||
# See comment for PolicyNotAuthorized except clause
|
|
||||||
# in update() in neutron.api.v2.base.Controller.
|
|
||||||
port_id = port['port']['id']
|
|
||||||
ctx = self._get_non_admin_context()
|
|
||||||
port = self._update('ports', port_id,
|
|
||||||
{'port': profile_arg},
|
|
||||||
expected_code=404,
|
|
||||||
neutron_context=ctx)
|
|
||||||
|
|
||||||
|
|
||||||
class PortBindingsHostTestCaseMixin(object):
|
|
||||||
fmt = 'json'
|
|
||||||
hostname = 'testhost'
|
|
||||||
|
|
||||||
def _check_response_portbindings_host(self, port):
|
|
||||||
self.assertEqual(port[portbindings.HOST_ID], self.hostname)
|
|
||||||
|
|
||||||
def _check_response_no_portbindings_host(self, port):
|
|
||||||
self.assertIn('status', port)
|
|
||||||
self.assertNotIn(portbindings.HOST_ID, port)
|
|
||||||
|
|
||||||
def test_port_vif_non_admin(self):
|
|
||||||
with self.network(set_context=True,
|
|
||||||
tenant_id='test') as net1:
|
|
||||||
with self.subnet(network=net1) as subnet1:
|
|
||||||
host_arg = {portbindings.HOST_ID: self.hostname}
|
|
||||||
try:
|
|
||||||
with self.port(subnet=subnet1,
|
|
||||||
expected_res_status=403,
|
|
||||||
arg_list=(portbindings.HOST_ID,),
|
|
||||||
set_context=True,
|
|
||||||
tenant_id='test',
|
|
||||||
**host_arg):
|
|
||||||
pass
|
|
||||||
except exc.HTTPClientError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
def test_port_vif_host(self):
|
|
||||||
host_arg = {portbindings.HOST_ID: self.hostname}
|
|
||||||
with self.port(name='name', arg_list=(portbindings.HOST_ID,),
|
|
||||||
**host_arg) as port:
|
|
||||||
port_id = port['port']['id']
|
|
||||||
# Check a response of create_port
|
|
||||||
self._check_response_portbindings_host(port['port'])
|
|
||||||
# Check a response of get_port
|
|
||||||
ctx = context.get_admin_context()
|
|
||||||
port = self._show('ports', port_id, neutron_context=ctx)['port']
|
|
||||||
self._check_response_portbindings_host(port)
|
|
||||||
# By default user is admin - now test non admin user
|
|
||||||
ctx = context.Context(user_id=None,
|
|
||||||
tenant_id=self._tenant_id,
|
|
||||||
is_admin=False,
|
|
||||||
read_deleted="no")
|
|
||||||
non_admin_port = self._show(
|
|
||||||
'ports', port_id, neutron_context=ctx)['port']
|
|
||||||
self._check_response_no_portbindings_host(non_admin_port)
|
|
||||||
|
|
||||||
def test_ports_vif_host(self):
|
|
||||||
cfg.CONF.set_default('allow_overlapping_ips', True)
|
|
||||||
host_arg = {portbindings.HOST_ID: self.hostname}
|
|
||||||
with contextlib.nested(
|
|
||||||
self.port(name='name1',
|
|
||||||
arg_list=(portbindings.HOST_ID,),
|
|
||||||
**host_arg),
|
|
||||||
self.port(name='name2')):
|
|
||||||
ctx = context.get_admin_context()
|
|
||||||
ports = self._list('ports', neutron_context=ctx)['ports']
|
|
||||||
self.assertEqual(2, len(ports))
|
|
||||||
for port in ports:
|
|
||||||
if port['name'] == 'name1':
|
|
||||||
self._check_response_portbindings_host(port)
|
|
||||||
else:
|
|
||||||
self.assertFalse(port[portbindings.HOST_ID])
|
|
||||||
# By default user is admin - now test non admin user
|
|
||||||
ctx = context.Context(user_id=None,
|
|
||||||
tenant_id=self._tenant_id,
|
|
||||||
is_admin=False,
|
|
||||||
read_deleted="no")
|
|
||||||
ports = self._list('ports', neutron_context=ctx)['ports']
|
|
||||||
self.assertEqual(2, len(ports))
|
|
||||||
for non_admin_port in ports:
|
|
||||||
self._check_response_no_portbindings_host(non_admin_port)
|
|
||||||
|
|
||||||
def test_ports_vif_host_update(self):
|
|
||||||
cfg.CONF.set_default('allow_overlapping_ips', True)
|
|
||||||
host_arg = {portbindings.HOST_ID: self.hostname}
|
|
||||||
with contextlib.nested(
|
|
||||||
self.port(name='name1',
|
|
||||||
arg_list=(portbindings.HOST_ID,),
|
|
||||||
**host_arg),
|
|
||||||
self.port(name='name2')) as (port1, port2):
|
|
||||||
data = {'port': {portbindings.HOST_ID: 'testhosttemp'}}
|
|
||||||
req = self.new_update_request('ports', data, port1['port']['id'])
|
|
||||||
req.get_response(self.api)
|
|
||||||
req = self.new_update_request('ports', data, port2['port']['id'])
|
|
||||||
ctx = context.get_admin_context()
|
|
||||||
req.get_response(self.api)
|
|
||||||
ports = self._list('ports', neutron_context=ctx)['ports']
|
|
||||||
self.assertEqual(2, len(ports))
|
|
||||||
for port in ports:
|
|
||||||
self.assertEqual('testhosttemp', port[portbindings.HOST_ID])
|
|
||||||
|
|
||||||
def test_ports_vif_non_host_update(self):
|
|
||||||
host_arg = {portbindings.HOST_ID: self.hostname}
|
|
||||||
with self.port(name='name', arg_list=(portbindings.HOST_ID,),
|
|
||||||
**host_arg) as port:
|
|
||||||
data = {'port': {'admin_state_up': False}}
|
|
||||||
req = self.new_update_request('ports', data, port['port']['id'])
|
|
||||||
res = self.deserialize(self.fmt, req.get_response(self.api))
|
|
||||||
self.assertEqual(port['port'][portbindings.HOST_ID],
|
|
||||||
res['port'][portbindings.HOST_ID])
|
|
||||||
|
|
||||||
def test_ports_vif_non_host_update_when_host_null(self):
|
|
||||||
with self.port() as port:
|
|
||||||
data = {'port': {'admin_state_up': False}}
|
|
||||||
req = self.new_update_request('ports', data, port['port']['id'])
|
|
||||||
res = self.deserialize(self.fmt, req.get_response(self.api))
|
|
||||||
self.assertEqual(port['port'][portbindings.HOST_ID],
|
|
||||||
res['port'][portbindings.HOST_ID])
|
|
||||||
|
|
||||||
def test_ports_vif_host_list(self):
|
|
||||||
cfg.CONF.set_default('allow_overlapping_ips', True)
|
|
||||||
host_arg = {portbindings.HOST_ID: self.hostname}
|
|
||||||
with contextlib.nested(
|
|
||||||
self.port(name='name1',
|
|
||||||
arg_list=(portbindings.HOST_ID,),
|
|
||||||
**host_arg),
|
|
||||||
self.port(name='name2'),
|
|
||||||
self.port(name='name3',
|
|
||||||
arg_list=(portbindings.HOST_ID,),
|
|
||||||
**host_arg),) as (port1, _port2, port3):
|
|
||||||
self._test_list_resources(
|
|
||||||
'port', (port1, port3),
|
|
||||||
query_params='%s=%s' % (portbindings.HOST_ID, self.hostname))
|
|
||||||
|
|
||||||
|
|
||||||
class PortBindingsVnicTestCaseMixin(object):
|
|
||||||
fmt = 'json'
|
|
||||||
vnic_type = portbindings.VNIC_NORMAL
|
|
||||||
|
|
||||||
def _check_response_portbindings_vnic_type(self, port):
|
|
||||||
self.assertIn('status', port)
|
|
||||||
self.assertEqual(port[portbindings.VNIC_TYPE], self.vnic_type)
|
|
||||||
|
|
||||||
def test_port_vnic_type_non_admin(self):
|
|
||||||
with self.network(set_context=True,
|
|
||||||
tenant_id='test') as net1:
|
|
||||||
with self.subnet(network=net1) as subnet1:
|
|
||||||
vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type}
|
|
||||||
with self.port(subnet=subnet1,
|
|
||||||
expected_res_status=httplib.CREATED,
|
|
||||||
arg_list=(portbindings.VNIC_TYPE,),
|
|
||||||
set_context=True,
|
|
||||||
tenant_id='test',
|
|
||||||
**vnic_arg) as port:
|
|
||||||
# Check a response of create_port
|
|
||||||
self._check_response_portbindings_vnic_type(port['port'])
|
|
||||||
|
|
||||||
def test_port_vnic_type(self):
|
|
||||||
vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type}
|
|
||||||
with self.port(name='name', arg_list=(portbindings.VNIC_TYPE,),
|
|
||||||
**vnic_arg) as port:
|
|
||||||
port_id = port['port']['id']
|
|
||||||
# Check a response of create_port
|
|
||||||
self._check_response_portbindings_vnic_type(port['port'])
|
|
||||||
# Check a response of get_port
|
|
||||||
ctx = context.get_admin_context()
|
|
||||||
port = self._show('ports', port_id, neutron_context=ctx)['port']
|
|
||||||
self._check_response_portbindings_vnic_type(port)
|
|
||||||
# By default user is admin - now test non admin user
|
|
||||||
ctx = context.Context(user_id=None,
|
|
||||||
tenant_id=self._tenant_id,
|
|
||||||
is_admin=False,
|
|
||||||
read_deleted="no")
|
|
||||||
non_admin_port = self._show(
|
|
||||||
'ports', port_id, neutron_context=ctx)['port']
|
|
||||||
self._check_response_portbindings_vnic_type(non_admin_port)
|
|
||||||
|
|
||||||
def test_ports_vnic_type(self):
|
|
||||||
cfg.CONF.set_default('allow_overlapping_ips', True)
|
|
||||||
vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type}
|
|
||||||
with contextlib.nested(
|
|
||||||
self.port(name='name1',
|
|
||||||
arg_list=(portbindings.VNIC_TYPE,),
|
|
||||||
**vnic_arg),
|
|
||||||
self.port(name='name2')):
|
|
||||||
ctx = context.get_admin_context()
|
|
||||||
ports = self._list('ports', neutron_context=ctx)['ports']
|
|
||||||
self.assertEqual(2, len(ports))
|
|
||||||
for port in ports:
|
|
||||||
if port['name'] == 'name1':
|
|
||||||
self._check_response_portbindings_vnic_type(port)
|
|
||||||
else:
|
|
||||||
self.assertEqual(portbindings.VNIC_NORMAL,
|
|
||||||
port[portbindings.VNIC_TYPE])
|
|
||||||
# By default user is admin - now test non admin user
|
|
||||||
ctx = context.Context(user_id=None,
|
|
||||||
tenant_id=self._tenant_id,
|
|
||||||
is_admin=False,
|
|
||||||
read_deleted="no")
|
|
||||||
ports = self._list('ports', neutron_context=ctx)['ports']
|
|
||||||
self.assertEqual(2, len(ports))
|
|
||||||
for non_admin_port in ports:
|
|
||||||
self._check_response_portbindings_vnic_type(non_admin_port)
|
|
||||||
|
|
||||||
def test_ports_vnic_type_list(self):
|
|
||||||
cfg.CONF.set_default('allow_overlapping_ips', True)
|
|
||||||
vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type}
|
|
||||||
with contextlib.nested(
|
|
||||||
self.port(name='name1',
|
|
||||||
arg_list=(portbindings.VNIC_TYPE,),
|
|
||||||
**vnic_arg),
|
|
||||||
self.port(name='name2'),
|
|
||||||
self.port(name='name3',
|
|
||||||
arg_list=(portbindings.VNIC_TYPE,),
|
|
||||||
**vnic_arg),) as (port1, port2, port3):
|
|
||||||
self._test_list_resources(
|
|
||||||
'port', (port1, port2, port3),
|
|
||||||
query_params='%s=%s' % (portbindings.VNIC_TYPE,
|
|
||||||
self.vnic_type))
|
|
|
@ -1,967 +0,0 @@
|
||||||
# Copyright 2012, VMware, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import collections
|
|
||||||
import mock
|
|
||||||
from oslo.config import cfg
|
|
||||||
import testtools
|
|
||||||
|
|
||||||
from neutron.agent.linux import ovs_lib
|
|
||||||
from neutron.agent.linux import utils
|
|
||||||
from neutron.common import exceptions
|
|
||||||
from neutron.openstack.common import jsonutils
|
|
||||||
from neutron.openstack.common import uuidutils
|
|
||||||
from neutron.plugins.common import constants as p_const
|
|
||||||
from neutron.plugins.openvswitch.common import constants as const
|
|
||||||
from neutron.tests import base
|
|
||||||
from neutron.tests import tools
|
|
||||||
|
|
||||||
try:
|
|
||||||
OrderedDict = collections.OrderedDict
|
|
||||||
except AttributeError:
|
|
||||||
import ordereddict
|
|
||||||
OrderedDict = ordereddict.OrderedDict
|
|
||||||
|
|
||||||
OVS_LINUX_KERN_VERS_WITHOUT_VXLAN = "3.12.0"
|
|
||||||
|
|
||||||
|
|
||||||
class TestBaseOVS(base.BaseTestCase):
    """Unit tests for ovs_lib.BaseOVS with ovs-vsctl invocations mocked."""

    def setUp(self):
        super(TestBaseOVS, self).setUp()
        self.root_helper = 'sudo'
        self.ovs = ovs_lib.BaseOVS(self.root_helper)
        self.br_name = 'bridge1'

    def test_add_bridge(self):
        # add_bridge must be idempotent, hence '--may-exist'.
        with mock.patch.object(self.ovs, 'run_vsctl') as mock_vsctl:
            bridge = self.ovs.add_bridge(self.br_name)

        mock_vsctl.assert_called_with(["--", "--may-exist",
                                       "add-br", self.br_name])
        self.assertEqual(bridge.br_name, self.br_name)
        self.assertEqual(bridge.root_helper, self.ovs.root_helper)

    def test_delete_bridge(self):
        # delete_bridge must tolerate a missing bridge, hence '--if-exists'.
        with mock.patch.object(self.ovs, 'run_vsctl') as mock_vsctl:
            self.ovs.delete_bridge(self.br_name)
        mock_vsctl.assert_called_with(["--", "--if-exists", "del-br",
                                       self.br_name])

    def test_bridge_exists_returns_true(self):
        with mock.patch.object(self.ovs, 'run_vsctl') as mock_vsctl:
            self.assertTrue(self.ovs.bridge_exists(self.br_name))
            mock_vsctl.assert_called_with(['br-exists', self.br_name],
                                          check_error=True)

    def test_bridge_exists_returns_false_for_exit_code_2(self):
        # ovs-vsctl br-exists exits with code 2 when the bridge is absent.
        with mock.patch.object(self.ovs, 'run_vsctl',
                               side_effect=RuntimeError('Exit code: 2\n')):
            self.assertFalse(self.ovs.bridge_exists('bridge1'))

    def test_bridge_exists_raises_unknown_exception(self):
        # Any failure other than exit code 2 must propagate to the caller.
        with mock.patch.object(self.ovs, 'run_vsctl',
                               side_effect=RuntimeError()):
            with testtools.ExpectedException(RuntimeError):
                self.ovs.bridge_exists('bridge1')

    def test_get_bridge_name_for_port_name_returns_bridge_for_valid_port(self):
        port_name = 'bar'
        with mock.patch.object(self.ovs, 'run_vsctl',
                               return_value=self.br_name) as mock_vsctl:
            bridge = self.ovs.get_bridge_name_for_port_name(port_name)
            self.assertEqual(bridge, self.br_name)
            mock_vsctl.assert_called_with(['port-to-br', port_name],
                                          check_error=True)

    def test_get_bridge_name_for_port_name_returns_none_for_exit_code_1(self):
        # ovs-vsctl port-to-br exits with code 1 for an unknown port.
        with mock.patch.object(self.ovs, 'run_vsctl',
                               side_effect=RuntimeError('Exit code: 1\n')):
            self.assertFalse(self.ovs.get_bridge_name_for_port_name('bridge1'))

    def test_get_bridge_name_for_port_name_raises_unknown_exception(self):
        with mock.patch.object(self.ovs, 'run_vsctl',
                               side_effect=RuntimeError()):
            with testtools.ExpectedException(RuntimeError):
                self.ovs.get_bridge_name_for_port_name('bridge1')

    def _test_port_exists(self, br_name, result):
        # Helper: port_exists() is truthy iff the port maps to some bridge.
        with mock.patch.object(self.ovs,
                               'get_bridge_name_for_port_name',
                               return_value=br_name):
            self.assertEqual(self.ovs.port_exists('bar'), result)

    def test_port_exists_returns_true_for_bridge_name(self):
        self._test_port_exists(self.br_name, True)

    def test_port_exists_returns_false_for_none(self):
        self._test_port_exists(None, False)
|
|
||||||
|
|
||||||
|
|
||||||
class OVS_Lib_Test(base.BaseTestCase):
|
|
||||||
"""A test suite to exercise the OVS libraries shared by Neutron agents.
|
|
||||||
|
|
||||||
Note: these tests do not actually execute ovs-* utilities, and thus
|
|
||||||
can run on any system. That does, however, limit their scope.
|
|
||||||
"""
|
|
||||||
|
|
||||||
    def setUp(self):
        """Create an OVSBridge wired to a mocked utils.execute."""
        super(OVS_Lib_Test, self).setUp()
        self.BR_NAME = "br-int"
        # Default ovs-vsctl timeout option; tests compare against it.
        self.TO = "--timeout=10"

        self.root_helper = 'sudo'
        self.br = ovs_lib.OVSBridge(self.BR_NAME, self.root_helper)
        # Every shell-out goes through utils.execute, so mocking it lets
        # the tests assert the exact command lines without running OVS.
        self.execute = mock.patch.object(
            utils, "execute", spec=utils.execute).start()
|
|
||||||
|
|
||||||
    def test_vifport(self):
        """Create and stringify vif port, confirm no exceptions."""

        pname = "vif1.0"
        ofport = 5
        vif_id = uuidutils.generate_uuid()
        mac = "ca:fe:de:ad:be:ef"

        # test __init__
        port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br)
        self.assertEqual(port.port_name, pname)
        self.assertEqual(port.ofport, ofport)
        self.assertEqual(port.vif_id, vif_id)
        self.assertEqual(port.vif_mac, mac)
        self.assertEqual(port.switch.br_name, self.BR_NAME)

        # test __str__
        str(port)
|
|
||||||
|
|
||||||
def test_set_controller(self):
|
|
||||||
controller_names = ['tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555']
|
|
||||||
self.br.set_controller(controller_names)
|
|
||||||
self.execute.assert_called_once_with(
|
|
||||||
['ovs-vsctl', self.TO, '--', 'set-controller', self.BR_NAME,
|
|
||||||
'tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555'],
|
|
||||||
root_helper=self.root_helper)
|
|
||||||
|
|
||||||
def test_del_controller(self):
|
|
||||||
self.br.del_controller()
|
|
||||||
self.execute.assert_called_once_with(
|
|
||||||
['ovs-vsctl', self.TO, '--', 'del-controller', self.BR_NAME],
|
|
||||||
root_helper=self.root_helper)
|
|
||||||
|
|
||||||
def test_get_controller(self):
|
|
||||||
self.execute.return_value = 'tcp:127.0.0.1:6633\ntcp:172.17.16.10:5555'
|
|
||||||
names = self.br.get_controller()
|
|
||||||
self.assertEqual(names,
|
|
||||||
['tcp:127.0.0.1:6633', 'tcp:172.17.16.10:5555'])
|
|
||||||
self.execute.assert_called_once_with(
|
|
||||||
['ovs-vsctl', self.TO, '--', 'get-controller', self.BR_NAME],
|
|
||||||
root_helper=self.root_helper)
|
|
||||||
|
|
||||||
def test_set_secure_mode(self):
|
|
||||||
self.br.set_secure_mode()
|
|
||||||
self.execute.assert_called_once_with(
|
|
||||||
['ovs-vsctl', self.TO, '--', 'set-fail-mode', self.BR_NAME,
|
|
||||||
'secure'], root_helper=self.root_helper)
|
|
||||||
|
|
||||||
def test_set_protocols(self):
|
|
||||||
protocols = 'OpenFlow13'
|
|
||||||
self.br.set_protocols(protocols)
|
|
||||||
self.execute.assert_called_once_with(
|
|
||||||
['ovs-vsctl', self.TO, '--', 'set', 'bridge', self.BR_NAME,
|
|
||||||
"protocols=%s" % protocols],
|
|
||||||
root_helper=self.root_helper)
|
|
||||||
|
|
||||||
    def test_create(self):
        """Smoke test: create() must not raise with execute mocked."""
        self.br.add_bridge(self.BR_NAME)

        self.br.create()
|
|
||||||
|
|
||||||
    def test_destroy(self):
        """Smoke test: destroy() must not raise with execute mocked."""
        self.br.delete_bridge(self.BR_NAME)

        self.br.destroy()
|
|
||||||
|
|
||||||
    def test_reset_bridge(self):
        """Smoke test: reset_bridge() (destroy + create) must not raise."""
        self.br.destroy()
        self.br.create()

        self.br.reset_bridge()
|
|
||||||
|
|
||||||
def _build_timeout_opt(self, exp_timeout):
|
|
||||||
return "--timeout=%d" % exp_timeout if exp_timeout else self.TO
|
|
||||||
|
|
||||||
    def _test_delete_port(self, exp_timeout=None):
        """Helper: delete a port and assert the exact vsctl command line."""
        exp_timeout_str = self._build_timeout_opt(exp_timeout)
        pname = "tap5"
        self.br.delete_port(pname)
        self.execute.assert_called_once_with(
            ["ovs-vsctl", exp_timeout_str, "--", "--if-exists",
             "del-port", self.BR_NAME, pname],
            root_helper=self.root_helper)
|
|
||||||
|
|
||||||
    def test_delete_port(self):
        """delete_port() with the default vsctl timeout."""
        self._test_delete_port()
|
|
||||||
|
|
||||||
    # NOTE(review): 'timeput' is a typo for 'timeout'; kept as-is since
    # renaming a test method changes the discovered test id.
    def test_call_command_non_default_timeput(self):
        # This test is only for verifying a non-default timeout
        # is correctly applied. Does not need to be repeated for
        # every ovs_lib method
        new_timeout = 5
        self.br.vsctl_timeout = new_timeout
        self._test_delete_port(new_timeout)
|
|
||||||
|
|
||||||
    def test_add_flow(self):
        """add_flow() renders each keyword set into one ovs-ofctl add-flow.

        OrderedDict inputs keep the rendered match-field order
        deterministic so the expected command strings can be compared
        literally.
        """
        ofport = "99"
        vid = 4000
        lsw_id = 18
        cidr = '192.168.1.0/24'

        flow_dict_1 = OrderedDict([('priority', 2),
                                   ('dl_src', 'ca:fe:de:ad:be:ef'),
                                   ('actions', 'strip_vlan,output:0')])
        flow_dict_2 = OrderedDict([('priority', 1),
                                   ('actions', 'normal')])
        flow_dict_3 = OrderedDict([('priority', 2),
                                   ('actions', 'drop')])
        flow_dict_4 = OrderedDict([('priority', 2),
                                   ('in_port', ofport),
                                   ('actions', 'drop')])
        flow_dict_5 = OrderedDict([
            ('priority', 4),
            ('in_port', ofport),
            ('dl_vlan', vid),
            ('actions', "strip_vlan,set_tunnel:%s,normal" % (lsw_id))])
        flow_dict_6 = OrderedDict([
            ('priority', 3),
            ('tun_id', lsw_id),
            ('actions', "mod_vlan_vid:%s,output:%s" % (vid, ofport))])
        flow_dict_7 = OrderedDict([
            ('priority', 4),
            ('nw_src', cidr),
            ('proto', 'arp'),
            ('actions', 'drop')])

        self.br.add_flow(**flow_dict_1)
        self.br.add_flow(**flow_dict_2)
        self.br.add_flow(**flow_dict_3)
        self.br.add_flow(**flow_dict_4)
        self.br.add_flow(**flow_dict_5)
        self.br.add_flow(**flow_dict_6)
        self.br.add_flow(**flow_dict_7)
        expected_calls = [
            mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=2,dl_src=ca:fe:de:ad:be:ef"
                       ",actions=strip_vlan,output:0"],
                      process_input=None, root_helper=self.root_helper),
            mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=1,actions=normal"],
                      process_input=None, root_helper=self.root_helper),
            mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=2,actions=drop"],
                      process_input=None, root_helper=self.root_helper),
            mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=2,in_port=%s,actions=drop" % ofport],
                      process_input=None, root_helper=self.root_helper),
            mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=4,dl_vlan=%s,in_port=%s,"
                       "actions=strip_vlan,set_tunnel:%s,normal"
                       % (vid, ofport, lsw_id)],
                      process_input=None, root_helper=self.root_helper),
            mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=3,tun_id=%s,actions="
                       "mod_vlan_vid:%s,output:%s"
                       % (lsw_id, vid, ofport)],
                      process_input=None, root_helper=self.root_helper),
            mock.call(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=4,nw_src=%s,arp,actions=drop" % cidr],
                      process_input=None, root_helper=self.root_helper),
        ]
        self.execute.assert_has_calls(expected_calls)
|
|
||||||
|
|
||||||
    def test_add_flow_timeout_set(self):
        """Explicit hard/idle timeouts override the zero defaults."""
        flow_dict = OrderedDict([('priority', 1),
                                 ('hard_timeout', 1000),
                                 ('idle_timeout', 2000),
                                 ('actions', 'normal')])

        self.br.add_flow(**flow_dict)
        self.execute.assert_called_once_with(
            ["ovs-ofctl", "add-flow", self.BR_NAME,
             "hard_timeout=1000,idle_timeout=2000,priority=1,actions=normal"],
            process_input=None,
            root_helper=self.root_helper)
|
|
||||||
|
|
||||||
    def test_add_flow_default_priority(self):
        """A flow with no explicit priority is rendered with priority=1."""
        flow_dict = OrderedDict([('actions', 'normal')])

        self.br.add_flow(**flow_dict)
        self.execute.assert_called_once_with(
            ["ovs-ofctl", "add-flow", self.BR_NAME,
             "hard_timeout=0,idle_timeout=0,priority=1,actions=normal"],
            process_input=None,
            root_helper=self.root_helper)
|
|
||||||
|
|
||||||
    def test_get_port_ofport(self):
        """get_port_ofport() returns the vsctl output verbatim."""
        pname = "tap99"
        ofport = "6"
        self.execute.return_value = ofport
        self.assertEqual(self.br.get_port_ofport(pname), ofport)
        self.execute.assert_called_once_with(
            ["ovs-vsctl", self.TO, "get", "Interface", pname, "ofport"],
            root_helper=self.root_helper)
|
|
||||||
|
|
||||||
    def test_get_port_ofport_non_int(self):
        """Non-integer vsctl output ('[]') maps to INVALID_OFPORT."""
        pname = "tap99"
        ofport = "[]"
        self.execute.return_value = ofport
        self.assertEqual(self.br.get_port_ofport(pname), const.INVALID_OFPORT)
        self.execute.assert_called_once_with(
            ["ovs-vsctl", self.TO, "get", "Interface", pname, "ofport"],
            root_helper=self.root_helper)
|
|
||||||
|
|
||||||
    def test_get_datapath_id(self):
        """get_datapath_id() strips the quotes ovs-vsctl puts around it."""
        datapath_id = '"0000b67f4fbcc149"'
        self.execute.return_value = datapath_id
        self.assertEqual(self.br.get_datapath_id(), datapath_id.strip('"'))
        self.execute.assert_called_once_with(
            ["ovs-vsctl", self.TO, "get",
             "Bridge", self.BR_NAME, "datapath_id"],
            root_helper=self.root_helper)
|
|
||||||
|
|
||||||
    def test_count_flows(self):
        """count_flows() ignores the dump-flows header line."""
        self.execute.return_value = 'ignore\nflow-1\n'
        # counts the number of flows as total lines of output - 2
        self.assertEqual(self.br.count_flows(), 1)
        self.execute.assert_called_once_with(
            ["ovs-ofctl", "dump-flows", self.BR_NAME],
            root_helper=self.root_helper,
            process_input=None)
|
|
||||||
|
|
||||||
    def test_delete_flow(self):
        """delete_flows() builds one del-flows command per match kwarg."""
        ofport = "5"
        lsw_id = 40
        vid = 39
        self.br.delete_flows(in_port=ofport)
        self.br.delete_flows(tun_id=lsw_id)
        self.br.delete_flows(dl_vlan=vid)
        expected_calls = [
            mock.call(["ovs-ofctl", "del-flows", self.BR_NAME,
                       "in_port=" + ofport],
                      process_input=None, root_helper=self.root_helper),
            mock.call(["ovs-ofctl", "del-flows", self.BR_NAME,
                       "tun_id=%s" % lsw_id],
                      process_input=None, root_helper=self.root_helper),
            mock.call(["ovs-ofctl", "del-flows", self.BR_NAME,
                       "dl_vlan=%s" % vid],
                      process_input=None, root_helper=self.root_helper),
        ]
        self.execute.assert_has_calls(expected_calls)
|
|
||||||
|
|
||||||
def test_delete_flow_with_priority_set(self):
|
|
||||||
params = {'in_port': '1',
|
|
||||||
'priority': '1'}
|
|
||||||
|
|
||||||
self.assertRaises(exceptions.InvalidInput,
|
|
||||||
self.br.delete_flows,
|
|
||||||
**params)
|
|
||||||
|
|
||||||
    def test_dump_flows(self):
        """dump_flows_for_table() returns the flows minus the NXST header."""
        table = 23
        nxst_flow = "NXST_FLOW reply (xid=0x4):"
        flows = "\n".join([" cookie=0x0, duration=18042.514s, table=0, "
                           "n_packets=6, n_bytes=468, "
                           "priority=2,in_port=1 actions=drop",
                           " cookie=0x0, duration=18027.562s, table=0, "
                           "n_packets=0, n_bytes=0, "
                           "priority=3,in_port=1,dl_vlan=100 "
                           "actions=mod_vlan_vid:1,NORMAL",
                           " cookie=0x0, duration=18044.351s, table=0, "
                           "n_packets=9, n_bytes=594, priority=1 "
                           "actions=NORMAL", " cookie=0x0, "
                           "duration=18044.211s, table=23, n_packets=0, "
                           "n_bytes=0, priority=0 actions=drop"])
        flow_args = '\n'.join([nxst_flow, flows])
        run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
        run_ofctl.side_effect = [flow_args]
        retflows = self.br.dump_flows_for_table(table)
        self.assertEqual(flows, retflows)
|
|
||||||
|
|
||||||
def test_dump_flows_ovs_dead(self):
|
|
||||||
table = 23
|
|
||||||
run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()
|
|
||||||
run_ofctl.side_effect = ['']
|
|
||||||
retflows = self.br.dump_flows_for_table(table)
|
|
||||||
self.assertEqual(None, retflows)
|
|
||||||
|
|
||||||
def test_mod_flow_with_priority_set(self):
|
|
||||||
params = {'in_port': '1',
|
|
||||||
'priority': '1'}
|
|
||||||
|
|
||||||
self.assertRaises(exceptions.InvalidInput,
|
|
||||||
self.br.mod_flow,
|
|
||||||
**params)
|
|
||||||
|
|
||||||
def test_mod_flow_no_actions_set(self):
|
|
||||||
params = {'in_port': '1'}
|
|
||||||
|
|
||||||
self.assertRaises(exceptions.InvalidInput,
|
|
||||||
self.br.mod_flow,
|
|
||||||
**params)
|
|
||||||
|
|
||||||
    def test_defer_apply_flows(self):
        """Deferred flow operations are batched and flushed on apply_off.

        defer_apply_on() is deliberately called twice to show it is
        idempotent; all queued adds go out as one add-flows call and the
        delete as one del-flows call.
        """

        flow_expr = mock.patch.object(ovs_lib, '_build_flow_expr_str').start()
        flow_expr.side_effect = ['added_flow_1', 'added_flow_2',
                                 'deleted_flow_1']
        run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()

        self.br.defer_apply_on()
        self.br.add_flow(flow='add_flow_1')
        self.br.defer_apply_on()
        self.br.add_flow(flow='add_flow_2')
        self.br.delete_flows(flow='delete_flow_1')
        self.br.defer_apply_off()

        flow_expr.assert_has_calls([
            mock.call({'flow': 'add_flow_1'}, 'add'),
            mock.call({'flow': 'add_flow_2'}, 'add'),
            mock.call({'flow': 'delete_flow_1'}, 'del')
        ])

        run_ofctl.assert_has_calls([
            mock.call('add-flows', ['-'], 'added_flow_1\nadded_flow_2\n'),
            mock.call('del-flows', ['-'], 'deleted_flow_1\n')
        ])
|
|
||||||
|
|
||||||
    def test_defer_apply_flows_concurrently(self):
        """Flow ops queued DURING a flush are kept for the next flush.

        The fake run_ofctl re-enters the bridge (turns deferral back on
        and queues a second batch) while the first defer_apply_off() is
        flushing; the second defer_apply_off() must then flush batch two.
        """
        flow_expr = mock.patch.object(ovs_lib, '_build_flow_expr_str').start()
        flow_expr.side_effect = ['added_flow_1', 'deleted_flow_1',
                                 'modified_flow_1', 'added_flow_2',
                                 'deleted_flow_2', 'modified_flow_2']

        run_ofctl = mock.patch.object(self.br, 'run_ofctl').start()

        def run_ofctl_fake(cmd, args, process_input=None):
            # Simulate another thread queueing more work mid-flush.
            self.br.defer_apply_on()
            if cmd == 'add-flows':
                self.br.add_flow(flow='added_flow_2')
            elif cmd == 'del-flows':
                self.br.delete_flows(flow='deleted_flow_2')
            elif cmd == 'mod-flows':
                self.br.mod_flow(flow='modified_flow_2')
        run_ofctl.side_effect = run_ofctl_fake

        self.br.defer_apply_on()
        self.br.add_flow(flow='added_flow_1')
        self.br.delete_flows(flow='deleted_flow_1')
        self.br.mod_flow(flow='modified_flow_1')
        self.br.defer_apply_off()

        run_ofctl.side_effect = None
        self.br.defer_apply_off()

        flow_expr.assert_has_calls([
            mock.call({'flow': 'added_flow_1'}, 'add'),
            mock.call({'flow': 'deleted_flow_1'}, 'del'),
            mock.call({'flow': 'modified_flow_1'}, 'mod'),
            mock.call({'flow': 'added_flow_2'}, 'add'),
            mock.call({'flow': 'deleted_flow_2'}, 'del'),
            mock.call({'flow': 'modified_flow_2'}, 'mod')
        ])
        run_ofctl.assert_has_calls([
            mock.call('add-flows', ['-'], 'added_flow_1\n'),
            mock.call('del-flows', ['-'], 'deleted_flow_1\n'),
            mock.call('mod-flows', ['-'], 'modified_flow_1\n'),
            mock.call('add-flows', ['-'], 'added_flow_2\n'),
            mock.call('del-flows', ['-'], 'deleted_flow_2\n'),
            mock.call('mod-flows', ['-'], 'modified_flow_2\n')
        ])
|
|
||||||
|
|
||||||
    def test_add_tunnel_port(self):
        """Default add_tunnel_port() creates a GRE port with df_default on."""
        pname = "tap99"
        local_ip = "1.1.1.1"
        remote_ip = "9.9.9.9"
        ofport = "6"
        command = ["ovs-vsctl", self.TO, '--', "--may-exist", "add-port",
                   self.BR_NAME, pname]
        command.extend(["--", "set", "Interface", pname])
        command.extend(["type=gre", "options:df_default=true",
                        "options:remote_ip=" + remote_ip,
                        "options:local_ip=" + local_ip,
                        "options:in_key=flow",
                        "options:out_key=flow"])
        # Each element is a tuple of (expected mock call, return_value)
        expected_calls_and_values = [
            (mock.call(command, root_helper=self.root_helper), None),
            (mock.call(["ovs-vsctl", self.TO, "get",
                        "Interface", pname, "ofport"],
                       root_helper=self.root_helper),
             ofport),
        ]
        tools.setup_mock_calls(self.execute, expected_calls_and_values)

        self.assertEqual(
            self.br.add_tunnel_port(pname, remote_ip, local_ip),
            ofport)

        tools.verify_mock_calls(self.execute, expected_calls_and_values)
|
|
||||||
|
|
||||||
    def test_add_vxlan_fragmented_tunnel_port(self):
        """VXLAN port with dont_fragment=False sets df_default=false."""
        pname = "tap99"
        local_ip = "1.1.1.1"
        remote_ip = "9.9.9.9"
        ofport = "6"
        vxlan_udp_port = "9999"
        dont_fragment = False
        command = ["ovs-vsctl", self.TO, '--', "--may-exist", "add-port",
                   self.BR_NAME, pname]
        command.extend(["--", "set", "Interface", pname])
        command.extend(["type=" + p_const.TYPE_VXLAN,
                        "options:dst_port=" + vxlan_udp_port,
                        "options:df_default=false",
                        "options:remote_ip=" + remote_ip,
                        "options:local_ip=" + local_ip,
                        "options:in_key=flow",
                        "options:out_key=flow"])
        # Each element is a tuple of (expected mock call, return_value)
        expected_calls_and_values = [
            (mock.call(command, root_helper=self.root_helper), None),
            (mock.call(["ovs-vsctl", self.TO, "get",
                        "Interface", pname, "ofport"],
                       root_helper=self.root_helper),
             ofport),
        ]
        tools.setup_mock_calls(self.execute, expected_calls_and_values)

        self.assertEqual(
            self.br.add_tunnel_port(pname, remote_ip, local_ip,
                                    p_const.TYPE_VXLAN, vxlan_udp_port,
                                    dont_fragment),
            ofport)

        tools.verify_mock_calls(self.execute, expected_calls_and_values)
|
|
||||||
|
|
||||||
    def test_add_patch_port(self):
        """add_patch_port() creates a type=patch interface with a peer."""
        pname = "tap99"
        peer = "bar10"
        ofport = "6"

        # Each element is a tuple of (expected mock call, return_value)
        command = ["ovs-vsctl", self.TO, "add-port", self.BR_NAME, pname]
        command.extend(["--", "set", "Interface", pname])
        command.extend(["type=patch", "options:peer=" + peer])
        expected_calls_and_values = [
            (mock.call(command, root_helper=self.root_helper),
             None),
            (mock.call(["ovs-vsctl", self.TO, "get",
                        "Interface", pname, "ofport"],
                       root_helper=self.root_helper),
             ofport)
        ]
        tools.setup_mock_calls(self.execute, expected_calls_and_values)

        self.assertEqual(self.br.add_patch_port(pname, peer), ofport)
        tools.verify_mock_calls(self.execute, expected_calls_and_values)
|
|
||||||
|
|
||||||
    def _test_get_vif_ports(self, is_xen=False):
        """Helper: get_vif_ports() builds VifPorts from external_ids.

        On XenServer the iface id comes from a 'xe vif-param-get' lookup
        keyed by xs-vif-uuid instead of the iface-id external id.
        """
        pname = "tap99"
        ofport = "6"
        vif_id = uuidutils.generate_uuid()
        mac = "ca:fe:de:ad:be:ef"

        if is_xen:
            external_ids = ('{xs-vif-uuid="%s", attached-mac="%s"}'
                            % (vif_id, mac))
        else:
            external_ids = ('{iface-id="%s", attached-mac="%s"}'
                            % (vif_id, mac))

        # Each element is a tuple of (expected mock call, return_value)
        expected_calls_and_values = [
            (mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
                       root_helper=self.root_helper),
             "%s\n" % pname),
            (mock.call(["ovs-vsctl", self.TO, "get",
                        "Interface", pname, "external_ids"],
                       root_helper=self.root_helper),
             external_ids),
            (mock.call(["ovs-vsctl", self.TO, "get",
                        "Interface", pname, "ofport"],
                       root_helper=self.root_helper),
             ofport),
        ]
        if is_xen:
            expected_calls_and_values.append(
                (mock.call(["xe", "vif-param-get", "param-name=other-config",
                            "param-key=nicira-iface-id", "uuid=" + vif_id],
                           root_helper=self.root_helper),
                 vif_id)
            )
        tools.setup_mock_calls(self.execute, expected_calls_and_values)

        ports = self.br.get_vif_ports()
        self.assertEqual(1, len(ports))
        self.assertEqual(ports[0].port_name, pname)
        self.assertEqual(ports[0].ofport, ofport)
        self.assertEqual(ports[0].vif_id, vif_id)
        self.assertEqual(ports[0].vif_mac, mac)
        self.assertEqual(ports[0].switch.br_name, self.BR_NAME)
        tools.verify_mock_calls(self.execute, expected_calls_and_values)
|
|
||||||
|
|
||||||
def _encode_ovs_json(self, headings, data):
|
|
||||||
# See man ovs-vsctl(8) for the encoding details.
|
|
||||||
r = {"data": [],
|
|
||||||
"headings": headings}
|
|
||||||
for row in data:
|
|
||||||
ovs_row = []
|
|
||||||
r["data"].append(ovs_row)
|
|
||||||
for cell in row:
|
|
||||||
if isinstance(cell, (str, int, list)):
|
|
||||||
ovs_row.append(cell)
|
|
||||||
elif isinstance(cell, dict):
|
|
||||||
ovs_row.append(["map", cell.items()])
|
|
||||||
elif isinstance(cell, set):
|
|
||||||
ovs_row.append(["set", cell])
|
|
||||||
else:
|
|
||||||
raise TypeError('%r not int, str, list, set or dict' %
|
|
||||||
type(cell))
|
|
||||||
return jsonutils.dumps(r)
|
|
||||||
|
|
||||||
    def _test_get_vif_port_set(self, is_xen):
        """Helper: get_vif_port_set() returns only configured local ports.

        Ports with an unset ofport ([] or ['set', []]), ports on other
        bridges, and non-vif ports must all be excluded from the set.
        """
        if is_xen:
            id_key = 'xs-vif-uuid'
        else:
            id_key = 'iface-id'

        headings = ['name', 'external_ids']
        data = [
            # A vif port on this bridge:
            ['tap99', {id_key: 'tap99id', 'attached-mac': 'tap99mac'}, 1],
            # A vif port on this bridge not yet configured
            ['tap98', {id_key: 'tap98id', 'attached-mac': 'tap98mac'}, []],
            # Another vif port on this bridge not yet configured
            ['tap97', {id_key: 'tap97id', 'attached-mac': 'tap97mac'},
             ['set', []]],

            # A vif port on another bridge:
            ['tap88', {id_key: 'tap88id', 'attached-mac': 'tap88id'}, 1],
            # Non-vif port on this bridge:
            ['tun22', {}, 2],
        ]

        # Each element is a tuple of (expected mock call, return_value)
        expected_calls_and_values = [
            (mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
                       root_helper=self.root_helper),
             'tap99\ntun22'),
            (mock.call(["ovs-vsctl", self.TO, "--format=json",
                        "--", "--columns=name,external_ids,ofport",
                        "list", "Interface"],
                       root_helper=self.root_helper),
             self._encode_ovs_json(headings, data)),
        ]
        tools.setup_mock_calls(self.execute, expected_calls_and_values)

        if is_xen:
            get_xapi_iface_id = mock.patch.object(self.br,
                                                  'get_xapi_iface_id').start()
            get_xapi_iface_id.return_value = 'tap99id'

        port_set = self.br.get_vif_port_set()
        self.assertEqual(set(['tap99id']), port_set)
        tools.verify_mock_calls(self.execute, expected_calls_and_values)
        if is_xen:
            get_xapi_iface_id.assert_called_once_with('tap99id')
|
|
||||||
    def test_get_vif_ports_nonxen(self):
        """get_vif_ports() on a non-Xen hypervisor."""
        self._test_get_vif_ports(is_xen=False)
|
|
||||||
|
|
||||||
    def test_get_vif_ports_xen(self):
        """get_vif_ports() on XenServer (xe-based iface-id lookup)."""
        self._test_get_vif_ports(is_xen=True)
|
|
||||||
|
|
||||||
    def test_get_vif_port_set_nonxen(self):
        """get_vif_port_set() on a non-Xen hypervisor."""
        self._test_get_vif_port_set(False)
|
|
||||||
|
|
||||||
    def test_get_vif_port_set_xen(self):
        """get_vif_port_set() on XenServer."""
        self._test_get_vif_port_set(True)
|
|
||||||
|
|
||||||
    def test_get_vif_ports_list_ports_error(self):
        """A failing list-ports propagates out of get_vif_ports()."""
        expected_calls_and_values = [
            (mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
                       root_helper=self.root_helper),
             RuntimeError()),
        ]
        tools.setup_mock_calls(self.execute, expected_calls_and_values)
        self.assertRaises(RuntimeError, self.br.get_vif_ports)
        tools.verify_mock_calls(self.execute, expected_calls_and_values)
|
|
||||||
|
|
||||||
    def test_get_vif_port_set_list_ports_error(self):
        """A failing list-ports propagates out of get_vif_port_set()."""
        expected_calls_and_values = [
            (mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
                       root_helper=self.root_helper),
             RuntimeError()),
        ]
        tools.setup_mock_calls(self.execute, expected_calls_and_values)
        self.assertRaises(RuntimeError, self.br.get_vif_port_set)
        tools.verify_mock_calls(self.execute, expected_calls_and_values)
|
|
||||||
|
|
||||||
    def test_get_vif_port_set_list_interface_error(self):
        """A failing 'list Interface' propagates out of get_vif_port_set()."""
        expected_calls_and_values = [
            (mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
                       root_helper=self.root_helper),
             'tap99\n'),
            (mock.call(["ovs-vsctl", self.TO, "--format=json",
                        "--", "--columns=name,external_ids,ofport",
                        "list", "Interface"],
                       root_helper=self.root_helper),
             RuntimeError()),
        ]
        tools.setup_mock_calls(self.execute, expected_calls_and_values)
        self.assertRaises(RuntimeError, self.br.get_vif_port_set)
        tools.verify_mock_calls(self.execute, expected_calls_and_values)
|
|
||||||
|
|
||||||
    def test_get_port_tag_dict(self):
        """get_port_tag_dict() maps port name to VLAN tag (or [] if unset)."""
        headings = ['name', 'tag']
        data = [
            # Untagged trunk ports come back as empty OVS sets.
            ['int-br-eth2', set()],
            ['patch-tun', set()],
            ['qr-76d9e6b6-21', 1],
            ['tapce5318ff-78', 1],
            ['tape1400310-e6', 1],
        ]

        # Each element is a tuple of (expected mock call, return_value)
        expected_calls_and_values = [
            (mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
                       root_helper=self.root_helper),
             '\n'.join((iface for iface, tag in data))),
            (mock.call(["ovs-vsctl", self.TO, "--format=json",
                        "--", "--columns=name,tag",
                        "list", "Port"],
                       root_helper=self.root_helper),
             self._encode_ovs_json(headings, data)),
        ]
        tools.setup_mock_calls(self.execute, expected_calls_and_values)

        port_tags = self.br.get_port_tag_dict()
        self.assertEqual(
            port_tags,
            {u'int-br-eth2': [],
             u'patch-tun': [],
             u'qr-76d9e6b6-21': 1,
             u'tapce5318ff-78': 1,
             u'tape1400310-e6': 1}
        )
|
|
||||||
|
|
||||||
def test_clear_db_attribute(self):
|
|
||||||
pname = "tap77"
|
|
||||||
self.br.clear_db_attribute("Port", pname, "tag")
|
|
||||||
self.execute.assert_called_once_with(
|
|
||||||
["ovs-vsctl", self.TO, "clear", "Port", pname, "tag"],
|
|
||||||
root_helper=self.root_helper)
|
|
||||||
|
|
||||||
    def _test_iface_to_br(self, exp_timeout=None):
        """Helper: get_bridge_for_iface() runs 'iface-to-br' and returns it."""
        iface = 'tap0'
        br = 'br-int'
        root_helper = 'sudo'
        self.execute.return_value = 'br-int'
        exp_timeout_str = self._build_timeout_opt(exp_timeout)
        self.assertEqual(ovs_lib.get_bridge_for_iface(root_helper, iface), br)
        self.execute.assert_called_once_with(
            ["ovs-vsctl", exp_timeout_str, "iface-to-br", iface],
            root_helper=root_helper)
|
|
||||||
|
|
||||||
    def test_iface_to_br(self):
        """get_bridge_for_iface() with the default vsctl timeout."""
        self._test_iface_to_br()
|
|
||||||
|
|
||||||
    def test_iface_to_br_non_default_timeout(self):
        """get_bridge_for_iface() honors the ovs_vsctl_timeout option."""
        new_timeout = 5
        cfg.CONF.set_override('ovs_vsctl_timeout', new_timeout)
        self._test_iface_to_br(new_timeout)
|
|
||||||
|
|
||||||
    def test_iface_to_br_handles_ovs_vsctl_exception(self):
        """get_bridge_for_iface() returns None when ovs-vsctl fails."""
        iface = 'tap0'
        root_helper = 'sudo'
        self.execute.side_effect = Exception

        self.assertIsNone(ovs_lib.get_bridge_for_iface(root_helper, iface))
        self.execute.assert_called_once_with(
            ["ovs-vsctl", self.TO, "iface-to-br", iface],
            root_helper=root_helper)
|
|
||||||
|
|
||||||
    def test_delete_all_ports(self):
        """delete_ports(all_ports=True) removes every port on the bridge."""
        with mock.patch.object(self.br, 'get_port_name_list',
                               return_value=['port1']) as get_port:
            with mock.patch.object(self.br, 'delete_port') as delete_port:
                self.br.delete_ports(all_ports=True)
        get_port.assert_called_once_with()
        delete_port.assert_called_once_with('port1')
|
|
||||||
|
|
||||||
def test_delete_neutron_ports(self):
|
|
||||||
port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(),
|
|
||||||
'ca:fe:de:ad:be:ef', 'br')
|
|
||||||
port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(),
|
|
||||||
'ca:ee:de:ad:be:ef', 'br')
|
|
||||||
with mock.patch.object(self.br, 'get_vif_ports',
|
|
||||||
return_value=[port1, port2]) as get_ports:
|
|
||||||
with mock.patch.object(self.br, 'delete_port') as delete_port:
|
|
||||||
self.br.delete_ports(all_ports=False)
|
|
||||||
get_ports.assert_called_once_with()
|
|
||||||
delete_port.assert_has_calls([
|
|
||||||
mock.call('tap1234'),
|
|
||||||
mock.call('tap5678')
|
|
||||||
])
|
|
||||||
|
|
||||||
def test_delete_neutron_ports_list_error(self):
|
|
||||||
expected_calls_and_values = [
|
|
||||||
(mock.call(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
|
|
||||||
root_helper=self.root_helper),
|
|
||||||
RuntimeError()),
|
|
||||||
]
|
|
||||||
tools.setup_mock_calls(self.execute, expected_calls_and_values)
|
|
||||||
self.assertRaises(RuntimeError, self.br.delete_ports, all_ports=False)
|
|
||||||
tools.verify_mock_calls(self.execute, expected_calls_and_values)
|
|
||||||
|
|
||||||
def _test_get_bridges(self, exp_timeout=None):
|
|
||||||
bridges = ['br-int', 'br-ex']
|
|
||||||
root_helper = 'sudo'
|
|
||||||
self.execute.return_value = 'br-int\nbr-ex\n'
|
|
||||||
timeout_str = self._build_timeout_opt(exp_timeout)
|
|
||||||
self.assertEqual(ovs_lib.get_bridges(root_helper), bridges)
|
|
||||||
self.execute.assert_called_once_with(
|
|
||||||
["ovs-vsctl", timeout_str, "list-br"],
|
|
||||||
root_helper=root_helper)
|
|
||||||
|
|
||||||
def test_get_bridges(self):
|
|
||||||
self._test_get_bridges()
|
|
||||||
|
|
||||||
def test_get_bridges_not_default_timeout(self):
|
|
||||||
new_timeout = 5
|
|
||||||
cfg.CONF.set_override('ovs_vsctl_timeout', new_timeout)
|
|
||||||
self._test_get_bridges(new_timeout)
|
|
||||||
|
|
||||||
def test_get_local_port_mac_succeeds(self):
|
|
||||||
with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
|
|
||||||
return_value=mock.Mock(address='foo')):
|
|
||||||
self.assertEqual('foo', self.br.get_local_port_mac())
|
|
||||||
|
|
||||||
def test_get_local_port_mac_raises_exception_for_missing_mac(self):
|
|
||||||
with mock.patch('neutron.agent.linux.ip_lib.IpLinkCommand',
|
|
||||||
return_value=mock.Mock(address=None)):
|
|
||||||
with testtools.ExpectedException(Exception):
|
|
||||||
self.br.get_local_port_mac()
|
|
||||||
|
|
||||||
def _test_get_vif_port_by_id(self, iface_id, data, br_name=None):
|
|
||||||
headings = ['external_ids', 'name', 'ofport']
|
|
||||||
# Each element is a tuple of (expected mock call, return_value)
|
|
||||||
expected_calls_and_values = [
|
|
||||||
(mock.call(["ovs-vsctl", self.TO, "--format=json",
|
|
||||||
"--", "--columns=external_ids,name,ofport",
|
|
||||||
"find", "Interface",
|
|
||||||
'external_ids:iface-id="%s"' % iface_id],
|
|
||||||
root_helper=self.root_helper),
|
|
||||||
self._encode_ovs_json(headings, data))]
|
|
||||||
if data:
|
|
||||||
if not br_name:
|
|
||||||
br_name = self.BR_NAME
|
|
||||||
|
|
||||||
expected_calls_and_values.append(
|
|
||||||
(mock.call(["ovs-vsctl", self.TO,
|
|
||||||
"iface-to-br", data[0][headings.index('name')]],
|
|
||||||
root_helper=self.root_helper),
|
|
||||||
br_name))
|
|
||||||
tools.setup_mock_calls(self.execute, expected_calls_and_values)
|
|
||||||
vif_port = self.br.get_vif_port_by_id(iface_id)
|
|
||||||
|
|
||||||
tools.verify_mock_calls(self.execute, expected_calls_and_values)
|
|
||||||
return vif_port
|
|
||||||
|
|
||||||
def _test_get_vif_port_by_id_with_data(self, ofport=None, mac=None):
|
|
||||||
external_ids = [["iface-id", "tap99id"],
|
|
||||||
["iface-status", "active"]]
|
|
||||||
if mac:
|
|
||||||
external_ids.append(["attached-mac", mac])
|
|
||||||
data = [[["map", external_ids], "tap99",
|
|
||||||
ofport if ofport else '["set",[]]']]
|
|
||||||
vif_port = self._test_get_vif_port_by_id('tap99id', data)
|
|
||||||
if not ofport or ofport == -1 or not mac:
|
|
||||||
self.assertIsNone(vif_port)
|
|
||||||
return
|
|
||||||
self.assertEqual(vif_port.vif_id, 'tap99id')
|
|
||||||
self.assertEqual(vif_port.vif_mac, 'aa:bb:cc:dd:ee:ff')
|
|
||||||
self.assertEqual(vif_port.port_name, 'tap99')
|
|
||||||
self.assertEqual(vif_port.ofport, ofport)
|
|
||||||
|
|
||||||
def test_get_vif_by_port_id_with_ofport(self):
|
|
||||||
self._test_get_vif_port_by_id_with_data(
|
|
||||||
ofport=1, mac="aa:bb:cc:dd:ee:ff")
|
|
||||||
|
|
||||||
def test_get_vif_by_port_id_without_ofport(self):
|
|
||||||
self._test_get_vif_port_by_id_with_data(mac="aa:bb:cc:dd:ee:ff")
|
|
||||||
|
|
||||||
def test_get_vif_by_port_id_with_invalid_ofport(self):
|
|
||||||
self._test_get_vif_port_by_id_with_data(
|
|
||||||
ofport=-1, mac="aa:bb:cc:dd:ee:ff")
|
|
||||||
|
|
||||||
def test_get_vif_by_port_id_without_mac(self):
|
|
||||||
self._test_get_vif_port_by_id_with_data(ofport=1)
|
|
||||||
|
|
||||||
def test_get_vif_by_port_id_with_no_data(self):
|
|
||||||
self.assertIsNone(self._test_get_vif_port_by_id('whatever', []))
|
|
||||||
|
|
||||||
def test_get_vif_by_port_id_different_bridge(self):
|
|
||||||
external_ids = [["iface-id", "tap99id"],
|
|
||||||
["iface-status", "active"]]
|
|
||||||
data = [[["map", external_ids], "tap99", 1]]
|
|
||||||
self.assertIsNone(self._test_get_vif_port_by_id('tap99id', data,
|
|
||||||
"br-ext"))
|
|
||||||
|
|
||||||
def test_ofctl_arg_supported(self):
|
|
||||||
with mock.patch('neutron.common.utils.get_random_string') as utils:
|
|
||||||
utils.return_value = 'test'
|
|
||||||
supported = ovs_lib.ofctl_arg_supported(self.root_helper, 'cmd',
|
|
||||||
['args'])
|
|
||||||
self.execute.assert_has_calls([
|
|
||||||
mock.call(['ovs-vsctl', self.TO, '--', '--if-exists', 'del-br',
|
|
||||||
'br-test-test'], root_helper=self.root_helper),
|
|
||||||
mock.call(['ovs-vsctl', self.TO, '--', '--may-exist', 'add-br',
|
|
||||||
'br-test-test'], root_helper=self.root_helper),
|
|
||||||
mock.call(['ovs-ofctl', 'cmd', 'br-test-test', 'args'],
|
|
||||||
root_helper=self.root_helper),
|
|
||||||
mock.call(['ovs-vsctl', self.TO, '--', '--if-exists', 'del-br',
|
|
||||||
'br-test-test'], root_helper=self.root_helper)
|
|
||||||
])
|
|
||||||
self.assertTrue(supported)
|
|
||||||
|
|
||||||
self.execute.side_effect = Exception
|
|
||||||
supported = ovs_lib.ofctl_arg_supported(self.root_helper, 'cmd',
|
|
||||||
['args'])
|
|
||||||
self.execute.assert_has_calls([
|
|
||||||
mock.call(['ovs-vsctl', self.TO, '--', '--if-exists', 'del-br',
|
|
||||||
'br-test-test'], root_helper=self.root_helper),
|
|
||||||
mock.call(['ovs-vsctl', self.TO, '--', '--may-exist', 'add-br',
|
|
||||||
'br-test-test'], root_helper=self.root_helper),
|
|
||||||
mock.call(['ovs-ofctl', 'cmd', 'br-test-test', 'args'],
|
|
||||||
root_helper=self.root_helper),
|
|
||||||
mock.call(['ovs-vsctl', self.TO, '--', '--if-exists', 'del-br',
|
|
||||||
'br-test-test'], root_helper=self.root_helper)
|
|
||||||
])
|
|
||||||
self.assertFalse(supported)
|
|
|
@ -1,105 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 Red Hat, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import eventlet.event
|
|
||||||
import mock
|
|
||||||
|
|
||||||
from neutron.agent.linux import ovsdb_monitor
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
class TestOvsdbMonitor(base.BaseTestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestOvsdbMonitor, self).setUp()
|
|
||||||
self.root_helper = 'sudo'
|
|
||||||
self.monitor = ovsdb_monitor.OvsdbMonitor('Interface',
|
|
||||||
root_helper=self.root_helper)
|
|
||||||
|
|
||||||
def read_output_queues_and_returns_result(self, output_type, output):
|
|
||||||
with mock.patch.object(self.monitor, '_process') as mock_process:
|
|
||||||
with mock.patch.object(mock_process, output_type) as mock_file:
|
|
||||||
with mock.patch.object(mock_file, 'readline') as mock_readline:
|
|
||||||
mock_readline.return_value = output
|
|
||||||
func = getattr(self.monitor,
|
|
||||||
'_read_%s' % output_type,
|
|
||||||
None)
|
|
||||||
return func()
|
|
||||||
|
|
||||||
def test__read_stdout_returns_none_for_empty_read(self):
|
|
||||||
result = self.read_output_queues_and_returns_result('stdout', '')
|
|
||||||
self.assertIsNone(result)
|
|
||||||
|
|
||||||
def test__read_stdout_queues_normal_output_to_stdout_queue(self):
|
|
||||||
output = 'foo'
|
|
||||||
result = self.read_output_queues_and_returns_result('stdout', output)
|
|
||||||
self.assertEqual(result, output)
|
|
||||||
self.assertEqual(self.monitor._stdout_lines.get_nowait(), output)
|
|
||||||
|
|
||||||
def test__read_stderr_returns_none(self):
|
|
||||||
result = self.read_output_queues_and_returns_result('stderr', '')
|
|
||||||
self.assertIsNone(result)
|
|
||||||
|
|
||||||
|
|
||||||
class TestSimpleInterfaceMonitor(base.BaseTestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestSimpleInterfaceMonitor, self).setUp()
|
|
||||||
self.root_helper = 'sudo'
|
|
||||||
self.monitor = ovsdb_monitor.SimpleInterfaceMonitor(
|
|
||||||
root_helper=self.root_helper)
|
|
||||||
|
|
||||||
def test_is_active_is_false_by_default(self):
|
|
||||||
self.assertFalse(self.monitor.is_active)
|
|
||||||
|
|
||||||
def test_is_active_can_be_true(self):
|
|
||||||
self.monitor.data_received = True
|
|
||||||
self.monitor._kill_event = eventlet.event.Event()
|
|
||||||
self.assertTrue(self.monitor.is_active)
|
|
||||||
|
|
||||||
def test_has_updates_is_true_by_default(self):
|
|
||||||
self.assertTrue(self.monitor.has_updates)
|
|
||||||
|
|
||||||
def test_has_updates_is_false_if_active_with_no_output(self):
|
|
||||||
target = ('neutron.agent.linux.ovsdb_monitor.SimpleInterfaceMonitor'
|
|
||||||
'.is_active')
|
|
||||||
with mock.patch(target,
|
|
||||||
new_callable=mock.PropertyMock(return_value=True)):
|
|
||||||
self.assertFalse(self.monitor.has_updates)
|
|
||||||
|
|
||||||
def test__kill_sets_data_received_to_false(self):
|
|
||||||
self.monitor.data_received = True
|
|
||||||
with mock.patch(
|
|
||||||
'neutron.agent.linux.ovsdb_monitor.OvsdbMonitor._kill'):
|
|
||||||
self.monitor._kill()
|
|
||||||
self.assertFalse(self.monitor.data_received)
|
|
||||||
|
|
||||||
def test__read_stdout_sets_data_received_and_returns_output(self):
|
|
||||||
output = 'foo'
|
|
||||||
with mock.patch(
|
|
||||||
'neutron.agent.linux.ovsdb_monitor.OvsdbMonitor._read_stdout',
|
|
||||||
return_value=output):
|
|
||||||
result = self.monitor._read_stdout()
|
|
||||||
self.assertTrue(self.monitor.data_received)
|
|
||||||
self.assertEqual(result, output)
|
|
||||||
|
|
||||||
def test__read_stdout_does_not_set_data_received_for_empty_ouput(self):
|
|
||||||
output = None
|
|
||||||
with mock.patch(
|
|
||||||
'neutron.agent.linux.ovsdb_monitor.OvsdbMonitor._read_stdout',
|
|
||||||
return_value=output):
|
|
||||||
self.monitor._read_stdout()
|
|
||||||
self.assertFalse(self.monitor.data_received)
|
|
|
@ -1,116 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 Red Hat, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import mock
|
|
||||||
|
|
||||||
from neutron.agent.linux import polling
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
class TestGetPollingManager(base.BaseTestCase):
|
|
||||||
|
|
||||||
def test_return_always_poll_by_default(self):
|
|
||||||
with polling.get_polling_manager() as pm:
|
|
||||||
self.assertEqual(pm.__class__, polling.AlwaysPoll)
|
|
||||||
|
|
||||||
def test_manage_polling_minimizer(self):
|
|
||||||
mock_target = 'neutron.agent.linux.polling.InterfacePollingMinimizer'
|
|
||||||
with mock.patch('%s.start' % mock_target) as mock_start:
|
|
||||||
with mock.patch('%s.stop' % mock_target) as mock_stop:
|
|
||||||
with polling.get_polling_manager(minimize_polling=True,
|
|
||||||
root_helper='test') as pm:
|
|
||||||
self.assertEqual(pm._monitor.root_helper, 'test')
|
|
||||||
self.assertEqual(pm.__class__,
|
|
||||||
polling.InterfacePollingMinimizer)
|
|
||||||
mock_stop.assert_has_calls(mock.call())
|
|
||||||
mock_start.assert_has_calls(mock.call())
|
|
||||||
|
|
||||||
|
|
||||||
class TestBasePollingManager(base.BaseTestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestBasePollingManager, self).setUp()
|
|
||||||
self.pm = polling.BasePollingManager()
|
|
||||||
|
|
||||||
def test_force_polling_sets_interval_attribute(self):
|
|
||||||
self.assertFalse(self.pm._force_polling)
|
|
||||||
self.pm.force_polling()
|
|
||||||
self.assertTrue(self.pm._force_polling)
|
|
||||||
|
|
||||||
def test_polling_completed_sets_interval_attribute(self):
|
|
||||||
self.pm._polling_completed = False
|
|
||||||
self.pm.polling_completed()
|
|
||||||
self.assertTrue(self.pm._polling_completed)
|
|
||||||
|
|
||||||
def mock_is_polling_required(self, return_value):
|
|
||||||
return mock.patch.object(self.pm, '_is_polling_required',
|
|
||||||
return_value=return_value)
|
|
||||||
|
|
||||||
def test_is_polling_required_returns_true_when_forced(self):
|
|
||||||
with self.mock_is_polling_required(False):
|
|
||||||
self.pm.force_polling()
|
|
||||||
self.assertTrue(self.pm.is_polling_required)
|
|
||||||
self.assertFalse(self.pm._force_polling)
|
|
||||||
|
|
||||||
def test_is_polling_required_returns_true_when_polling_not_completed(self):
|
|
||||||
with self.mock_is_polling_required(False):
|
|
||||||
self.pm._polling_completed = False
|
|
||||||
self.assertTrue(self.pm.is_polling_required)
|
|
||||||
|
|
||||||
def test_is_polling_required_returns_true_when_updates_are_present(self):
|
|
||||||
with self.mock_is_polling_required(True):
|
|
||||||
self.assertTrue(self.pm.is_polling_required)
|
|
||||||
self.assertFalse(self.pm._polling_completed)
|
|
||||||
|
|
||||||
def test_is_polling_required_returns_false_for_no_updates(self):
|
|
||||||
with self.mock_is_polling_required(False):
|
|
||||||
self.assertFalse(self.pm.is_polling_required)
|
|
||||||
|
|
||||||
|
|
||||||
class TestAlwaysPoll(base.BaseTestCase):
|
|
||||||
|
|
||||||
def test_is_polling_required_always_returns_true(self):
|
|
||||||
pm = polling.AlwaysPoll()
|
|
||||||
self.assertTrue(pm.is_polling_required)
|
|
||||||
|
|
||||||
|
|
||||||
class TestInterfacePollingMinimizer(base.BaseTestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestInterfacePollingMinimizer, self).setUp()
|
|
||||||
self.pm = polling.InterfacePollingMinimizer()
|
|
||||||
|
|
||||||
def test_start_calls_monitor_start(self):
|
|
||||||
with mock.patch.object(self.pm._monitor, 'start') as mock_start:
|
|
||||||
self.pm.start()
|
|
||||||
mock_start.assert_called_with()
|
|
||||||
|
|
||||||
def test_stop_calls_monitor_stop(self):
|
|
||||||
with mock.patch.object(self.pm._monitor, 'stop') as mock_stop:
|
|
||||||
self.pm.stop()
|
|
||||||
mock_stop.assert_called_with()
|
|
||||||
|
|
||||||
def mock_has_updates(self, return_value):
|
|
||||||
target = ('neutron.agent.linux.ovsdb_monitor.SimpleInterfaceMonitor'
|
|
||||||
'.has_updates')
|
|
||||||
return mock.patch(
|
|
||||||
target,
|
|
||||||
new_callable=mock.PropertyMock(return_value=return_value),
|
|
||||||
)
|
|
||||||
|
|
||||||
def test__is_polling_required_returns_when_updates_are_present(self):
|
|
||||||
with self.mock_has_updates(True):
|
|
||||||
self.assertTrue(self.pm._is_polling_required())
|
|
|
@ -1,154 +0,0 @@
|
||||||
# Copyright (c) 2013 Red Hat, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import datetime
|
|
||||||
import mock
|
|
||||||
|
|
||||||
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
|
|
||||||
from neutron.common import utils
|
|
||||||
from neutron.db import agents_db
|
|
||||||
from neutron.openstack.common import timeutils
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
class TestDhcpAgentNotifyAPI(base.BaseTestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestDhcpAgentNotifyAPI, self).setUp()
|
|
||||||
self.notifier = (
|
|
||||||
dhcp_rpc_agent_api.DhcpAgentNotifyAPI(plugin=mock.Mock()))
|
|
||||||
|
|
||||||
mock_util_p = mock.patch.object(utils, 'is_extension_supported')
|
|
||||||
mock_log_p = mock.patch.object(dhcp_rpc_agent_api, 'LOG')
|
|
||||||
mock_fanout_p = mock.patch.object(self.notifier, '_fanout_message')
|
|
||||||
mock_cast_p = mock.patch.object(self.notifier, '_cast_message')
|
|
||||||
self.mock_util = mock_util_p.start()
|
|
||||||
self.mock_log = mock_log_p.start()
|
|
||||||
self.mock_fanout = mock_fanout_p.start()
|
|
||||||
self.mock_cast = mock_cast_p.start()
|
|
||||||
|
|
||||||
def _test__schedule_network(self, network,
|
|
||||||
new_agents=None, existing_agents=None,
|
|
||||||
expected_casts=0, expected_warnings=0):
|
|
||||||
self.notifier.plugin.schedule_network.return_value = new_agents
|
|
||||||
agents = self.notifier._schedule_network(
|
|
||||||
mock.ANY, network, existing_agents)
|
|
||||||
if new_agents is None:
|
|
||||||
new_agents = []
|
|
||||||
self.assertEqual(new_agents + existing_agents, agents)
|
|
||||||
self.assertEqual(expected_casts, self.mock_cast.call_count)
|
|
||||||
self.assertEqual(expected_warnings, self.mock_log.warn.call_count)
|
|
||||||
|
|
||||||
def test__schedule_network(self):
|
|
||||||
agent = agents_db.Agent()
|
|
||||||
agent.admin_state_up = True
|
|
||||||
agent.heartbeat_timestamp = timeutils.utcnow()
|
|
||||||
network = {'id': 'foo_net_id'}
|
|
||||||
self._test__schedule_network(network,
|
|
||||||
new_agents=[agent], existing_agents=[],
|
|
||||||
expected_casts=1, expected_warnings=0)
|
|
||||||
|
|
||||||
def test__schedule_network_no_existing_agents(self):
|
|
||||||
agent = agents_db.Agent()
|
|
||||||
agent.admin_state_up = True
|
|
||||||
agent.heartbeat_timestamp = timeutils.utcnow()
|
|
||||||
network = {'id': 'foo_net_id'}
|
|
||||||
self._test__schedule_network(network,
|
|
||||||
new_agents=None, existing_agents=[agent],
|
|
||||||
expected_casts=0, expected_warnings=0)
|
|
||||||
|
|
||||||
def test__schedule_network_no_new_agents(self):
|
|
||||||
network = {'id': 'foo_net_id'}
|
|
||||||
self._test__schedule_network(network,
|
|
||||||
new_agents=None, existing_agents=[],
|
|
||||||
expected_casts=0, expected_warnings=1)
|
|
||||||
|
|
||||||
def _test__get_enabled_agents(self, network,
|
|
||||||
agents=None, port_count=0,
|
|
||||||
expected_warnings=0, expected_errors=0):
|
|
||||||
self.notifier.plugin.get_ports_count.return_value = port_count
|
|
||||||
enabled_agents = self.notifier._get_enabled_agents(
|
|
||||||
mock.ANY, network, agents, mock.ANY, mock.ANY)
|
|
||||||
self.assertEqual(agents, enabled_agents)
|
|
||||||
self.assertEqual(expected_warnings, self.mock_log.warn.call_count)
|
|
||||||
self.assertEqual(expected_errors, self.mock_log.error.call_count)
|
|
||||||
|
|
||||||
def test__get_enabled_agents(self):
|
|
||||||
agent = agents_db.Agent()
|
|
||||||
agent.admin_state_up = True
|
|
||||||
agent.heartbeat_timestamp = timeutils.utcnow()
|
|
||||||
network = {'id': 'foo_network_id'}
|
|
||||||
self._test__get_enabled_agents(network, agents=[agent])
|
|
||||||
|
|
||||||
def test__get_enabled_agents_with_inactive_ones(self):
|
|
||||||
agent1 = agents_db.Agent()
|
|
||||||
agent1.admin_state_up = True
|
|
||||||
agent1.heartbeat_timestamp = timeutils.utcnow()
|
|
||||||
agent2 = agents_db.Agent()
|
|
||||||
agent2.admin_state_up = True
|
|
||||||
# This is effectively an inactive agent
|
|
||||||
agent2.heartbeat_timestamp = datetime.datetime(2000, 1, 1, 0, 0)
|
|
||||||
network = {'id': 'foo_network_id'}
|
|
||||||
self._test__get_enabled_agents(network,
|
|
||||||
agents=[agent1, agent2],
|
|
||||||
expected_warnings=1, expected_errors=0)
|
|
||||||
|
|
||||||
def test__get_enabled_agents_with_notification_required(self):
|
|
||||||
network = {'id': 'foo_network_id', 'subnets': ['foo_subnet_id']}
|
|
||||||
self._test__get_enabled_agents(network, [], port_count=20,
|
|
||||||
expected_warnings=0, expected_errors=1)
|
|
||||||
|
|
||||||
def test__notify_agents_fanout_required(self):
|
|
||||||
self.notifier._notify_agents(mock.ANY,
|
|
||||||
'network_delete_end',
|
|
||||||
mock.ANY, 'foo_network_id')
|
|
||||||
self.assertEqual(1, self.mock_fanout.call_count)
|
|
||||||
|
|
||||||
def _test__notify_agents(self, method,
|
|
||||||
expected_scheduling=0, expected_casts=0):
|
|
||||||
with mock.patch.object(self.notifier, '_schedule_network') as f:
|
|
||||||
with mock.patch.object(self.notifier, '_get_enabled_agents') as g:
|
|
||||||
agent = agents_db.Agent()
|
|
||||||
agent.admin_state_up = True
|
|
||||||
agent.heartbeat_timestamp = timeutils.utcnow()
|
|
||||||
g.return_value = [agent]
|
|
||||||
self.notifier._notify_agents(mock.Mock(), method,
|
|
||||||
mock.ANY, 'foo_network_id')
|
|
||||||
self.assertEqual(expected_scheduling, f.call_count)
|
|
||||||
self.assertEqual(expected_casts, self.mock_cast.call_count)
|
|
||||||
|
|
||||||
def test__notify_agents_cast_required_with_scheduling(self):
|
|
||||||
self._test__notify_agents('port_create_end',
|
|
||||||
expected_scheduling=1, expected_casts=1)
|
|
||||||
|
|
||||||
def test__notify_agents_cast_required_wo_scheduling_on_port_update(self):
|
|
||||||
self._test__notify_agents('port_update_end',
|
|
||||||
expected_scheduling=0, expected_casts=1)
|
|
||||||
|
|
||||||
def test__notify_agents_cast_required_wo_scheduling_on_subnet_create(self):
|
|
||||||
self._test__notify_agents('subnet_create_end',
|
|
||||||
expected_scheduling=0, expected_casts=1)
|
|
||||||
|
|
||||||
def test__notify_agents_no_action(self):
|
|
||||||
self._test__notify_agents('network_create_end',
|
|
||||||
expected_scheduling=0, expected_casts=0)
|
|
||||||
|
|
||||||
def test__fanout_message(self):
|
|
||||||
self.notifier._fanout_message(mock.ANY, mock.ANY, mock.ANY)
|
|
||||||
self.assertEqual(1, self.mock_fanout.call_count)
|
|
||||||
|
|
||||||
def test__cast_message(self):
|
|
||||||
self.notifier._cast_message(mock.ANY, mock.ANY, mock.ANY)
|
|
||||||
self.assertEqual(1, self.mock_cast.call_count)
|
|
|
@ -1,16 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2012 OpenStack Foundation.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
|
@ -1,44 +0,0 @@
|
||||||
# Test config file for quantum-proxy-plugin.
|
|
||||||
|
|
||||||
[database]
|
|
||||||
# This line MUST be changed to actually run the plugin.
|
|
||||||
# Example:
|
|
||||||
# connection = mysql://root:pass@127.0.0.1:3306/restproxy_quantum
|
|
||||||
# Replace 127.0.0.1 above with the IP address of the database used by the
|
|
||||||
# main quantum server. (Leave it as is if the database runs on this host.)
|
|
||||||
connection = sqlite://
|
|
||||||
# Database reconnection retry times - in event connectivity is lost
|
|
||||||
# set to -1 implies an infinite retry count
|
|
||||||
# max_retries = 10
|
|
||||||
# Database reconnection interval in seconds - in event connectivity is lost
|
|
||||||
retry_interval = 2
|
|
||||||
|
|
||||||
[restproxy]
|
|
||||||
# All configuration for this plugin is in section '[restproxy]'
|
|
||||||
#
|
|
||||||
# The following parameters are supported:
|
|
||||||
# servers : <host:port>[,<host:port>]* (Error if not set)
|
|
||||||
# serverauth : <username:password> (default: no auth)
|
|
||||||
# serverssl : True | False (default: False)
|
|
||||||
#
|
|
||||||
servers=localhost:9000,localhost:8899
|
|
||||||
serverssl=False
|
|
||||||
#serverauth=username:password
|
|
||||||
|
|
||||||
[nova]
|
|
||||||
# Specify the VIF_TYPE that will be controlled on the Nova compute instances
|
|
||||||
# options: ivs or ovs
|
|
||||||
# default: ovs
|
|
||||||
vif_type = ovs
|
|
||||||
# Overrides for vif types based on nova compute node host IDs
|
|
||||||
# Comma separated list of host IDs to fix to a specific VIF type
|
|
||||||
node_override_vif_ivs = ivshost
|
|
||||||
|
|
||||||
[router]
|
|
||||||
# Specify the default router rules installed in newly created tenant routers
|
|
||||||
# Specify multiple times for multiple rules
|
|
||||||
# Use an * to specify default for all tenants
|
|
||||||
# Default is any any allow for all tenants
|
|
||||||
#tenant_default_router_rule=*:any:any:permit
|
|
||||||
# Maximum number of rules that a single router may have
|
|
||||||
max_router_rules=200
|
|
|
@ -1,2 +0,0 @@
|
||||||
ca_certs directory for SSL unit tests
|
|
||||||
No files will be generated here, but it should exist for the tests
|
|
|
@ -1,2 +0,0 @@
|
||||||
combined certificates directory for SSL unit tests
|
|
||||||
No files will be created here, but it should exist for the tests
|
|
|
@ -1,2 +0,0 @@
|
||||||
host_certs directory for SSL unit tests
|
|
||||||
No files will be created here, but it should exist for the tests
|
|
|
@ -1,185 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 Big Switch Networks, Inc. All rights reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Kevin Benton, <kevin.benton@bigswitch.com>
|
|
||||||
#
|
|
||||||
|
|
||||||
from neutron.openstack.common import jsonutils as json
|
|
||||||
from neutron.openstack.common import log as logging
|
|
||||||
from neutron.plugins.bigswitch import servermanager
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
|
|
||||||
class HTTPResponseMock():
|
|
||||||
status = 200
|
|
||||||
reason = 'OK'
|
|
||||||
|
|
||||||
def __init__(self, sock, debuglevel=0, strict=0, method=None,
|
|
||||||
buffering=False):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def read(self):
|
|
||||||
return "{'status': '200 OK'}"
|
|
||||||
|
|
||||||
def getheader(self, header):
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
class HTTPResponseMock404(HTTPResponseMock):
|
|
||||||
status = 404
|
|
||||||
reason = 'Not Found'
|
|
||||||
|
|
||||||
def read(self):
|
|
||||||
return "{'status': '%s 404 Not Found'}" % servermanager.NXNETWORK
|
|
||||||
|
|
||||||
|
|
||||||
class HTTPResponseMock500(HTTPResponseMock):
|
|
||||||
status = 500
|
|
||||||
reason = 'Internal Server Error'
|
|
||||||
|
|
||||||
def __init__(self, sock, debuglevel=0, strict=0, method=None,
|
|
||||||
buffering=False, errmsg='500 Internal Server Error'):
|
|
||||||
self.errmsg = errmsg
|
|
||||||
|
|
||||||
def read(self):
|
|
||||||
return "{'status': '%s'}" % self.errmsg
|
|
||||||
|
|
||||||
|
|
||||||
class HTTPConnectionMock(object):
|
|
||||||
|
|
||||||
def __init__(self, server, port, timeout):
|
|
||||||
self.response = None
|
|
||||||
self.broken = False
|
|
||||||
# Port 9000 is the broken server
|
|
||||||
if port == 9000:
|
|
||||||
self.broken = True
|
|
||||||
errmsg = "This server is broken, please try another"
|
|
||||||
self.response = HTTPResponseMock500(None, errmsg=errmsg)
|
|
||||||
|
|
||||||
def request(self, action, uri, body, headers):
|
|
||||||
LOG.debug(_("Request: action=%(action)s, uri=%(uri)r, "
|
|
||||||
"body=%(body)s, headers=%(headers)s"),
|
|
||||||
{'action': action, 'uri': uri,
|
|
||||||
'body': body, 'headers': headers})
|
|
||||||
if self.broken and "ExceptOnBadServer" in uri:
|
|
||||||
raise Exception("Broken server got an unexpected request")
|
|
||||||
if self.response:
|
|
||||||
return
|
|
||||||
|
|
||||||
# detachment may return 404 and plugin shouldn't die
|
|
||||||
if uri.endswith('attachment') and action == 'DELETE':
|
|
||||||
self.response = HTTPResponseMock404(None)
|
|
||||||
else:
|
|
||||||
self.response = HTTPResponseMock(None)
|
|
||||||
|
|
||||||
# Port creations/updates must contain binding information
|
|
||||||
if ('port' in uri and 'attachment' not in uri
|
|
||||||
and 'binding' not in body and action in ('POST', 'PUT')):
|
|
||||||
errmsg = "Port binding info missing in port request '%s'" % body
|
|
||||||
self.response = HTTPResponseMock500(None, errmsg=errmsg)
|
|
||||||
return
|
|
||||||
|
|
||||||
return
|
|
||||||
|
|
||||||
def getresponse(self):
|
|
||||||
return self.response
|
|
||||||
|
|
||||||
def close(self):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class HTTPConnectionMock404(HTTPConnectionMock):
|
|
||||||
|
|
||||||
def __init__(self, server, port, timeout):
|
|
||||||
self.response = HTTPResponseMock404(None)
|
|
||||||
self.broken = True
|
|
||||||
|
|
||||||
|
|
||||||
class HTTPConnectionMock500(HTTPConnectionMock):
|
|
||||||
|
|
||||||
def __init__(self, server, port, timeout):
|
|
||||||
self.response = HTTPResponseMock500(None)
|
|
||||||
self.broken = True
|
|
||||||
|
|
||||||
|
|
||||||
class VerifyMultiTenantFloatingIP(HTTPConnectionMock):
|
|
||||||
|
|
||||||
def request(self, action, uri, body, headers):
|
|
||||||
# Only handle network update requests
|
|
||||||
if 'network' in uri and 'tenant' in uri and 'ports' not in uri:
|
|
||||||
req = json.loads(body)
|
|
||||||
if 'network' not in req or 'floatingips' not in req['network']:
|
|
||||||
msg = _("No floating IPs in request"
|
|
||||||
"uri=%(uri)s, body=%(body)s") % {'uri': uri,
|
|
||||||
'body': body}
|
|
||||||
raise Exception(msg)
|
|
||||||
distinct_tenants = []
|
|
||||||
for flip in req['network']['floatingips']:
|
|
||||||
if flip['tenant_id'] not in distinct_tenants:
|
|
||||||
distinct_tenants.append(flip['tenant_id'])
|
|
||||||
if len(distinct_tenants) < 2:
|
|
||||||
msg = _("Expected floating IPs from multiple tenants."
|
|
||||||
"uri=%(uri)s, body=%(body)s") % {'uri': uri,
|
|
||||||
'body': body}
|
|
||||||
raise Exception(msg)
|
|
||||||
super(VerifyMultiTenantFloatingIP,
|
|
||||||
self).request(action, uri, body, headers)
|
|
||||||
|
|
||||||
|
|
||||||
class HTTPSMockBase(HTTPConnectionMock):
|
|
||||||
expected_cert = ''
|
|
||||||
combined_cert = None
|
|
||||||
|
|
||||||
def __init__(self, host, port=None, key_file=None, cert_file=None,
|
|
||||||
strict=None, timeout=None, source_address=None):
|
|
||||||
self.host = host
|
|
||||||
super(HTTPSMockBase, self).__init__(host, port, timeout)
|
|
||||||
|
|
||||||
def request(self, method, url, body=None, headers={}):
|
|
||||||
self.connect()
|
|
||||||
super(HTTPSMockBase, self).request(method, url, body, headers)
|
|
||||||
|
|
||||||
|
|
||||||
class HTTPSNoValidation(HTTPSMockBase):
|
|
||||||
|
|
||||||
def connect(self):
|
|
||||||
if self.combined_cert:
|
|
||||||
raise Exception('combined_cert set on NoValidation')
|
|
||||||
|
|
||||||
|
|
||||||
class HTTPSCAValidation(HTTPSMockBase):
|
|
||||||
expected_cert = 'DUMMYCERTIFICATEAUTHORITY'
|
|
||||||
|
|
||||||
def connect(self):
|
|
||||||
contents = get_cert_contents(self.combined_cert)
|
|
||||||
if self.expected_cert not in contents:
|
|
||||||
raise Exception('No dummy CA cert in cert_file')
|
|
||||||
|
|
||||||
|
|
||||||
class HTTPSHostValidation(HTTPSMockBase):
|
|
||||||
expected_cert = 'DUMMYCERTFORHOST%s'
|
|
||||||
|
|
||||||
def connect(self):
|
|
||||||
contents = get_cert_contents(self.combined_cert)
|
|
||||||
expected = self.expected_cert % self.host
|
|
||||||
if expected not in contents:
|
|
||||||
raise Exception(_('No host cert for %(server)s in cert %(cert)s'),
|
|
||||||
{'server': self.host, 'cert': contents})
|
|
||||||
|
|
||||||
|
|
||||||
def get_cert_contents(path):
|
|
||||||
raise Exception('METHOD MUST BE MOCKED FOR TEST')
|
|
|
@ -1,33 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
# Copyright 2013 Big Switch Networks, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
|
|
||||||
from neutron.tests.unit.bigswitch import test_base
|
|
||||||
from neutron.tests.unit.openvswitch import test_agent_scheduler
|
|
||||||
|
|
||||||
|
|
||||||
class BigSwitchDhcpAgentNotifierTestCase(
|
|
||||||
test_agent_scheduler.OvsDhcpAgentNotifierTestCase,
|
|
||||||
test_base.BigSwitchTestBase):
|
|
||||||
|
|
||||||
plugin_str = ('%s.NeutronRestProxyV2' %
|
|
||||||
test_base.RESTPROXY_PKG_PATH)
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
self.setup_config_files()
|
|
||||||
self.setup_patches()
|
|
||||||
super(BigSwitchDhcpAgentNotifierTestCase, self).setUp()
|
|
||||||
self.startHttpPatch()
|
|
|
@ -1,74 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
# Copyright 2013 Big Switch Networks, Inc.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import os
|
|
||||||
|
|
||||||
import mock
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
import neutron.common.test_lib as test_lib
|
|
||||||
from neutron.db import api as db
|
|
||||||
from neutron.plugins.bigswitch import config
|
|
||||||
from neutron.tests.unit.bigswitch import fake_server
|
|
||||||
|
|
||||||
# REVISIT(kevinbenton): This needs to be imported here to create the
|
|
||||||
# portbindings table since it's not imported until function call time
|
|
||||||
# in the porttracker_db module, which will cause unit test failures when
|
|
||||||
# the unit tests are being run by testtools
|
|
||||||
from neutron.db import portbindings_db # noqa
|
|
||||||
|
|
||||||
RESTPROXY_PKG_PATH = 'neutron.plugins.bigswitch.plugin'
|
|
||||||
NOTIFIER = 'neutron.plugins.bigswitch.plugin.AgentNotifierApi'
|
|
||||||
CERTFETCH = 'neutron.plugins.bigswitch.servermanager.ServerPool._fetch_cert'
|
|
||||||
SERVER_MANAGER = 'neutron.plugins.bigswitch.servermanager'
|
|
||||||
HTTPCON = 'neutron.plugins.bigswitch.servermanager.httplib.HTTPConnection'
|
|
||||||
SPAWN = 'neutron.plugins.bigswitch.plugin.eventlet.GreenPool.spawn_n'
|
|
||||||
CWATCH = SERVER_MANAGER + '.ServerPool._consistency_watchdog'
|
|
||||||
|
|
||||||
|
|
||||||
class BigSwitchTestBase(object):
|
|
||||||
|
|
||||||
_plugin_name = ('%s.NeutronRestProxyV2' % RESTPROXY_PKG_PATH)
|
|
||||||
|
|
||||||
def setup_config_files(self):
|
|
||||||
etc_path = os.path.join(os.path.dirname(__file__), 'etc')
|
|
||||||
test_lib.test_config['config_files'] = [os.path.join(etc_path,
|
|
||||||
'restproxy.ini.test')]
|
|
||||||
self.addCleanup(cfg.CONF.reset)
|
|
||||||
config.register_config()
|
|
||||||
# Only try SSL on SSL tests
|
|
||||||
cfg.CONF.set_override('server_ssl', False, 'RESTPROXY')
|
|
||||||
cfg.CONF.set_override('ssl_cert_directory',
|
|
||||||
os.path.join(etc_path, 'ssl'), 'RESTPROXY')
|
|
||||||
# The mock interferes with HTTP(S) connection caching
|
|
||||||
cfg.CONF.set_override('cache_connections', False, 'RESTPROXY')
|
|
||||||
|
|
||||||
def setup_patches(self):
|
|
||||||
self.plugin_notifier_p = mock.patch(NOTIFIER)
|
|
||||||
# prevent any greenthreads from spawning
|
|
||||||
self.spawn_p = mock.patch(SPAWN, new=lambda *args, **kwargs: None)
|
|
||||||
# prevent the consistency watchdog from starting
|
|
||||||
self.watch_p = mock.patch(CWATCH, new=lambda *args, **kwargs: None)
|
|
||||||
self.addCleanup(db.clear_db)
|
|
||||||
self.plugin_notifier_p.start()
|
|
||||||
self.spawn_p.start()
|
|
||||||
self.watch_p.start()
|
|
||||||
|
|
||||||
def startHttpPatch(self):
|
|
||||||
self.httpPatch = mock.patch(HTTPCON,
|
|
||||||
new=fake_server.HTTPConnectionMock)
|
|
||||||
self.httpPatch.start()
|
|
|
@ -1,84 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
# Copyright 2014 Big Switch Networks, Inc.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
#
|
|
||||||
# @author Kevin Benton
|
|
||||||
|
|
||||||
import contextlib
|
|
||||||
import mock
|
|
||||||
|
|
||||||
from neutron.tests.unit.bigswitch import test_router_db
|
|
||||||
|
|
||||||
PLUGIN = 'neutron.plugins.bigswitch.plugin'
|
|
||||||
SERVERMANAGER = PLUGIN + '.servermanager'
|
|
||||||
SERVERPOOL = SERVERMANAGER + '.ServerPool'
|
|
||||||
SERVERRESTCALL = SERVERMANAGER + '.ServerProxy.rest_call'
|
|
||||||
HTTPCON = SERVERMANAGER + '.httplib.HTTPConnection'
|
|
||||||
|
|
||||||
|
|
||||||
class CapabilitiesTests(test_router_db.RouterDBTestBase):
|
|
||||||
|
|
||||||
def test_floating_ip_capability(self):
|
|
||||||
with contextlib.nested(
|
|
||||||
mock.patch(SERVERRESTCALL,
|
|
||||||
return_value=(200, None, '["floatingip"]', None)),
|
|
||||||
mock.patch(SERVERPOOL + '.rest_create_floatingip',
|
|
||||||
return_value=(200, None, None, None)),
|
|
||||||
mock.patch(SERVERPOOL + '.rest_delete_floatingip',
|
|
||||||
return_value=(200, None, None, None))
|
|
||||||
) as (mock_rest, mock_create, mock_delete):
|
|
||||||
with self.floatingip_with_assoc() as fip:
|
|
||||||
pass
|
|
||||||
mock_create.assert_has_calls(
|
|
||||||
[mock.call(fip['floatingip']['tenant_id'], fip['floatingip'])]
|
|
||||||
)
|
|
||||||
mock_delete.assert_has_calls(
|
|
||||||
[mock.call(fip['floatingip']['tenant_id'],
|
|
||||||
fip['floatingip']['id'])]
|
|
||||||
)
|
|
||||||
|
|
||||||
def test_floating_ip_capability_neg(self):
|
|
||||||
with contextlib.nested(
|
|
||||||
mock.patch(SERVERRESTCALL,
|
|
||||||
return_value=(200, None, '[""]', None)),
|
|
||||||
mock.patch(SERVERPOOL + '.rest_update_network',
|
|
||||||
return_value=(200, None, None, None))
|
|
||||||
) as (mock_rest, mock_netupdate):
|
|
||||||
with self.floatingip_with_assoc() as fip:
|
|
||||||
pass
|
|
||||||
updates = [call[0][2]['floatingips']
|
|
||||||
for call in mock_netupdate.call_args_list]
|
|
||||||
all_floats = [f['floating_ip_address']
|
|
||||||
for floats in updates for f in floats]
|
|
||||||
self.assertIn(fip['floatingip']['floating_ip_address'], all_floats)
|
|
||||||
|
|
||||||
def test_keep_alive_capability(self):
|
|
||||||
with mock.patch(
|
|
||||||
SERVERRESTCALL, return_value=(200, None, '["keep-alive"]', None)
|
|
||||||
):
|
|
||||||
# perform a task to cause capabilities to be retrieved
|
|
||||||
with self.floatingip_with_assoc():
|
|
||||||
pass
|
|
||||||
# stop default HTTP patch since we need a magicmock
|
|
||||||
self.httpPatch.stop()
|
|
||||||
# now mock HTTP class instead of REST so we can see headers
|
|
||||||
conmock = mock.patch(HTTPCON).start()
|
|
||||||
instance = conmock.return_value
|
|
||||||
instance.getresponse.return_value.getheader.return_value = 'HASHHEADER'
|
|
||||||
with self.network():
|
|
||||||
callheaders = instance.request.mock_calls[0][1][3]
|
|
||||||
self.assertIn('Connection', callheaders)
|
|
||||||
self.assertEqual(callheaders['Connection'], 'keep-alive')
|
|
|
@ -1,188 +0,0 @@
|
||||||
# Copyright 2014 Big Switch Networks, Inc.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Kevin Benton, Big Switch Networks
|
|
||||||
|
|
||||||
import contextlib
|
|
||||||
|
|
||||||
import mock
|
|
||||||
|
|
||||||
from neutron.openstack.common import importutils
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
OVSBRIDGE = 'neutron.agent.linux.ovs_lib.OVSBridge'
|
|
||||||
PLUGINAPI = 'neutron.plugins.bigswitch.agent.restproxy_agent.PluginApi'
|
|
||||||
CONTEXT = 'neutron.context'
|
|
||||||
CONSUMERCREATE = 'neutron.agent.rpc.create_consumers'
|
|
||||||
SGRPC = 'neutron.agent.securitygroups_rpc'
|
|
||||||
SGAGENT = 'neutron.plugins.bigswitch.agent.restproxy_agent.SecurityGroupAgent'
|
|
||||||
AGENTMOD = 'neutron.plugins.bigswitch.agent.restproxy_agent'
|
|
||||||
NEUTRONCFG = 'neutron.common.config'
|
|
||||||
PLCONFIG = 'neutron.plugins.bigswitch.config'
|
|
||||||
|
|
||||||
|
|
||||||
class BaseAgentTestCase(base.BaseTestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(BaseAgentTestCase, self).setUp()
|
|
||||||
self.mod_agent = importutils.import_module(AGENTMOD)
|
|
||||||
|
|
||||||
|
|
||||||
class TestRestProxyAgentOVS(BaseAgentTestCase):
|
|
||||||
def setUp(self):
|
|
||||||
super(TestRestProxyAgentOVS, self).setUp()
|
|
||||||
self.plapi = mock.patch(PLUGINAPI).start()
|
|
||||||
self.ovsbridge = mock.patch(OVSBRIDGE).start()
|
|
||||||
self.context = mock.patch(CONTEXT).start()
|
|
||||||
self.rpc = mock.patch(CONSUMERCREATE).start()
|
|
||||||
self.sg_rpc = mock.patch(SGRPC).start()
|
|
||||||
self.sg_agent = mock.patch(SGAGENT).start()
|
|
||||||
|
|
||||||
def mock_agent(self):
|
|
||||||
mock_context = mock.Mock(return_value='abc')
|
|
||||||
self.context.get_admin_context_without_session = mock_context
|
|
||||||
return self.mod_agent.RestProxyAgent('int-br', 2, 'helper')
|
|
||||||
|
|
||||||
def mock_port_update(self, **kwargs):
|
|
||||||
agent = self.mock_agent()
|
|
||||||
agent.port_update(mock.Mock(), **kwargs)
|
|
||||||
|
|
||||||
def test_port_update(self):
|
|
||||||
port = {'id': 1, 'security_groups': 'default'}
|
|
||||||
|
|
||||||
with mock.patch.object(self.ovsbridge.return_value,
|
|
||||||
'get_vif_port_by_id',
|
|
||||||
return_value=1) as get_vif:
|
|
||||||
self.mock_port_update(port=port)
|
|
||||||
|
|
||||||
get_vif.assert_called_once_with(1)
|
|
||||||
self.sg_agent.assert_has_calls([
|
|
||||||
mock.call().refresh_firewall()
|
|
||||||
])
|
|
||||||
|
|
||||||
def test_port_update_not_vifport(self):
|
|
||||||
port = {'id': 1, 'security_groups': 'default'}
|
|
||||||
|
|
||||||
with mock.patch.object(self.ovsbridge.return_value,
|
|
||||||
'get_vif_port_by_id',
|
|
||||||
return_value=0) as get_vif:
|
|
||||||
self.mock_port_update(port=port)
|
|
||||||
|
|
||||||
get_vif.assert_called_once_with(1)
|
|
||||||
self.assertFalse(self.sg_agent.return_value.refresh_firewall.called)
|
|
||||||
|
|
||||||
def test_port_update_without_secgroup(self):
|
|
||||||
port = {'id': 1}
|
|
||||||
|
|
||||||
with mock.patch.object(self.ovsbridge.return_value,
|
|
||||||
'get_vif_port_by_id',
|
|
||||||
return_value=1) as get_vif:
|
|
||||||
self.mock_port_update(port=port)
|
|
||||||
|
|
||||||
get_vif.assert_called_once_with(1)
|
|
||||||
self.assertFalse(self.sg_agent.return_value.refresh_firewall.called)
|
|
||||||
|
|
||||||
def mock_update_ports(self, vif_port_set=None, registered_ports=None):
|
|
||||||
with mock.patch.object(self.ovsbridge.return_value,
|
|
||||||
'get_vif_port_set',
|
|
||||||
return_value=vif_port_set):
|
|
||||||
agent = self.mock_agent()
|
|
||||||
return agent._update_ports(registered_ports)
|
|
||||||
|
|
||||||
def test_update_ports_unchanged(self):
|
|
||||||
self.assertIsNone(self.mock_update_ports())
|
|
||||||
|
|
||||||
def test_update_ports_changed(self):
|
|
||||||
vif_port_set = set([1, 3])
|
|
||||||
registered_ports = set([1, 2])
|
|
||||||
expected = dict(current=vif_port_set,
|
|
||||||
added=set([3]),
|
|
||||||
removed=set([2]))
|
|
||||||
|
|
||||||
actual = self.mock_update_ports(vif_port_set, registered_ports)
|
|
||||||
|
|
||||||
self.assertEqual(expected, actual)
|
|
||||||
|
|
||||||
def mock_process_devices_filter(self, port_info):
|
|
||||||
agent = self.mock_agent()
|
|
||||||
agent._process_devices_filter(port_info)
|
|
||||||
|
|
||||||
def test_process_devices_filter_add(self):
|
|
||||||
port_info = {'added': 1}
|
|
||||||
|
|
||||||
self.mock_process_devices_filter(port_info)
|
|
||||||
|
|
||||||
self.sg_agent.assert_has_calls([
|
|
||||||
mock.call().prepare_devices_filter(1)
|
|
||||||
])
|
|
||||||
|
|
||||||
def test_process_devices_filter_remove(self):
|
|
||||||
port_info = {'removed': 2}
|
|
||||||
|
|
||||||
self.mock_process_devices_filter(port_info)
|
|
||||||
|
|
||||||
self.sg_agent.assert_has_calls([
|
|
||||||
mock.call().remove_devices_filter(2)
|
|
||||||
])
|
|
||||||
|
|
||||||
def test_process_devices_filter_both(self):
|
|
||||||
port_info = {'added': 1, 'removed': 2}
|
|
||||||
|
|
||||||
self.mock_process_devices_filter(port_info)
|
|
||||||
|
|
||||||
self.sg_agent.assert_has_calls([
|
|
||||||
mock.call().prepare_devices_filter(1),
|
|
||||||
mock.call().remove_devices_filter(2)
|
|
||||||
])
|
|
||||||
|
|
||||||
def test_process_devices_filter_none(self):
|
|
||||||
port_info = {}
|
|
||||||
|
|
||||||
self.mock_process_devices_filter(port_info)
|
|
||||||
|
|
||||||
self.assertFalse(
|
|
||||||
self.sg_agent.return_value.prepare_devices_filter.called)
|
|
||||||
self.assertFalse(
|
|
||||||
self.sg_agent.return_value.remove_devices_filter.called)
|
|
||||||
|
|
||||||
|
|
||||||
class TestRestProxyAgent(BaseAgentTestCase):
|
|
||||||
def mock_main(self):
|
|
||||||
cfg_attrs = {'CONF.RESTPROXYAGENT.integration_bridge': 'integ_br',
|
|
||||||
'CONF.RESTPROXYAGENT.polling_interval': 5,
|
|
||||||
'CONF.RESTPROXYAGENT.virtual_switch_type': 'ovs',
|
|
||||||
'CONF.AGENT.root_helper': 'helper'}
|
|
||||||
with contextlib.nested(
|
|
||||||
mock.patch(AGENTMOD + '.cfg', **cfg_attrs),
|
|
||||||
mock.patch(AGENTMOD + '.config.init'),
|
|
||||||
mock.patch(NEUTRONCFG),
|
|
||||||
mock.patch(PLCONFIG),
|
|
||||||
) as (mock_conf, mock_init, mock_log_conf, mock_pluginconf):
|
|
||||||
self.mod_agent.main()
|
|
||||||
|
|
||||||
mock_log_conf.assert_has_calls([
|
|
||||||
mock.call(mock_conf),
|
|
||||||
])
|
|
||||||
|
|
||||||
def test_main(self):
|
|
||||||
agent_attrs = {'daemon_loop.side_effect': SystemExit(0)}
|
|
||||||
with mock.patch(AGENTMOD + '.RestProxyAgent',
|
|
||||||
**agent_attrs) as mock_agent:
|
|
||||||
self.assertRaises(SystemExit, self.mock_main)
|
|
||||||
|
|
||||||
mock_agent.assert_has_calls([
|
|
||||||
mock.call('integ_br', 5, 'helper', 'ovs'),
|
|
||||||
mock.call().daemon_loop()
|
|
||||||
])
|
|
|
@ -1,316 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
# Copyright 2012 Big Switch Networks, Inc.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import contextlib
|
|
||||||
import mock
|
|
||||||
from oslo.config import cfg
|
|
||||||
import webob.exc
|
|
||||||
|
|
||||||
from neutron.common import constants
|
|
||||||
from neutron import context
|
|
||||||
from neutron.extensions import portbindings
|
|
||||||
from neutron import manager
|
|
||||||
from neutron.tests.unit import _test_extension_portbindings as test_bindings
|
|
||||||
from neutron.tests.unit.bigswitch import fake_server
|
|
||||||
from neutron.tests.unit.bigswitch import test_base
|
|
||||||
from neutron.tests.unit import test_api_v2
|
|
||||||
import neutron.tests.unit.test_db_plugin as test_plugin
|
|
||||||
import neutron.tests.unit.test_extension_allowedaddresspairs as test_addr_pair
|
|
||||||
|
|
||||||
patch = mock.patch
|
|
||||||
HTTPCON = 'neutron.plugins.bigswitch.servermanager.httplib.HTTPConnection'
|
|
||||||
|
|
||||||
|
|
||||||
class BigSwitchProxyPluginV2TestCase(test_base.BigSwitchTestBase,
|
|
||||||
test_plugin.NeutronDbPluginV2TestCase):
|
|
||||||
|
|
||||||
def setUp(self, plugin_name=None):
|
|
||||||
if hasattr(self, 'HAS_PORT_FILTER'):
|
|
||||||
cfg.CONF.set_override(
|
|
||||||
'enable_security_group', self.HAS_PORT_FILTER, 'SECURITYGROUP')
|
|
||||||
self.setup_config_files()
|
|
||||||
self.setup_patches()
|
|
||||||
if plugin_name:
|
|
||||||
self._plugin_name = plugin_name
|
|
||||||
super(BigSwitchProxyPluginV2TestCase,
|
|
||||||
self).setUp(self._plugin_name)
|
|
||||||
self.port_create_status = 'BUILD'
|
|
||||||
self.startHttpPatch()
|
|
||||||
|
|
||||||
|
|
||||||
class TestBigSwitchProxyBasicGet(test_plugin.TestBasicGet,
|
|
||||||
BigSwitchProxyPluginV2TestCase):
|
|
||||||
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestBigSwitchProxyV2HTTPResponse(test_plugin.TestV2HTTPResponse,
|
|
||||||
BigSwitchProxyPluginV2TestCase):
|
|
||||||
|
|
||||||
def test_failover_memory(self):
|
|
||||||
# first request causes failover so next shouldn't hit bad server
|
|
||||||
with self.network() as net:
|
|
||||||
kwargs = {'tenant_id': 'ExceptOnBadServer'}
|
|
||||||
with self.network(**kwargs) as net:
|
|
||||||
req = self.new_show_request('networks', net['network']['id'])
|
|
||||||
res = req.get_response(self.api)
|
|
||||||
self.assertEqual(res.status_int, 200)
|
|
||||||
|
|
||||||
|
|
||||||
class TestBigSwitchProxyPortsV2(test_plugin.TestPortsV2,
|
|
||||||
BigSwitchProxyPluginV2TestCase,
|
|
||||||
test_bindings.PortBindingsTestCase):
|
|
||||||
|
|
||||||
VIF_TYPE = portbindings.VIF_TYPE_OVS
|
|
||||||
HAS_PORT_FILTER = False
|
|
||||||
|
|
||||||
def setUp(self, plugin_name=None):
|
|
||||||
super(TestBigSwitchProxyPortsV2,
|
|
||||||
self).setUp(self._plugin_name)
|
|
||||||
|
|
||||||
def test_router_port_status_active(self):
|
|
||||||
# router ports screw up port auto-deletion so it has to be
|
|
||||||
# disabled for this test
|
|
||||||
with self.network(do_delete=False) as net:
|
|
||||||
with self.subnet(network=net, do_delete=False) as sub:
|
|
||||||
with self.port(
|
|
||||||
subnet=sub,
|
|
||||||
no_delete=True,
|
|
||||||
device_owner=constants.DEVICE_OWNER_ROUTER_INTF
|
|
||||||
) as port:
|
|
||||||
# router ports should be immediately active
|
|
||||||
self.assertEqual(port['port']['status'], 'ACTIVE')
|
|
||||||
|
|
||||||
def test_update_port_status_build(self):
|
|
||||||
# normal ports go into the pending build state for async creation
|
|
||||||
with self.port() as port:
|
|
||||||
self.assertEqual(port['port']['status'], 'BUILD')
|
|
||||||
self.assertEqual(self.port_create_status, 'BUILD')
|
|
||||||
|
|
||||||
def _get_ports(self, netid):
|
|
||||||
return self.deserialize('json',
|
|
||||||
self._list_ports('json', netid=netid))['ports']
|
|
||||||
|
|
||||||
def test_rollback_for_port_create(self):
|
|
||||||
plugin = manager.NeutronManager.get_plugin()
|
|
||||||
with self.subnet() as s:
|
|
||||||
# stop normal patch
|
|
||||||
self.httpPatch.stop()
|
|
||||||
# allow thread spawns for this test
|
|
||||||
self.spawn_p.stop()
|
|
||||||
kwargs = {'device_id': 'somedevid'}
|
|
||||||
# put in a broken 'server'
|
|
||||||
httpPatch = patch(HTTPCON, new=fake_server.HTTPConnectionMock500)
|
|
||||||
httpPatch.start()
|
|
||||||
with self.port(subnet=s, **kwargs):
|
|
||||||
# wait for async port create request to finish
|
|
||||||
plugin.evpool.waitall()
|
|
||||||
# put good 'server' back in
|
|
||||||
httpPatch.stop()
|
|
||||||
self.httpPatch.start()
|
|
||||||
ports = self._get_ports(s['subnet']['network_id'])
|
|
||||||
#failure to create should result in port in error state
|
|
||||||
self.assertEqual(ports[0]['status'], 'ERROR')
|
|
||||||
|
|
||||||
def test_rollback_for_port_update(self):
|
|
||||||
with self.network() as n:
|
|
||||||
with self.port(network_id=n['network']['id'],
|
|
||||||
device_id='66') as port:
|
|
||||||
port = self._get_ports(n['network']['id'])[0]
|
|
||||||
data = {'port': {'name': 'aNewName', 'device_id': '99'}}
|
|
||||||
# stop normal patch
|
|
||||||
self.httpPatch.stop()
|
|
||||||
with patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
|
|
||||||
self.new_update_request(
|
|
||||||
'ports', data, port['id']).get_response(self.api)
|
|
||||||
self.httpPatch.start()
|
|
||||||
uport = self._get_ports(n['network']['id'])[0]
|
|
||||||
# name should have stayed the same
|
|
||||||
self.assertEqual(port['name'], uport['name'])
|
|
||||||
|
|
||||||
def test_rollback_for_port_delete(self):
|
|
||||||
with self.network() as n:
|
|
||||||
with self.port(network_id=n['network']['id'],
|
|
||||||
device_id='somedevid') as port:
|
|
||||||
# stop normal patch
|
|
||||||
self.httpPatch.stop()
|
|
||||||
with patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
|
|
||||||
self._delete('ports', port['port']['id'],
|
|
||||||
expected_code=
|
|
||||||
webob.exc.HTTPInternalServerError.code)
|
|
||||||
self.httpPatch.start()
|
|
||||||
port = self._get_ports(n['network']['id'])[0]
|
|
||||||
self.assertEqual('BUILD', port['status'])
|
|
||||||
|
|
||||||
def test_correct_shared_net_tenant_id(self):
|
|
||||||
# tenant_id in port requests should match network tenant_id instead
|
|
||||||
# of port tenant_id
|
|
||||||
def rest_port_op(self, ten_id, netid, port):
|
|
||||||
if ten_id != 'SHARED':
|
|
||||||
raise Exception('expecting tenant_id SHARED. got %s' % ten_id)
|
|
||||||
with self.network(tenant_id='SHARED', shared=True) as net:
|
|
||||||
with self.subnet(network=net) as sub:
|
|
||||||
pref = 'neutron.plugins.bigswitch.servermanager.ServerPool.%s'
|
|
||||||
tomock = [pref % 'rest_create_port',
|
|
||||||
pref % 'rest_update_port',
|
|
||||||
pref % 'rest_delete_port']
|
|
||||||
patches = [patch(f, create=True, new=rest_port_op)
|
|
||||||
for f in tomock]
|
|
||||||
for restp in patches:
|
|
||||||
restp.start()
|
|
||||||
with self.port(subnet=sub, tenant_id='port-owner') as port:
|
|
||||||
data = {'port': {'binding:host_id': 'someotherhost',
|
|
||||||
'device_id': 'override_dev'}}
|
|
||||||
req = self.new_update_request('ports', data,
|
|
||||||
port['port']['id'])
|
|
||||||
res = req.get_response(self.api)
|
|
||||||
self.assertEqual(res.status_int, 200)
|
|
||||||
|
|
||||||
def test_create404_triggers_sync(self):
|
|
||||||
# allow async port thread for this patch
|
|
||||||
self.spawn_p.stop()
|
|
||||||
with contextlib.nested(
|
|
||||||
self.subnet(),
|
|
||||||
patch(HTTPCON, create=True,
|
|
||||||
new=fake_server.HTTPConnectionMock404),
|
|
||||||
patch(test_base.RESTPROXY_PKG_PATH
|
|
||||||
+ '.NeutronRestProxyV2._send_all_data')
|
|
||||||
) as (s, mock_http, mock_send_all):
|
|
||||||
with self.port(subnet=s, device_id='somedevid') as p:
|
|
||||||
# wait for the async port thread to finish
|
|
||||||
plugin = manager.NeutronManager.get_plugin()
|
|
||||||
plugin.evpool.waitall()
|
|
||||||
call = mock.call(
|
|
||||||
send_routers=True, send_ports=True, send_floating_ips=True,
|
|
||||||
triggered_by_tenant=p['port']['tenant_id']
|
|
||||||
)
|
|
||||||
mock_send_all.assert_has_calls([call])
|
|
||||||
self.spawn_p.start()
|
|
||||||
|
|
||||||
def test_port_vif_details_default(self):
|
|
||||||
kwargs = {'name': 'name', 'device_id': 'override_dev'}
|
|
||||||
with self.port(**kwargs) as port:
|
|
||||||
self.assertEqual(port['port']['binding:vif_type'],
|
|
||||||
portbindings.VIF_TYPE_OVS)
|
|
||||||
|
|
||||||
def test_port_vif_details_override(self):
|
|
||||||
# ivshost is in the test config to override to IVS
|
|
||||||
kwargs = {'name': 'name', 'binding:host_id': 'ivshost',
|
|
||||||
'device_id': 'override_dev'}
|
|
||||||
with self.port(**kwargs) as port:
|
|
||||||
self.assertEqual(port['port']['binding:vif_type'],
|
|
||||||
portbindings.VIF_TYPE_IVS)
|
|
||||||
kwargs = {'name': 'name2', 'binding:host_id': 'someotherhost',
|
|
||||||
'device_id': 'other_dev'}
|
|
||||||
with self.port(**kwargs) as port:
|
|
||||||
self.assertEqual(port['port']['binding:vif_type'], self.VIF_TYPE)
|
|
||||||
|
|
||||||
def test_port_move(self):
|
|
||||||
# ivshost is in the test config to override to IVS
|
|
||||||
kwargs = {'name': 'name', 'binding:host_id': 'ivshost',
|
|
||||||
'device_id': 'override_dev'}
|
|
||||||
with self.port(**kwargs) as port:
|
|
||||||
data = {'port': {'binding:host_id': 'someotherhost',
|
|
||||||
'device_id': 'override_dev'}}
|
|
||||||
req = self.new_update_request('ports', data, port['port']['id'])
|
|
||||||
res = self.deserialize(self.fmt, req.get_response(self.api))
|
|
||||||
self.assertEqual(res['port']['binding:vif_type'], self.VIF_TYPE)
|
|
||||||
|
|
||||||
def _make_port(self, fmt, net_id, expected_res_status=None, arg_list=None,
|
|
||||||
**kwargs):
|
|
||||||
arg_list = arg_list or ()
|
|
||||||
arg_list += ('binding:host_id', )
|
|
||||||
res = self._create_port(fmt, net_id, expected_res_status,
|
|
||||||
arg_list, **kwargs)
|
|
||||||
# Things can go wrong - raise HTTP exc with res code only
|
|
||||||
# so it can be caught by unit tests
|
|
||||||
if res.status_int >= 400:
|
|
||||||
raise webob.exc.HTTPClientError(code=res.status_int)
|
|
||||||
return self.deserialize(fmt, res)
|
|
||||||
|
|
||||||
|
|
||||||
class TestVifDifferentDefault(BigSwitchProxyPluginV2TestCase):
|
|
||||||
|
|
||||||
def setup_config_files(self):
|
|
||||||
super(TestVifDifferentDefault, self).setup_config_files()
|
|
||||||
cfg.CONF.set_override('vif_type', 'ivs', 'NOVA')
|
|
||||||
|
|
||||||
def test_default_viftype(self):
|
|
||||||
with self.port() as port:
|
|
||||||
self.assertEqual(port['port']['binding:vif_type'], 'ivs')
|
|
||||||
|
|
||||||
|
|
||||||
class TestBigSwitchProxyNetworksV2(test_plugin.TestNetworksV2,
|
|
||||||
BigSwitchProxyPluginV2TestCase):
|
|
||||||
|
|
||||||
def _get_networks(self, tenant_id):
|
|
||||||
ctx = context.Context('', tenant_id)
|
|
||||||
return manager.NeutronManager.get_plugin().get_networks(ctx)
|
|
||||||
|
|
||||||
def test_rollback_on_network_create(self):
|
|
||||||
tid = test_api_v2._uuid()
|
|
||||||
kwargs = {'tenant_id': tid}
|
|
||||||
self.httpPatch.stop()
|
|
||||||
with patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
|
|
||||||
self._create_network('json', 'netname', True, **kwargs)
|
|
||||||
self.httpPatch.start()
|
|
||||||
self.assertFalse(self._get_networks(tid))
|
|
||||||
|
|
||||||
def test_rollback_on_network_update(self):
|
|
||||||
with self.network() as n:
|
|
||||||
data = {'network': {'name': 'aNewName'}}
|
|
||||||
self.httpPatch.stop()
|
|
||||||
with patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
|
|
||||||
self.new_update_request(
|
|
||||||
'networks', data, n['network']['id']
|
|
||||||
).get_response(self.api)
|
|
||||||
self.httpPatch.start()
|
|
||||||
updatedn = self._get_networks(n['network']['tenant_id'])[0]
|
|
||||||
# name should have stayed the same due to failure
|
|
||||||
self.assertEqual(n['network']['name'], updatedn['name'])
|
|
||||||
|
|
||||||
def test_rollback_on_network_delete(self):
|
|
||||||
with self.network() as n:
|
|
||||||
self.httpPatch.stop()
|
|
||||||
with patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
|
|
||||||
self._delete(
|
|
||||||
'networks', n['network']['id'],
|
|
||||||
expected_code=webob.exc.HTTPInternalServerError.code)
|
|
||||||
self.httpPatch.start()
|
|
||||||
# network should still exist in db
|
|
||||||
self.assertEqual(n['network']['id'],
|
|
||||||
self._get_networks(n['network']['tenant_id']
|
|
||||||
)[0]['id'])
|
|
||||||
|
|
||||||
|
|
||||||
class TestBigSwitchProxySubnetsV2(test_plugin.TestSubnetsV2,
|
|
||||||
BigSwitchProxyPluginV2TestCase):
|
|
||||||
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestBigSwitchProxySync(BigSwitchProxyPluginV2TestCase):
|
|
||||||
|
|
||||||
def test_send_data(self):
|
|
||||||
plugin_obj = manager.NeutronManager.get_plugin()
|
|
||||||
result = plugin_obj._send_all_data()
|
|
||||||
self.assertEqual(result[0], 200)
|
|
||||||
|
|
||||||
|
|
||||||
class TestBigSwitchAddressPairs(BigSwitchProxyPluginV2TestCase,
|
|
||||||
test_addr_pair.TestAllowedAddressPairs):
|
|
||||||
pass
|
|
|
@ -1,554 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 Big Switch Networks, Inc. All rights reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# Adapted from neutron.tests.unit.test_l3_plugin
|
|
||||||
# @author: Sumit Naiksatam, sumitnaiksatam@gmail.com
|
|
||||||
#
|
|
||||||
|
|
||||||
import contextlib
|
|
||||||
import copy
|
|
||||||
|
|
||||||
import mock
|
|
||||||
from oslo.config import cfg
|
|
||||||
from six import moves
|
|
||||||
from webob import exc
|
|
||||||
|
|
||||||
from neutron.common import test_lib
|
|
||||||
from neutron import context
|
|
||||||
from neutron.extensions import l3
|
|
||||||
from neutron import manager
|
|
||||||
from neutron.openstack.common import uuidutils
|
|
||||||
from neutron.plugins.bigswitch.extensions import routerrule
|
|
||||||
from neutron.tests.unit.bigswitch import fake_server
|
|
||||||
from neutron.tests.unit.bigswitch import test_base
|
|
||||||
from neutron.tests.unit import test_api_v2
|
|
||||||
from neutron.tests.unit import test_extension_extradhcpopts as test_extradhcp
|
|
||||||
from neutron.tests.unit import test_l3_plugin
|
|
||||||
|
|
||||||
|
|
||||||
HTTPCON = 'neutron.plugins.bigswitch.servermanager.httplib.HTTPConnection'
|
|
||||||
_uuid = uuidutils.generate_uuid
|
|
||||||
|
|
||||||
|
|
||||||
class RouterRulesTestExtensionManager(object):
|
|
||||||
|
|
||||||
def get_resources(self):
|
|
||||||
l3.RESOURCE_ATTRIBUTE_MAP['routers'].update(
|
|
||||||
routerrule.EXTENDED_ATTRIBUTES_2_0['routers'])
|
|
||||||
return l3.L3.get_resources()
|
|
||||||
|
|
||||||
def get_actions(self):
|
|
||||||
return []
|
|
||||||
|
|
||||||
def get_request_extensions(self):
|
|
||||||
return []
|
|
||||||
|
|
||||||
|
|
||||||
class DHCPOptsTestCase(test_base.BigSwitchTestBase,
|
|
||||||
test_extradhcp.TestExtraDhcpOpt):
|
|
||||||
|
|
||||||
def setUp(self, plugin=None):
|
|
||||||
self.setup_patches()
|
|
||||||
self.setup_config_files()
|
|
||||||
super(test_extradhcp.ExtraDhcpOptDBTestCase,
|
|
||||||
self).setUp(plugin=self._plugin_name)
|
|
||||||
self.startHttpPatch()
|
|
||||||
|
|
||||||
|
|
||||||
class RouterDBTestBase(test_base.BigSwitchTestBase,
|
|
||||||
test_l3_plugin.L3BaseForIntTests,
|
|
||||||
test_l3_plugin.L3NatTestCaseMixin):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
self.setup_patches()
|
|
||||||
self.setup_config_files()
|
|
||||||
ext_mgr = RouterRulesTestExtensionManager()
|
|
||||||
super(RouterDBTestBase, self).setUp(plugin=self._plugin_name,
|
|
||||||
ext_mgr=ext_mgr)
|
|
||||||
cfg.CONF.set_default('allow_overlapping_ips', False)
|
|
||||||
self.plugin_obj = manager.NeutronManager.get_plugin()
|
|
||||||
self.startHttpPatch()
|
|
||||||
|
|
||||||
def tearDown(self):
|
|
||||||
super(RouterDBTestBase, self).tearDown()
|
|
||||||
del test_lib.test_config['config_files']
|
|
||||||
|
|
||||||
|
|
||||||
class RouterDBTestCase(RouterDBTestBase,
|
|
||||||
test_l3_plugin.L3NatDBIntTestCase):
|
|
||||||
|
|
||||||
def test_router_remove_router_interface_wrong_subnet_returns_400(self):
|
|
||||||
with self.router() as r:
|
|
||||||
with self.subnet() as s:
|
|
||||||
with self.subnet(cidr='10.0.10.0/24') as s1:
|
|
||||||
with self.port(subnet=s1, no_delete=True) as p:
|
|
||||||
self._router_interface_action('add',
|
|
||||||
r['router']['id'],
|
|
||||||
None,
|
|
||||||
p['port']['id'])
|
|
||||||
self._router_interface_action('remove',
|
|
||||||
r['router']['id'],
|
|
||||||
s['subnet']['id'],
|
|
||||||
p['port']['id'],
|
|
||||||
exc.HTTPBadRequest.code)
|
|
||||||
#remove properly to clean-up
|
|
||||||
self._router_interface_action('remove',
|
|
||||||
r['router']['id'],
|
|
||||||
None,
|
|
||||||
p['port']['id'])
|
|
||||||
|
|
||||||
def test_router_remove_router_interface_wrong_port_returns_404(self):
|
|
||||||
with self.router() as r:
|
|
||||||
with self.subnet() as s:
|
|
||||||
with self.port(subnet=s, no_delete=True) as p:
|
|
||||||
self._router_interface_action('add',
|
|
||||||
r['router']['id'],
|
|
||||||
None,
|
|
||||||
p['port']['id'])
|
|
||||||
# create another port for testing failure case
|
|
||||||
res = self._create_port('json', p['port']['network_id'])
|
|
||||||
p2 = self.deserialize('json', res)
|
|
||||||
self._router_interface_action('remove',
|
|
||||||
r['router']['id'],
|
|
||||||
None,
|
|
||||||
p2['port']['id'],
|
|
||||||
exc.HTTPNotFound.code)
|
|
||||||
# remove correct interface to cleanup
|
|
||||||
self._router_interface_action('remove',
|
|
||||||
r['router']['id'],
|
|
||||||
None,
|
|
||||||
p['port']['id'])
|
|
||||||
# remove extra port created
|
|
||||||
self._delete('ports', p2['port']['id'])
|
|
||||||
|
|
||||||
def test_multi_tenant_flip_alllocation(self):
|
|
||||||
tenant1_id = _uuid()
|
|
||||||
tenant2_id = _uuid()
|
|
||||||
with contextlib.nested(
|
|
||||||
self.network(tenant_id=tenant1_id),
|
|
||||||
self.network(tenant_id=tenant2_id)) as (n1, n2):
|
|
||||||
with contextlib.nested(
|
|
||||||
self.subnet(network=n1, cidr='11.0.0.0/24'),
|
|
||||||
self.subnet(network=n2, cidr='12.0.0.0/24'),
|
|
||||||
self.subnet(cidr='13.0.0.0/24')) as (s1, s2, psub):
|
|
||||||
with contextlib.nested(
|
|
||||||
self.router(tenant_id=tenant1_id),
|
|
||||||
self.router(tenant_id=tenant2_id),
|
|
||||||
self.port(subnet=s1, tenant_id=tenant1_id),
|
|
||||||
self.port(subnet=s2, tenant_id=tenant2_id)) as (r1, r2,
|
|
||||||
p1, p2):
|
|
||||||
self._set_net_external(psub['subnet']['network_id'])
|
|
||||||
s1id = p1['port']['fixed_ips'][0]['subnet_id']
|
|
||||||
s2id = p2['port']['fixed_ips'][0]['subnet_id']
|
|
||||||
s1 = {'subnet': {'id': s1id}}
|
|
||||||
s2 = {'subnet': {'id': s2id}}
|
|
||||||
self._add_external_gateway_to_router(
|
|
||||||
r1['router']['id'],
|
|
||||||
psub['subnet']['network_id'])
|
|
||||||
self._add_external_gateway_to_router(
|
|
||||||
r2['router']['id'],
|
|
||||||
psub['subnet']['network_id'])
|
|
||||||
self._router_interface_action(
|
|
||||||
'add', r1['router']['id'],
|
|
||||||
s1['subnet']['id'], None)
|
|
||||||
self._router_interface_action(
|
|
||||||
'add', r2['router']['id'],
|
|
||||||
s2['subnet']['id'], None)
|
|
||||||
fl1 = self._make_floatingip_for_tenant_port(
|
|
||||||
net_id=psub['subnet']['network_id'],
|
|
||||||
port_id=p1['port']['id'],
|
|
||||||
tenant_id=tenant1_id)
|
|
||||||
self.httpPatch.stop()
|
|
||||||
multiFloatPatch = mock.patch(
|
|
||||||
HTTPCON,
|
|
||||||
new=fake_server.VerifyMultiTenantFloatingIP)
|
|
||||||
multiFloatPatch.start()
|
|
||||||
fl2 = self._make_floatingip_for_tenant_port(
|
|
||||||
net_id=psub['subnet']['network_id'],
|
|
||||||
port_id=p2['port']['id'],
|
|
||||||
tenant_id=tenant2_id)
|
|
||||||
multiFloatPatch.stop()
|
|
||||||
self.httpPatch.start()
|
|
||||||
self._delete('floatingips', fl1['floatingip']['id'])
|
|
||||||
self._delete('floatingips', fl2['floatingip']['id'])
|
|
||||||
self._router_interface_action(
|
|
||||||
'remove', r1['router']['id'],
|
|
||||||
s1['subnet']['id'], None)
|
|
||||||
self._router_interface_action(
|
|
||||||
'remove', r2['router']['id'],
|
|
||||||
s2['subnet']['id'], None)
|
|
||||||
|
|
||||||
def _make_floatingip_for_tenant_port(self, net_id, port_id, tenant_id):
|
|
||||||
data = {'floatingip': {'floating_network_id': net_id,
|
|
||||||
'tenant_id': tenant_id,
|
|
||||||
'port_id': port_id}}
|
|
||||||
floatingip_req = self.new_create_request('floatingips', data, self.fmt)
|
|
||||||
res = floatingip_req.get_response(self.ext_api)
|
|
||||||
return self.deserialize(self.fmt, res)
|
|
||||||
|
|
||||||
def test_floatingip_with_invalid_create_port(self):
|
|
||||||
self._test_floatingip_with_invalid_create_port(
|
|
||||||
'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2')
|
|
||||||
|
|
||||||
def test_create_floatingip_no_ext_gateway_return_404(self):
|
|
||||||
with self.subnet(cidr='10.0.10.0/24') as public_sub:
|
|
||||||
self._set_net_external(public_sub['subnet']['network_id'])
|
|
||||||
with self.port() as private_port:
|
|
||||||
with self.router():
|
|
||||||
res = self._create_floatingip(
|
|
||||||
'json',
|
|
||||||
public_sub['subnet']['network_id'],
|
|
||||||
port_id=private_port['port']['id'])
|
|
||||||
self.assertEqual(res.status_int, exc.HTTPNotFound.code)
|
|
||||||
|
|
||||||
def test_router_update_gateway(self):
|
|
||||||
with self.router() as r:
|
|
||||||
with self.subnet() as s1:
|
|
||||||
with self.subnet(cidr='10.0.10.0/24') as s2:
|
|
||||||
self._set_net_external(s1['subnet']['network_id'])
|
|
||||||
self._add_external_gateway_to_router(
|
|
||||||
r['router']['id'],
|
|
||||||
s1['subnet']['network_id'])
|
|
||||||
body = self._show('routers', r['router']['id'])
|
|
||||||
net_id = (body['router']
|
|
||||||
['external_gateway_info']['network_id'])
|
|
||||||
self.assertEqual(net_id, s1['subnet']['network_id'])
|
|
||||||
self._set_net_external(s2['subnet']['network_id'])
|
|
||||||
self._add_external_gateway_to_router(
|
|
||||||
r['router']['id'],
|
|
||||||
s2['subnet']['network_id'])
|
|
||||||
body = self._show('routers', r['router']['id'])
|
|
||||||
net_id = (body['router']
|
|
||||||
['external_gateway_info']['network_id'])
|
|
||||||
self.assertEqual(net_id, s2['subnet']['network_id'])
|
|
||||||
self._remove_external_gateway_from_router(
|
|
||||||
r['router']['id'],
|
|
||||||
s2['subnet']['network_id'])
|
|
||||||
|
|
||||||
def test_router_add_interface_overlapped_cidr(self):
|
|
||||||
self.skipTest("Plugin does not support")
|
|
||||||
|
|
||||||
def test_router_add_interface_overlapped_cidr_returns_400(self):
|
|
||||||
self.skipTest("Plugin does not support")
|
|
||||||
|
|
||||||
def test_list_nets_external(self):
|
|
||||||
self.skipTest("Plugin does not support")
|
|
||||||
|
|
||||||
def test_router_update_gateway_with_existed_floatingip(self):
|
|
||||||
with self.subnet(cidr='10.0.10.0/24') as subnet:
|
|
||||||
self._set_net_external(subnet['subnet']['network_id'])
|
|
||||||
with self.floatingip_with_assoc() as fip:
|
|
||||||
self._add_external_gateway_to_router(
|
|
||||||
fip['floatingip']['router_id'],
|
|
||||||
subnet['subnet']['network_id'],
|
|
||||||
expected_code=exc.HTTPConflict.code)
|
|
||||||
|
|
||||||
def test_router_remove_interface_wrong_subnet_returns_400(self):
|
|
||||||
with self.router() as r:
|
|
||||||
with self.subnet(cidr='10.0.10.0/24') as s:
|
|
||||||
with self.port(no_delete=True) as p:
|
|
||||||
self._router_interface_action('add',
|
|
||||||
r['router']['id'],
|
|
||||||
None,
|
|
||||||
p['port']['id'])
|
|
||||||
self._router_interface_action('remove',
|
|
||||||
r['router']['id'],
|
|
||||||
s['subnet']['id'],
|
|
||||||
p['port']['id'],
|
|
||||||
exc.HTTPBadRequest.code)
|
|
||||||
#remove properly to clean-up
|
|
||||||
self._router_interface_action('remove',
|
|
||||||
r['router']['id'],
|
|
||||||
None,
|
|
||||||
p['port']['id'])
|
|
||||||
|
|
||||||
def test_router_remove_interface_wrong_port_returns_404(self):
|
|
||||||
with self.router() as r:
|
|
||||||
with self.subnet(cidr='10.0.10.0/24'):
|
|
||||||
with self.port(no_delete=True) as p:
|
|
||||||
self._router_interface_action('add',
|
|
||||||
r['router']['id'],
|
|
||||||
None,
|
|
||||||
p['port']['id'])
|
|
||||||
# create another port for testing failure case
|
|
||||||
res = self._create_port('json', p['port']['network_id'])
|
|
||||||
p2 = self.deserialize('json', res)
|
|
||||||
self._router_interface_action('remove',
|
|
||||||
r['router']['id'],
|
|
||||||
None,
|
|
||||||
p2['port']['id'],
|
|
||||||
exc.HTTPNotFound.code)
|
|
||||||
# remove correct interface to cleanup
|
|
||||||
self._router_interface_action('remove',
|
|
||||||
r['router']['id'],
|
|
||||||
None,
|
|
||||||
p['port']['id'])
|
|
||||||
# remove extra port created
|
|
||||||
self._delete('ports', p2['port']['id'])
|
|
||||||
|
|
||||||
def test_send_data(self):
|
|
||||||
fmt = 'json'
|
|
||||||
plugin_obj = manager.NeutronManager.get_plugin()
|
|
||||||
|
|
||||||
with self.router() as r:
|
|
||||||
r_id = r['router']['id']
|
|
||||||
|
|
||||||
with self.subnet(cidr='10.0.10.0/24') as s:
|
|
||||||
s_id = s['subnet']['id']
|
|
||||||
|
|
||||||
with self.router() as r1:
|
|
||||||
r1_id = r1['router']['id']
|
|
||||||
body = self._router_interface_action('add', r_id, s_id,
|
|
||||||
None)
|
|
||||||
self.assertIn('port_id', body)
|
|
||||||
r_port_id = body['port_id']
|
|
||||||
body = self._show('ports', r_port_id)
|
|
||||||
self.assertEqual(body['port']['device_id'], r_id)
|
|
||||||
|
|
||||||
with self.subnet(cidr='10.0.20.0/24') as s1:
|
|
||||||
s1_id = s1['subnet']['id']
|
|
||||||
body = self._router_interface_action('add', r1_id,
|
|
||||||
s1_id, None)
|
|
||||||
self.assertIn('port_id', body)
|
|
||||||
r1_port_id = body['port_id']
|
|
||||||
body = self._show('ports', r1_port_id)
|
|
||||||
self.assertEqual(body['port']['device_id'], r1_id)
|
|
||||||
|
|
||||||
with self.subnet(cidr='11.0.0.0/24') as public_sub:
|
|
||||||
public_net_id = public_sub['subnet']['network_id']
|
|
||||||
self._set_net_external(public_net_id)
|
|
||||||
|
|
||||||
with self.port() as prv_port:
|
|
||||||
prv_fixed_ip = prv_port['port']['fixed_ips'][0]
|
|
||||||
priv_sub_id = prv_fixed_ip['subnet_id']
|
|
||||||
self._add_external_gateway_to_router(
|
|
||||||
r_id, public_net_id)
|
|
||||||
self._router_interface_action('add', r_id,
|
|
||||||
priv_sub_id,
|
|
||||||
None)
|
|
||||||
|
|
||||||
priv_port_id = prv_port['port']['id']
|
|
||||||
res = self._create_floatingip(
|
|
||||||
fmt, public_net_id,
|
|
||||||
port_id=priv_port_id)
|
|
||||||
self.assertEqual(res.status_int,
|
|
||||||
exc.HTTPCreated.code)
|
|
||||||
floatingip = self.deserialize(fmt, res)
|
|
||||||
|
|
||||||
result = plugin_obj._send_all_data()
|
|
||||||
self.assertEqual(result[0], 200)
|
|
||||||
|
|
||||||
self._delete('floatingips',
|
|
||||||
floatingip['floatingip']['id'])
|
|
||||||
self._remove_external_gateway_from_router(
|
|
||||||
r_id, public_net_id)
|
|
||||||
self._router_interface_action('remove', r_id,
|
|
||||||
priv_sub_id,
|
|
||||||
None)
|
|
||||||
self._router_interface_action('remove', r_id, s_id,
|
|
||||||
None)
|
|
||||||
self._show('ports', r_port_id,
|
|
||||||
expected_code=exc.HTTPNotFound.code)
|
|
||||||
self._router_interface_action('remove', r1_id, s1_id,
|
|
||||||
None)
|
|
||||||
self._show('ports', r1_port_id,
|
|
||||||
expected_code=exc.HTTPNotFound.code)
|
|
||||||
|
|
||||||
def test_router_rules_update(self):
|
|
||||||
with self.router() as r:
|
|
||||||
r_id = r['router']['id']
|
|
||||||
router_rules = [{'destination': '1.2.3.4/32',
|
|
||||||
'source': '4.3.2.1/32',
|
|
||||||
'action': 'permit',
|
|
||||||
'nexthops': ['4.4.4.4', '4.4.4.5']}]
|
|
||||||
body = self._update('routers', r_id,
|
|
||||||
{'router': {'router_rules': router_rules}})
|
|
||||||
|
|
||||||
body = self._show('routers', r['router']['id'])
|
|
||||||
self.assertIn('router_rules', body['router'])
|
|
||||||
rules = body['router']['router_rules']
|
|
||||||
self.assertEqual(_strip_rule_ids(rules), router_rules)
|
|
||||||
# Try after adding another rule
|
|
||||||
router_rules.append({'source': 'external',
|
|
||||||
'destination': '8.8.8.8/32',
|
|
||||||
'action': 'permit', 'nexthops': []})
|
|
||||||
body = self._update('routers', r['router']['id'],
|
|
||||||
{'router': {'router_rules': router_rules}})
|
|
||||||
|
|
||||||
body = self._show('routers', r['router']['id'])
|
|
||||||
self.assertIn('router_rules', body['router'])
|
|
||||||
rules = body['router']['router_rules']
|
|
||||||
self.assertEqual(_strip_rule_ids(rules), router_rules)
|
|
||||||
|
|
||||||
def test_router_rules_separation(self):
|
|
||||||
with self.router() as r1:
|
|
||||||
with self.router() as r2:
|
|
||||||
r1_id = r1['router']['id']
|
|
||||||
r2_id = r2['router']['id']
|
|
||||||
router1_rules = [{'destination': '5.6.7.8/32',
|
|
||||||
'source': '8.7.6.5/32',
|
|
||||||
'action': 'permit',
|
|
||||||
'nexthops': ['8.8.8.8', '9.9.9.9']}]
|
|
||||||
router2_rules = [{'destination': '1.2.3.4/32',
|
|
||||||
'source': '4.3.2.1/32',
|
|
||||||
'action': 'permit',
|
|
||||||
'nexthops': ['4.4.4.4', '4.4.4.5']}]
|
|
||||||
body1 = self._update('routers', r1_id,
|
|
||||||
{'router':
|
|
||||||
{'router_rules': router1_rules}})
|
|
||||||
body2 = self._update('routers', r2_id,
|
|
||||||
{'router':
|
|
||||||
{'router_rules': router2_rules}})
|
|
||||||
|
|
||||||
body1 = self._show('routers', r1_id)
|
|
||||||
body2 = self._show('routers', r2_id)
|
|
||||||
rules1 = body1['router']['router_rules']
|
|
||||||
rules2 = body2['router']['router_rules']
|
|
||||||
self.assertEqual(_strip_rule_ids(rules1), router1_rules)
|
|
||||||
self.assertEqual(_strip_rule_ids(rules2), router2_rules)
|
|
||||||
|
|
||||||
def test_router_rules_validation(self):
|
|
||||||
with self.router() as r:
|
|
||||||
r_id = r['router']['id']
|
|
||||||
good_rules = [{'destination': '1.2.3.4/32',
|
|
||||||
'source': '4.3.2.1/32',
|
|
||||||
'action': 'permit',
|
|
||||||
'nexthops': ['4.4.4.4', '4.4.4.5']}]
|
|
||||||
|
|
||||||
body = self._update('routers', r_id,
|
|
||||||
{'router': {'router_rules': good_rules}})
|
|
||||||
body = self._show('routers', r_id)
|
|
||||||
self.assertIn('router_rules', body['router'])
|
|
||||||
self.assertEqual(good_rules,
|
|
||||||
_strip_rule_ids(body['router']['router_rules']))
|
|
||||||
|
|
||||||
# Missing nexthops should be populated with an empty list
|
|
||||||
light_rules = copy.deepcopy(good_rules)
|
|
||||||
del light_rules[0]['nexthops']
|
|
||||||
body = self._update('routers', r_id,
|
|
||||||
{'router': {'router_rules': light_rules}})
|
|
||||||
body = self._show('routers', r_id)
|
|
||||||
self.assertIn('router_rules', body['router'])
|
|
||||||
light_rules[0]['nexthops'] = []
|
|
||||||
self.assertEqual(light_rules,
|
|
||||||
_strip_rule_ids(body['router']['router_rules']))
|
|
||||||
# bad CIDR
|
|
||||||
bad_rules = copy.deepcopy(good_rules)
|
|
||||||
bad_rules[0]['destination'] = '1.1.1.1'
|
|
||||||
body = self._update('routers', r_id,
|
|
||||||
{'router': {'router_rules': bad_rules}},
|
|
||||||
expected_code=exc.HTTPBadRequest.code)
|
|
||||||
# bad next hop
|
|
||||||
bad_rules = copy.deepcopy(good_rules)
|
|
||||||
bad_rules[0]['nexthops'] = ['1.1.1.1', 'f2']
|
|
||||||
body = self._update('routers', r_id,
|
|
||||||
{'router': {'router_rules': bad_rules}},
|
|
||||||
expected_code=exc.HTTPBadRequest.code)
|
|
||||||
# bad action
|
|
||||||
bad_rules = copy.deepcopy(good_rules)
|
|
||||||
bad_rules[0]['action'] = 'dance'
|
|
||||||
body = self._update('routers', r_id,
|
|
||||||
{'router': {'router_rules': bad_rules}},
|
|
||||||
expected_code=exc.HTTPBadRequest.code)
|
|
||||||
# duplicate rule with opposite action
|
|
||||||
bad_rules = copy.deepcopy(good_rules)
|
|
||||||
bad_rules.append(copy.deepcopy(bad_rules[0]))
|
|
||||||
bad_rules.append(copy.deepcopy(bad_rules[0]))
|
|
||||||
bad_rules[1]['source'] = 'any'
|
|
||||||
bad_rules[2]['action'] = 'deny'
|
|
||||||
body = self._update('routers', r_id,
|
|
||||||
{'router': {'router_rules': bad_rules}},
|
|
||||||
expected_code=exc.HTTPBadRequest.code)
|
|
||||||
# duplicate nexthop
|
|
||||||
bad_rules = copy.deepcopy(good_rules)
|
|
||||||
bad_rules[0]['nexthops'] = ['1.1.1.1', '1.1.1.1']
|
|
||||||
body = self._update('routers', r_id,
|
|
||||||
{'router': {'router_rules': bad_rules}},
|
|
||||||
expected_code=exc.HTTPBadRequest.code)
|
|
||||||
# make sure light rules persisted during bad updates
|
|
||||||
body = self._show('routers', r_id)
|
|
||||||
self.assertIn('router_rules', body['router'])
|
|
||||||
self.assertEqual(light_rules,
|
|
||||||
_strip_rule_ids(body['router']['router_rules']))
|
|
||||||
|
|
||||||
def test_router_rules_config_change(self):
|
|
||||||
cfg.CONF.set_override('tenant_default_router_rule',
|
|
||||||
['*:any:any:deny',
|
|
||||||
'*:8.8.8.8/32:any:permit:1.2.3.4'],
|
|
||||||
'ROUTER')
|
|
||||||
with self.router() as r:
|
|
||||||
body = self._show('routers', r['router']['id'])
|
|
||||||
expected_rules = [{'source': 'any', 'destination': 'any',
|
|
||||||
'nexthops': [], 'action': 'deny'},
|
|
||||||
{'source': '8.8.8.8/32', 'destination': 'any',
|
|
||||||
'nexthops': ['1.2.3.4'], 'action': 'permit'}]
|
|
||||||
self.assertEqual(expected_rules,
|
|
||||||
_strip_rule_ids(body['router']['router_rules']))
|
|
||||||
|
|
||||||
def test_rule_exhaustion(self):
|
|
||||||
cfg.CONF.set_override('max_router_rules', 10, 'ROUTER')
|
|
||||||
with self.router() as r:
|
|
||||||
rules = []
|
|
||||||
for i in moves.xrange(1, 12):
|
|
||||||
rule = {'source': 'any', 'nexthops': [],
|
|
||||||
'destination': '1.1.1.' + str(i) + '/32',
|
|
||||||
'action': 'permit'}
|
|
||||||
rules.append(rule)
|
|
||||||
self._update('routers', r['router']['id'],
|
|
||||||
{'router': {'router_rules': rules}},
|
|
||||||
expected_code=exc.HTTPBadRequest.code)
|
|
||||||
|
|
||||||
def test_rollback_on_router_create(self):
|
|
||||||
tid = test_api_v2._uuid()
|
|
||||||
self.httpPatch.stop()
|
|
||||||
with mock.patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
|
|
||||||
self._create_router('json', tid)
|
|
||||||
self.assertTrue(len(self._get_routers(tid)) == 0)
|
|
||||||
|
|
||||||
def test_rollback_on_router_update(self):
|
|
||||||
with self.router() as r:
|
|
||||||
data = {'router': {'name': 'aNewName'}}
|
|
||||||
self.httpPatch.stop()
|
|
||||||
with mock.patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
|
|
||||||
self.new_update_request(
|
|
||||||
'routers', data, r['router']['id']).get_response(self.api)
|
|
||||||
self.httpPatch.start()
|
|
||||||
updatedr = self._get_routers(r['router']['tenant_id'])[0]
|
|
||||||
# name should have stayed the same due to failure
|
|
||||||
self.assertEqual(r['router']['name'], updatedr['name'])
|
|
||||||
|
|
||||||
def test_rollback_on_router_delete(self):
|
|
||||||
with self.router() as r:
|
|
||||||
self.httpPatch.stop()
|
|
||||||
with mock.patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
|
|
||||||
self._delete('routers', r['router']['id'],
|
|
||||||
expected_code=exc.HTTPInternalServerError.code)
|
|
||||||
self.httpPatch.start()
|
|
||||||
self.assertEqual(r['router']['id'],
|
|
||||||
self._get_routers(r['router']['tenant_id']
|
|
||||||
)[0]['id'])
|
|
||||||
|
|
||||||
def _get_routers(self, tenant_id):
|
|
||||||
ctx = context.Context('', tenant_id)
|
|
||||||
return self.plugin_obj.get_routers(ctx)
|
|
||||||
|
|
||||||
|
|
||||||
def _strip_rule_ids(rules):
|
|
||||||
cleaned = []
|
|
||||||
for rule in rules:
|
|
||||||
del rule['id']
|
|
||||||
cleaned.append(rule)
|
|
||||||
return cleaned
|
|
|
@ -1,47 +0,0 @@
|
||||||
# Copyright 2014, Big Switch Networks
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
from neutron import manager
|
|
||||||
from neutron.tests.unit.bigswitch import test_base
|
|
||||||
from neutron.tests.unit import test_extension_security_group as test_sg
|
|
||||||
from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc
|
|
||||||
|
|
||||||
|
|
||||||
class RestProxySecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase,
|
|
||||||
test_base.BigSwitchTestBase):
|
|
||||||
plugin_str = ('%s.NeutronRestProxyV2' %
|
|
||||||
test_base.RESTPROXY_PKG_PATH)
|
|
||||||
|
|
||||||
def setUp(self, plugin=None):
|
|
||||||
test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_HYBRID_DRIVER)
|
|
||||||
self.setup_config_files()
|
|
||||||
self.setup_patches()
|
|
||||||
self._attribute_map_bk_ = {}
|
|
||||||
super(RestProxySecurityGroupsTestCase, self).setUp(self.plugin_str)
|
|
||||||
plugin = manager.NeutronManager.get_plugin()
|
|
||||||
self.notifier = plugin.notifier
|
|
||||||
self.rpc = plugin.endpoints[0]
|
|
||||||
self.startHttpPatch()
|
|
||||||
|
|
||||||
|
|
||||||
class TestSecServerRpcCallBack(test_sg_rpc.SGServerRpcCallBackMixinTestCase,
|
|
||||||
RestProxySecurityGroupsTestCase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestSecurityGroupsMixin(test_sg.TestSecurityGroups,
|
|
||||||
test_sg_rpc.SGNotificationTestMixin,
|
|
||||||
RestProxySecurityGroupsTestCase):
|
|
||||||
pass
|
|
|
@ -1,467 +0,0 @@
|
||||||
# Copyright 2014 Big Switch Networks, Inc. All rights reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Kevin Benton, kevin.benton@bigswitch.com
|
|
||||||
#
|
|
||||||
import contextlib
|
|
||||||
import httplib
|
|
||||||
import socket
|
|
||||||
import ssl
|
|
||||||
|
|
||||||
import mock
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from neutron import manager
|
|
||||||
from neutron.openstack.common import importutils
|
|
||||||
from neutron.plugins.bigswitch import servermanager
|
|
||||||
from neutron.tests.unit.bigswitch import test_restproxy_plugin as test_rp
|
|
||||||
|
|
||||||
SERVERMANAGER = 'neutron.plugins.bigswitch.servermanager'
|
|
||||||
HTTPCON = SERVERMANAGER + '.httplib.HTTPConnection'
|
|
||||||
HTTPSCON = SERVERMANAGER + '.HTTPSConnectionWithValidation'
|
|
||||||
|
|
||||||
|
|
||||||
class ServerManagerTests(test_rp.BigSwitchProxyPluginV2TestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
self.socket_mock = mock.patch(
|
|
||||||
SERVERMANAGER + '.socket.create_connection').start()
|
|
||||||
self.wrap_mock = mock.patch(SERVERMANAGER + '.ssl.wrap_socket').start()
|
|
||||||
super(ServerManagerTests, self).setUp()
|
|
||||||
# http patch must not be running or it will mangle the servermanager
|
|
||||||
# import where the https connection classes are defined
|
|
||||||
self.httpPatch.stop()
|
|
||||||
self.sm = importutils.import_module(SERVERMANAGER)
|
|
||||||
|
|
||||||
def test_no_servers(self):
|
|
||||||
cfg.CONF.set_override('servers', [], 'RESTPROXY')
|
|
||||||
self.assertRaises(cfg.Error, servermanager.ServerPool)
|
|
||||||
|
|
||||||
def test_malformed_servers(self):
|
|
||||||
cfg.CONF.set_override('servers', ['1.2.3.4', '1.1.1.1:a'], 'RESTPROXY')
|
|
||||||
self.assertRaises(cfg.Error, servermanager.ServerPool)
|
|
||||||
|
|
||||||
def test_ipv6_server_address(self):
|
|
||||||
cfg.CONF.set_override(
|
|
||||||
'servers', ['[ABCD:EF01:2345:6789:ABCD:EF01:2345:6789]:80'],
|
|
||||||
'RESTPROXY')
|
|
||||||
s = servermanager.ServerPool()
|
|
||||||
self.assertEqual(s.servers[0].server,
|
|
||||||
'[ABCD:EF01:2345:6789:ABCD:EF01:2345:6789]')
|
|
||||||
|
|
||||||
def test_sticky_cert_fetch_fail(self):
|
|
||||||
pl = manager.NeutronManager.get_plugin()
|
|
||||||
pl.servers.ssl = True
|
|
||||||
with mock.patch(
|
|
||||||
'ssl.get_server_certificate',
|
|
||||||
side_effect=Exception('There is no more entropy in the universe')
|
|
||||||
) as sslgetmock:
|
|
||||||
self.assertRaises(
|
|
||||||
cfg.Error,
|
|
||||||
pl.servers._get_combined_cert_for_server,
|
|
||||||
*('example.org', 443)
|
|
||||||
)
|
|
||||||
sslgetmock.assert_has_calls([mock.call(('example.org', 443))])
|
|
||||||
|
|
||||||
def test_consistency_watchdog_stops_with_0_polling_interval(self):
|
|
||||||
pl = manager.NeutronManager.get_plugin()
|
|
||||||
pl.servers.capabilities = ['consistency']
|
|
||||||
self.watch_p.stop()
|
|
||||||
with mock.patch('eventlet.sleep') as smock:
|
|
||||||
# should return immediately a polling interval of 0
|
|
||||||
pl.servers._consistency_watchdog(0)
|
|
||||||
self.assertFalse(smock.called)
|
|
||||||
|
|
||||||
def test_consistency_watchdog(self):
|
|
||||||
pl = manager.NeutronManager.get_plugin()
|
|
||||||
pl.servers.capabilities = []
|
|
||||||
self.watch_p.stop()
|
|
||||||
with contextlib.nested(
|
|
||||||
mock.patch('eventlet.sleep'),
|
|
||||||
mock.patch(
|
|
||||||
SERVERMANAGER + '.ServerPool.rest_call',
|
|
||||||
side_effect=servermanager.RemoteRestError(
|
|
||||||
reason='Failure to trigger except clause.'
|
|
||||||
)
|
|
||||||
),
|
|
||||||
mock.patch(
|
|
||||||
SERVERMANAGER + '.LOG.exception',
|
|
||||||
side_effect=KeyError('Failure to break loop')
|
|
||||||
)
|
|
||||||
) as (smock, rmock, lmock):
|
|
||||||
# should return immediately without consistency capability
|
|
||||||
pl.servers._consistency_watchdog()
|
|
||||||
self.assertFalse(smock.called)
|
|
||||||
pl.servers.capabilities = ['consistency']
|
|
||||||
self.assertRaises(KeyError,
|
|
||||||
pl.servers._consistency_watchdog)
|
|
||||||
rmock.assert_called_with('GET', '/health', '', {}, [], False)
|
|
||||||
self.assertEqual(1, len(lmock.mock_calls))
|
|
||||||
|
|
||||||
def test_consistency_hash_header(self):
|
|
||||||
# mock HTTP class instead of rest_call so we can see headers
|
|
||||||
with mock.patch(HTTPCON) as conmock:
|
|
||||||
rv = conmock.return_value
|
|
||||||
rv.getresponse.return_value.getheader.return_value = 'HASHHEADER'
|
|
||||||
with self.network():
|
|
||||||
callheaders = rv.request.mock_calls[0][1][3]
|
|
||||||
self.assertIn('X-BSN-BVS-HASH-MATCH', callheaders)
|
|
||||||
# first call will be empty to indicate no previous state hash
|
|
||||||
self.assertEqual(callheaders['X-BSN-BVS-HASH-MATCH'], '')
|
|
||||||
# change the header that will be received on delete call
|
|
||||||
rv.getresponse.return_value.getheader.return_value = 'HASH2'
|
|
||||||
|
|
||||||
# net delete should have used header received on create
|
|
||||||
callheaders = rv.request.mock_calls[1][1][3]
|
|
||||||
self.assertEqual(callheaders['X-BSN-BVS-HASH-MATCH'], 'HASHHEADER')
|
|
||||||
|
|
||||||
# create again should now use header received from prev delete
|
|
||||||
with self.network():
|
|
||||||
callheaders = rv.request.mock_calls[2][1][3]
|
|
||||||
self.assertIn('X-BSN-BVS-HASH-MATCH', callheaders)
|
|
||||||
self.assertEqual(callheaders['X-BSN-BVS-HASH-MATCH'],
|
|
||||||
'HASH2')
|
|
||||||
|
|
||||||
def test_file_put_contents(self):
|
|
||||||
pl = manager.NeutronManager.get_plugin()
|
|
||||||
with mock.patch(SERVERMANAGER + '.open', create=True) as omock:
|
|
||||||
pl.servers._file_put_contents('somepath', 'contents')
|
|
||||||
omock.assert_has_calls([mock.call('somepath', 'w')])
|
|
||||||
omock.return_value.__enter__.return_value.assert_has_calls([
|
|
||||||
mock.call.write('contents')
|
|
||||||
])
|
|
||||||
|
|
||||||
def test_combine_certs_to_file(self):
|
|
||||||
pl = manager.NeutronManager.get_plugin()
|
|
||||||
with mock.patch(SERVERMANAGER + '.open', create=True) as omock:
|
|
||||||
omock.return_value.__enter__().read.return_value = 'certdata'
|
|
||||||
pl.servers._combine_certs_to_file(['cert1.pem', 'cert2.pem'],
|
|
||||||
'combined.pem')
|
|
||||||
# mock shared between read and write file handles so the calls
|
|
||||||
# are mixed together
|
|
||||||
omock.assert_has_calls([
|
|
||||||
mock.call('combined.pem', 'w'),
|
|
||||||
mock.call('cert1.pem', 'r'),
|
|
||||||
mock.call('cert2.pem', 'r'),
|
|
||||||
], any_order=True)
|
|
||||||
omock.return_value.__enter__.return_value.assert_has_calls([
|
|
||||||
mock.call.read(),
|
|
||||||
mock.call.write('certdata'),
|
|
||||||
mock.call.read(),
|
|
||||||
mock.call.write('certdata')
|
|
||||||
])
|
|
||||||
|
|
||||||
def test_auth_header(self):
|
|
||||||
cfg.CONF.set_override('server_auth', 'username:pass', 'RESTPROXY')
|
|
||||||
sp = servermanager.ServerPool()
|
|
||||||
with mock.patch(HTTPCON) as conmock:
|
|
||||||
rv = conmock.return_value
|
|
||||||
rv.getresponse.return_value.getheader.return_value = 'HASHHEADER'
|
|
||||||
sp.rest_create_network('tenant', 'network')
|
|
||||||
callheaders = rv.request.mock_calls[0][1][3]
|
|
||||||
self.assertIn('Authorization', callheaders)
|
|
||||||
self.assertEqual(callheaders['Authorization'],
|
|
||||||
'Basic dXNlcm5hbWU6cGFzcw==')
|
|
||||||
|
|
||||||
def test_header_add(self):
|
|
||||||
sp = servermanager.ServerPool()
|
|
||||||
with mock.patch(HTTPCON) as conmock:
|
|
||||||
rv = conmock.return_value
|
|
||||||
rv.getresponse.return_value.getheader.return_value = 'HASHHEADER'
|
|
||||||
sp.servers[0].rest_call('GET', '/', headers={'EXTRA-HEADER': 'HI'})
|
|
||||||
callheaders = rv.request.mock_calls[0][1][3]
|
|
||||||
# verify normal headers weren't mangled
|
|
||||||
self.assertIn('Content-type', callheaders)
|
|
||||||
self.assertEqual(callheaders['Content-type'],
|
|
||||||
'application/json')
|
|
||||||
# verify new header made it in
|
|
||||||
self.assertIn('EXTRA-HEADER', callheaders)
|
|
||||||
self.assertEqual(callheaders['EXTRA-HEADER'], 'HI')
|
|
||||||
|
|
||||||
def test_capabilities_retrieval(self):
|
|
||||||
sp = servermanager.ServerPool()
|
|
||||||
with mock.patch(HTTPCON) as conmock:
|
|
||||||
rv = conmock.return_value.getresponse.return_value
|
|
||||||
rv.getheader.return_value = 'HASHHEADER'
|
|
||||||
|
|
||||||
# each server will get different capabilities
|
|
||||||
rv.read.side_effect = ['["a","b","c"]', '["b","c","d"]']
|
|
||||||
# pool capabilities is intersection between both
|
|
||||||
self.assertEqual(set(['b', 'c']), sp.get_capabilities())
|
|
||||||
self.assertEqual(2, rv.read.call_count)
|
|
||||||
|
|
||||||
# the pool should cache after the first call so no more
|
|
||||||
# HTTP calls should be made
|
|
||||||
rv.read.side_effect = ['["w","x","y"]', '["x","y","z"]']
|
|
||||||
self.assertEqual(set(['b', 'c']), sp.get_capabilities())
|
|
||||||
self.assertEqual(2, rv.read.call_count)
|
|
||||||
|
|
||||||
def test_capabilities_retrieval_failure(self):
|
|
||||||
sp = servermanager.ServerPool()
|
|
||||||
with mock.patch(HTTPCON) as conmock:
|
|
||||||
rv = conmock.return_value.getresponse.return_value
|
|
||||||
rv.getheader.return_value = 'HASHHEADER'
|
|
||||||
# a failure to parse should result in an empty capability set
|
|
||||||
rv.read.return_value = 'XXXXX'
|
|
||||||
self.assertEqual([], sp.servers[0].get_capabilities())
|
|
||||||
|
|
||||||
# One broken server should affect all capabilities
|
|
||||||
rv.read.side_effect = ['{"a": "b"}', '["b","c","d"]']
|
|
||||||
self.assertEqual(set(), sp.get_capabilities())
|
|
||||||
|
|
||||||
def test_reconnect_on_timeout_change(self):
|
|
||||||
sp = servermanager.ServerPool()
|
|
||||||
with mock.patch(HTTPCON) as conmock:
|
|
||||||
rv = conmock.return_value
|
|
||||||
rv.getresponse.return_value.getheader.return_value = 'HASHHEADER'
|
|
||||||
sp.servers[0].capabilities = ['keep-alive']
|
|
||||||
sp.servers[0].rest_call('GET', '/', timeout=10)
|
|
||||||
# even with keep-alive enabled, a change in timeout will trigger
|
|
||||||
# a reconnect
|
|
||||||
sp.servers[0].rest_call('GET', '/', timeout=75)
|
|
||||||
conmock.assert_has_calls([
|
|
||||||
mock.call('localhost', 9000, timeout=10),
|
|
||||||
mock.call('localhost', 9000, timeout=75),
|
|
||||||
], any_order=True)
|
|
||||||
|
|
||||||
def test_connect_failures(self):
|
|
||||||
sp = servermanager.ServerPool()
|
|
||||||
with mock.patch(HTTPCON, return_value=None):
|
|
||||||
resp = sp.servers[0].rest_call('GET', '/')
|
|
||||||
self.assertEqual(resp, (0, None, None, None))
|
|
||||||
# verify same behavior on ssl class
|
|
||||||
sp.servers[0].currentcon = False
|
|
||||||
sp.servers[0].ssl = True
|
|
||||||
with mock.patch(HTTPSCON, return_value=None):
|
|
||||||
resp = sp.servers[0].rest_call('GET', '/')
|
|
||||||
self.assertEqual(resp, (0, None, None, None))
|
|
||||||
|
|
||||||
def test_reconnect_cached_connection(self):
|
|
||||||
sp = servermanager.ServerPool()
|
|
||||||
with mock.patch(HTTPCON) as conmock:
|
|
||||||
rv = conmock.return_value
|
|
||||||
rv.getresponse.return_value.getheader.return_value = 'HASH'
|
|
||||||
sp.servers[0].capabilities = ['keep-alive']
|
|
||||||
sp.servers[0].rest_call('GET', '/first')
|
|
||||||
# raise an error on re-use to verify reconnect
|
|
||||||
# return okay the second time so the reconnect works
|
|
||||||
rv.request.side_effect = [httplib.ImproperConnectionState(),
|
|
||||||
mock.MagicMock()]
|
|
||||||
sp.servers[0].rest_call('GET', '/second')
|
|
||||||
uris = [c[1][1] for c in rv.request.mock_calls]
|
|
||||||
expected = [
|
|
||||||
sp.base_uri + '/first',
|
|
||||||
sp.base_uri + '/second',
|
|
||||||
sp.base_uri + '/second',
|
|
||||||
]
|
|
||||||
self.assertEqual(uris, expected)
|
|
||||||
|
|
||||||
def test_no_reconnect_recurse_to_infinity(self):
|
|
||||||
# retry uses recursion when a reconnect is necessary
|
|
||||||
# this test makes sure it stops after 1 recursive call
|
|
||||||
sp = servermanager.ServerPool()
|
|
||||||
with mock.patch(HTTPCON) as conmock:
|
|
||||||
rv = conmock.return_value
|
|
||||||
# hash header must be string instead of mock object
|
|
||||||
rv.getresponse.return_value.getheader.return_value = 'HASH'
|
|
||||||
sp.servers[0].capabilities = ['keep-alive']
|
|
||||||
sp.servers[0].rest_call('GET', '/first')
|
|
||||||
# after retrying once, the rest call should raise the
|
|
||||||
# exception up
|
|
||||||
rv.request.side_effect = httplib.ImproperConnectionState()
|
|
||||||
self.assertRaises(httplib.ImproperConnectionState,
|
|
||||||
sp.servers[0].rest_call,
|
|
||||||
*('GET', '/second'))
|
|
||||||
# 1 for the first call, 2 for the second with retry
|
|
||||||
self.assertEqual(rv.request.call_count, 3)
|
|
||||||
|
|
||||||
def test_socket_error(self):
|
|
||||||
sp = servermanager.ServerPool()
|
|
||||||
with mock.patch(HTTPCON) as conmock:
|
|
||||||
conmock.return_value.request.side_effect = socket.timeout()
|
|
||||||
resp = sp.servers[0].rest_call('GET', '/')
|
|
||||||
self.assertEqual(resp, (0, None, None, None))
|
|
||||||
|
|
||||||
def test_cert_get_fail(self):
|
|
||||||
pl = manager.NeutronManager.get_plugin()
|
|
||||||
pl.servers.ssl = True
|
|
||||||
with mock.patch('os.path.exists', return_value=False):
|
|
||||||
self.assertRaises(cfg.Error,
|
|
||||||
pl.servers._get_combined_cert_for_server,
|
|
||||||
*('example.org', 443))
|
|
||||||
|
|
||||||
def test_cert_make_dirs(self):
|
|
||||||
pl = manager.NeutronManager.get_plugin()
|
|
||||||
pl.servers.ssl = True
|
|
||||||
cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY')
|
|
||||||
# pretend base dir exists, 3 children don't, and host cert does
|
|
||||||
with contextlib.nested(
|
|
||||||
mock.patch('os.path.exists', side_effect=[True, False, False,
|
|
||||||
False, True]),
|
|
||||||
mock.patch('os.makedirs'),
|
|
||||||
mock.patch(SERVERMANAGER + '.ServerPool._combine_certs_to_file')
|
|
||||||
) as (exmock, makemock, combmock):
|
|
||||||
# will raise error because no certs found
|
|
||||||
self.assertIn(
|
|
||||||
'example.org',
|
|
||||||
pl.servers._get_combined_cert_for_server('example.org', 443)
|
|
||||||
)
|
|
||||||
base = cfg.CONF.RESTPROXY.ssl_cert_directory
|
|
||||||
hpath = base + '/host_certs/example.org.pem'
|
|
||||||
combpath = base + '/combined/example.org.pem'
|
|
||||||
combmock.assert_has_calls([mock.call([hpath], combpath)])
|
|
||||||
self.assertEqual(exmock.call_count, 5)
|
|
||||||
self.assertEqual(makemock.call_count, 3)
|
|
||||||
|
|
||||||
def test_no_cert_error(self):
|
|
||||||
pl = manager.NeutronManager.get_plugin()
|
|
||||||
pl.servers.ssl = True
|
|
||||||
cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY')
|
|
||||||
# pretend base dir exists and 3 children do, but host cert doesn't
|
|
||||||
with mock.patch(
|
|
||||||
'os.path.exists',
|
|
||||||
side_effect=[True, True, True, True, False]
|
|
||||||
) as exmock:
|
|
||||||
# will raise error because no certs found
|
|
||||||
self.assertRaises(
|
|
||||||
cfg.Error,
|
|
||||||
pl.servers._get_combined_cert_for_server,
|
|
||||||
*('example.org', 443)
|
|
||||||
)
|
|
||||||
self.assertEqual(exmock.call_count, 5)
|
|
||||||
|
|
||||||
def test_action_success(self):
|
|
||||||
pl = manager.NeutronManager.get_plugin()
|
|
||||||
self.assertTrue(pl.servers.action_success((200,)))
|
|
||||||
|
|
||||||
def test_server_failure(self):
|
|
||||||
pl = manager.NeutronManager.get_plugin()
|
|
||||||
self.assertTrue(pl.servers.server_failure((404,)))
|
|
||||||
# server failure has an ignore codes option
|
|
||||||
self.assertFalse(pl.servers.server_failure((404,),
|
|
||||||
ignore_codes=[404]))
|
|
||||||
|
|
||||||
def test_conflict_triggers_sync(self):
|
|
||||||
pl = manager.NeutronManager.get_plugin()
|
|
||||||
with mock.patch(
|
|
||||||
SERVERMANAGER + '.ServerProxy.rest_call',
|
|
||||||
return_value=(httplib.CONFLICT, 0, 0, 0)
|
|
||||||
) as srestmock:
|
|
||||||
# making a call should trigger a conflict sync
|
|
||||||
pl.servers.rest_call('GET', '/', '', None, [])
|
|
||||||
srestmock.assert_has_calls([
|
|
||||||
mock.call('GET', '/', '', None, False, reconnect=True),
|
|
||||||
mock.call('PUT', '/topology',
|
|
||||||
{'routers': [], 'networks': []},
|
|
||||||
timeout=None)
|
|
||||||
])
|
|
||||||
|
|
||||||
def test_conflict_sync_raises_error_without_topology(self):
|
|
||||||
pl = manager.NeutronManager.get_plugin()
|
|
||||||
pl.servers.get_topo_function = None
|
|
||||||
with mock.patch(
|
|
||||||
SERVERMANAGER + '.ServerProxy.rest_call',
|
|
||||||
return_value=(httplib.CONFLICT, 0, 0, 0)
|
|
||||||
):
|
|
||||||
# making a call should trigger a conflict sync that will
|
|
||||||
# error without the topology function set
|
|
||||||
self.assertRaises(
|
|
||||||
cfg.Error,
|
|
||||||
pl.servers.rest_call,
|
|
||||||
*('GET', '/', '', None, [])
|
|
||||||
)
|
|
||||||
|
|
||||||
def test_floating_calls(self):
|
|
||||||
pl = manager.NeutronManager.get_plugin()
|
|
||||||
with mock.patch(SERVERMANAGER + '.ServerPool.rest_action') as ramock:
|
|
||||||
pl.servers.rest_create_floatingip('tenant', {'id': 'somefloat'})
|
|
||||||
pl.servers.rest_update_floatingip('tenant', {'name': 'myfl'}, 'id')
|
|
||||||
pl.servers.rest_delete_floatingip('tenant', 'oldid')
|
|
||||||
ramock.assert_has_calls([
|
|
||||||
mock.call('PUT', '/tenants/tenant/floatingips/somefloat',
|
|
||||||
errstr=u'Unable to create floating IP: %s'),
|
|
||||||
mock.call('PUT', '/tenants/tenant/floatingips/id',
|
|
||||||
errstr=u'Unable to update floating IP: %s'),
|
|
||||||
mock.call('DELETE', '/tenants/tenant/floatingips/oldid',
|
|
||||||
errstr=u'Unable to delete floating IP: %s')
|
|
||||||
])
|
|
||||||
|
|
||||||
def test_HTTPSConnectionWithValidation_without_cert(self):
|
|
||||||
con = self.sm.HTTPSConnectionWithValidation(
|
|
||||||
'www.example.org', 443, timeout=90)
|
|
||||||
con.source_address = '127.0.0.1'
|
|
||||||
con.request("GET", "/")
|
|
||||||
self.socket_mock.assert_has_calls([mock.call(
|
|
||||||
('www.example.org', 443), 90, '127.0.0.1'
|
|
||||||
)])
|
|
||||||
self.wrap_mock.assert_has_calls([mock.call(
|
|
||||||
self.socket_mock(), None, None, cert_reqs=ssl.CERT_NONE
|
|
||||||
)])
|
|
||||||
self.assertEqual(con.sock, self.wrap_mock())
|
|
||||||
|
|
||||||
def test_HTTPSConnectionWithValidation_with_cert(self):
|
|
||||||
con = self.sm.HTTPSConnectionWithValidation(
|
|
||||||
'www.example.org', 443, timeout=90)
|
|
||||||
con.combined_cert = 'SOMECERTS.pem'
|
|
||||||
con.source_address = '127.0.0.1'
|
|
||||||
con.request("GET", "/")
|
|
||||||
self.socket_mock.assert_has_calls([mock.call(
|
|
||||||
('www.example.org', 443), 90, '127.0.0.1'
|
|
||||||
)])
|
|
||||||
self.wrap_mock.assert_has_calls([mock.call(
|
|
||||||
self.socket_mock(), None, None, ca_certs='SOMECERTS.pem',
|
|
||||||
cert_reqs=ssl.CERT_REQUIRED
|
|
||||||
)])
|
|
||||||
self.assertEqual(con.sock, self.wrap_mock())
|
|
||||||
|
|
||||||
def test_HTTPSConnectionWithValidation_tunnel(self):
|
|
||||||
tunnel_mock = mock.patch.object(
|
|
||||||
self.sm.HTTPSConnectionWithValidation,
|
|
||||||
'_tunnel').start()
|
|
||||||
con = self.sm.HTTPSConnectionWithValidation(
|
|
||||||
'www.example.org', 443, timeout=90)
|
|
||||||
con.source_address = '127.0.0.1'
|
|
||||||
if not hasattr(con, 'set_tunnel'):
|
|
||||||
# no tunnel support in py26
|
|
||||||
return
|
|
||||||
con.set_tunnel('myproxy.local', 3128)
|
|
||||||
con.request("GET", "/")
|
|
||||||
self.socket_mock.assert_has_calls([mock.call(
|
|
||||||
('www.example.org', 443), 90, '127.0.0.1'
|
|
||||||
)])
|
|
||||||
self.wrap_mock.assert_has_calls([mock.call(
|
|
||||||
self.socket_mock(), None, None, cert_reqs=ssl.CERT_NONE
|
|
||||||
)])
|
|
||||||
# _tunnel() doesn't take any args
|
|
||||||
tunnel_mock.assert_has_calls([mock.call()])
|
|
||||||
self.assertEqual(con._tunnel_host, 'myproxy.local')
|
|
||||||
self.assertEqual(con._tunnel_port, 3128)
|
|
||||||
self.assertEqual(con.sock, self.wrap_mock())
|
|
||||||
|
|
||||||
|
|
||||||
class TestSockets(test_rp.BigSwitchProxyPluginV2TestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestSockets, self).setUp()
|
|
||||||
# http patch must not be running or it will mangle the servermanager
|
|
||||||
# import where the https connection classes are defined
|
|
||||||
self.httpPatch.stop()
|
|
||||||
self.sm = importutils.import_module(SERVERMANAGER)
|
|
||||||
|
|
||||||
def test_socket_create_attempt(self):
|
|
||||||
# exercise the socket creation to make sure it works on both python
|
|
||||||
# versions
|
|
||||||
con = self.sm.HTTPSConnectionWithValidation('127.0.0.1', 0, timeout=1)
|
|
||||||
# if httpcon was created, a connect attempt should raise a socket error
|
|
||||||
self.assertRaises(socket.error, con.connect)
|
|
|
@ -1,250 +0,0 @@
|
||||||
# Copyright 2014 Big Switch Networks, Inc. All rights reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Kevin Benton, kevin.benton@bigswitch.com
|
|
||||||
#
|
|
||||||
import contextlib
|
|
||||||
import os
|
|
||||||
|
|
||||||
import mock
|
|
||||||
from oslo.config import cfg
|
|
||||||
import webob.exc
|
|
||||||
|
|
||||||
from neutron.openstack.common import log as logging
|
|
||||||
from neutron.tests.unit.bigswitch import fake_server
|
|
||||||
from neutron.tests.unit.bigswitch import test_base
|
|
||||||
from neutron.tests.unit import test_api_v2
|
|
||||||
from neutron.tests.unit import test_db_plugin as test_plugin
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
SERVERMANAGER = 'neutron.plugins.bigswitch.servermanager'
|
|
||||||
HTTPS = SERVERMANAGER + '.HTTPSConnectionWithValidation'
|
|
||||||
CERTCOMBINER = SERVERMANAGER + '.ServerPool._combine_certs_to_file'
|
|
||||||
FILEPUT = SERVERMANAGER + '.ServerPool._file_put_contents'
|
|
||||||
GETCACERTS = SERVERMANAGER + '.ServerPool._get_ca_cert_paths'
|
|
||||||
GETHOSTCERT = SERVERMANAGER + '.ServerPool._get_host_cert_path'
|
|
||||||
SSLGETCERT = SERVERMANAGER + '.ssl.get_server_certificate'
|
|
||||||
FAKECERTGET = 'neutron.tests.unit.bigswitch.fake_server.get_cert_contents'
|
|
||||||
|
|
||||||
|
|
||||||
class test_ssl_certificate_base(test_plugin.NeutronDbPluginV2TestCase,
|
|
||||||
test_base.BigSwitchTestBase):
|
|
||||||
|
|
||||||
plugin_str = ('%s.NeutronRestProxyV2' %
|
|
||||||
test_base.RESTPROXY_PKG_PATH)
|
|
||||||
servername = None
|
|
||||||
cert_base = None
|
|
||||||
|
|
||||||
def _setUp(self):
|
|
||||||
self.servername = test_api_v2._uuid()
|
|
||||||
self.cert_base = cfg.CONF.RESTPROXY.ssl_cert_directory
|
|
||||||
self.host_cert_val = 'DUMMYCERTFORHOST%s' % self.servername
|
|
||||||
self.host_cert_path = os.path.join(
|
|
||||||
self.cert_base,
|
|
||||||
'host_certs',
|
|
||||||
'%s.pem' % self.servername
|
|
||||||
)
|
|
||||||
self.comb_cert_path = os.path.join(
|
|
||||||
self.cert_base,
|
|
||||||
'combined',
|
|
||||||
'%s.pem' % self.servername
|
|
||||||
)
|
|
||||||
self.ca_certs_path = os.path.join(
|
|
||||||
self.cert_base,
|
|
||||||
'ca_certs'
|
|
||||||
)
|
|
||||||
cfg.CONF.set_override('servers', ["%s:443" % self.servername],
|
|
||||||
'RESTPROXY')
|
|
||||||
self.setup_patches()
|
|
||||||
|
|
||||||
# Mock method SSL lib uses to grab cert from server
|
|
||||||
self.sslgetcert_m = mock.patch(SSLGETCERT, create=True).start()
|
|
||||||
self.sslgetcert_m.return_value = self.host_cert_val
|
|
||||||
|
|
||||||
# Mock methods that write and read certs from the file-system
|
|
||||||
self.fileput_m = mock.patch(FILEPUT, create=True).start()
|
|
||||||
self.certcomb_m = mock.patch(CERTCOMBINER, create=True).start()
|
|
||||||
self.getcacerts_m = mock.patch(GETCACERTS, create=True).start()
|
|
||||||
|
|
||||||
# this is used to configure what certificate contents the fake HTTPS
|
|
||||||
# lib should expect to receive
|
|
||||||
self.fake_certget_m = mock.patch(FAKECERTGET, create=True).start()
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(test_ssl_certificate_base, self).setUp(self.plugin_str)
|
|
||||||
|
|
||||||
|
|
||||||
class TestSslSticky(test_ssl_certificate_base):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
self.setup_config_files()
|
|
||||||
cfg.CONF.set_override('server_ssl', True, 'RESTPROXY')
|
|
||||||
cfg.CONF.set_override('ssl_sticky', True, 'RESTPROXY')
|
|
||||||
self._setUp()
|
|
||||||
# Set fake HTTPS connection's expectation
|
|
||||||
self.fake_certget_m.return_value = self.host_cert_val
|
|
||||||
# No CA certs for this test
|
|
||||||
self.getcacerts_m.return_value = []
|
|
||||||
super(TestSslSticky, self).setUp()
|
|
||||||
|
|
||||||
def test_sticky_cert(self):
|
|
||||||
# SSL connection should be successful and cert should be cached
|
|
||||||
with contextlib.nested(
|
|
||||||
mock.patch(HTTPS, new=fake_server.HTTPSHostValidation),
|
|
||||||
self.network()
|
|
||||||
):
|
|
||||||
# CA certs should have been checked for
|
|
||||||
self.getcacerts_m.assert_has_calls([mock.call(self.ca_certs_path)])
|
|
||||||
# cert should have been fetched via SSL lib
|
|
||||||
self.sslgetcert_m.assert_has_calls(
|
|
||||||
[mock.call((self.servername, 443))]
|
|
||||||
)
|
|
||||||
|
|
||||||
# cert should have been recorded
|
|
||||||
self.fileput_m.assert_has_calls([mock.call(self.host_cert_path,
|
|
||||||
self.host_cert_val)])
|
|
||||||
# no ca certs, so host cert only for this combined cert
|
|
||||||
self.certcomb_m.assert_has_calls([mock.call([self.host_cert_path],
|
|
||||||
self.comb_cert_path)])
|
|
||||||
|
|
||||||
|
|
||||||
class TestSslHostCert(test_ssl_certificate_base):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
self.setup_config_files()
|
|
||||||
cfg.CONF.set_override('server_ssl', True, 'RESTPROXY')
|
|
||||||
cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY')
|
|
||||||
self.httpsPatch = mock.patch(HTTPS, create=True,
|
|
||||||
new=fake_server.HTTPSHostValidation)
|
|
||||||
self.httpsPatch.start()
|
|
||||||
self._setUp()
|
|
||||||
# Set fake HTTPS connection's expectation
|
|
||||||
self.fake_certget_m.return_value = self.host_cert_val
|
|
||||||
# No CA certs for this test
|
|
||||||
self.getcacerts_m.return_value = []
|
|
||||||
# Pretend host cert exists
|
|
||||||
self.hcertpath_p = mock.patch(GETHOSTCERT,
|
|
||||||
return_value=(self.host_cert_path, True),
|
|
||||||
create=True).start()
|
|
||||||
super(TestSslHostCert, self).setUp()
|
|
||||||
|
|
||||||
def test_host_cert(self):
|
|
||||||
# SSL connection should be successful because of pre-configured cert
|
|
||||||
with self.network():
|
|
||||||
self.hcertpath_p.assert_has_calls([
|
|
||||||
mock.call(os.path.join(self.cert_base, 'host_certs'),
|
|
||||||
self.servername)
|
|
||||||
])
|
|
||||||
# sticky is disabled, no fetching allowed
|
|
||||||
self.assertFalse(self.sslgetcert_m.call_count)
|
|
||||||
# no ca certs, so host cert is only for this combined cert
|
|
||||||
self.certcomb_m.assert_has_calls([mock.call([self.host_cert_path],
|
|
||||||
self.comb_cert_path)])
|
|
||||||
|
|
||||||
|
|
||||||
class TestSslCaCert(test_ssl_certificate_base):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
self.setup_config_files()
|
|
||||||
cfg.CONF.set_override('server_ssl', True, 'RESTPROXY')
|
|
||||||
cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY')
|
|
||||||
self.httpsPatch = mock.patch(HTTPS, create=True,
|
|
||||||
new=fake_server.HTTPSCAValidation)
|
|
||||||
self.httpsPatch.start()
|
|
||||||
self._setUp()
|
|
||||||
|
|
||||||
# pretend to have a few ca certs
|
|
||||||
self.getcacerts_m.return_value = ['ca1.pem', 'ca2.pem']
|
|
||||||
|
|
||||||
# Set fake HTTPS connection's expectation
|
|
||||||
self.fake_certget_m.return_value = 'DUMMYCERTIFICATEAUTHORITY'
|
|
||||||
|
|
||||||
super(TestSslCaCert, self).setUp()
|
|
||||||
|
|
||||||
def test_ca_cert(self):
|
|
||||||
# SSL connection should be successful because CA cert was present
|
|
||||||
# If not, attempting to create a network would raise an exception
|
|
||||||
with self.network():
|
|
||||||
# sticky is disabled, no fetching allowed
|
|
||||||
self.assertFalse(self.sslgetcert_m.call_count)
|
|
||||||
# 2 CAs and no host cert so combined should only contain both CAs
|
|
||||||
self.certcomb_m.assert_has_calls([mock.call(['ca1.pem', 'ca2.pem'],
|
|
||||||
self.comb_cert_path)])
|
|
||||||
|
|
||||||
|
|
||||||
class TestSslWrongHostCert(test_ssl_certificate_base):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
self.setup_config_files()
|
|
||||||
cfg.CONF.set_override('server_ssl', True, 'RESTPROXY')
|
|
||||||
cfg.CONF.set_override('ssl_sticky', True, 'RESTPROXY')
|
|
||||||
self._setUp()
|
|
||||||
|
|
||||||
# Set fake HTTPS connection's expectation to something wrong
|
|
||||||
self.fake_certget_m.return_value = 'OTHERCERT'
|
|
||||||
|
|
||||||
# No CA certs for this test
|
|
||||||
self.getcacerts_m.return_value = []
|
|
||||||
|
|
||||||
# Pretend host cert exists
|
|
||||||
self.hcertpath_p = mock.patch(GETHOSTCERT,
|
|
||||||
return_value=(self.host_cert_path, True),
|
|
||||||
create=True).start()
|
|
||||||
super(TestSslWrongHostCert, self).setUp()
|
|
||||||
|
|
||||||
def test_error_no_cert(self):
|
|
||||||
# since there will already be a host cert, sticky should not take
|
|
||||||
# effect and there will be an error because the host cert's contents
|
|
||||||
# will be incorrect
|
|
||||||
tid = test_api_v2._uuid()
|
|
||||||
data = {}
|
|
||||||
data['network'] = {'tenant_id': tid, 'name': 'name',
|
|
||||||
'admin_state_up': True}
|
|
||||||
with mock.patch(HTTPS, new=fake_server.HTTPSHostValidation):
|
|
||||||
req = self.new_create_request('networks', data, 'json')
|
|
||||||
res = req.get_response(self.api)
|
|
||||||
self.assertEqual(res.status_int,
|
|
||||||
webob.exc.HTTPInternalServerError.code)
|
|
||||||
self.hcertpath_p.assert_has_calls([
|
|
||||||
mock.call(os.path.join(self.cert_base, 'host_certs'),
|
|
||||||
self.servername)
|
|
||||||
])
|
|
||||||
# sticky is enabled, but a host cert already exists so it shant fetch
|
|
||||||
self.assertFalse(self.sslgetcert_m.call_count)
|
|
||||||
# no ca certs, so host cert only for this combined cert
|
|
||||||
self.certcomb_m.assert_has_calls([mock.call([self.host_cert_path],
|
|
||||||
self.comb_cert_path)])
|
|
||||||
|
|
||||||
|
|
||||||
class TestSslNoValidation(test_ssl_certificate_base):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
self.setup_config_files()
|
|
||||||
cfg.CONF.set_override('server_ssl', True, 'RESTPROXY')
|
|
||||||
cfg.CONF.set_override('ssl_sticky', False, 'RESTPROXY')
|
|
||||||
cfg.CONF.set_override('no_ssl_validation', True, 'RESTPROXY')
|
|
||||||
self._setUp()
|
|
||||||
super(TestSslNoValidation, self).setUp()
|
|
||||||
|
|
||||||
def test_validation_disabled(self):
|
|
||||||
# SSL connection should be successful without any certificates
|
|
||||||
# If not, attempting to create a network will raise an exception
|
|
||||||
with contextlib.nested(
|
|
||||||
mock.patch(HTTPS, new=fake_server.HTTPSNoValidation),
|
|
||||||
self.network()
|
|
||||||
):
|
|
||||||
# no sticky grabbing and no cert combining with no enforcement
|
|
||||||
self.assertFalse(self.sslgetcert_m.call_count)
|
|
||||||
self.assertFalse(self.certcomb_m.call_count)
|
|
|
@ -1,17 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
#
|
|
||||||
# Copyright 2013 OpenStack Foundation.
|
|
||||||
#
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
|
@ -1,100 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
#
|
|
||||||
# Copyright (c) 2013 OpenStack Foundation.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
|
|
||||||
"""
|
|
||||||
Unit test brocade db.
|
|
||||||
"""
|
|
||||||
import uuid
|
|
||||||
|
|
||||||
from neutron import context
|
|
||||||
from neutron.plugins.brocade.db import models as brocade_db
|
|
||||||
from neutron.tests.unit import test_db_plugin as test_plugin
|
|
||||||
|
|
||||||
TEST_VLAN = 1000
|
|
||||||
|
|
||||||
|
|
||||||
class TestBrocadeDb(test_plugin.NeutronDbPluginV2TestCase):
|
|
||||||
"""Test brocade db functionality."""
|
|
||||||
|
|
||||||
def test_create_network(self):
|
|
||||||
"""Test brocade specific network db."""
|
|
||||||
|
|
||||||
net_id = str(uuid.uuid4())
|
|
||||||
|
|
||||||
# Create a network
|
|
||||||
self.context = context.get_admin_context()
|
|
||||||
brocade_db.create_network(self.context, net_id, TEST_VLAN)
|
|
||||||
|
|
||||||
# Get the network and verify
|
|
||||||
net = brocade_db.get_network(self.context, net_id)
|
|
||||||
self.assertEqual(net['id'], net_id)
|
|
||||||
self.assertEqual(int(net['vlan']), TEST_VLAN)
|
|
||||||
|
|
||||||
# Delete the network
|
|
||||||
brocade_db.delete_network(self.context, net['id'])
|
|
||||||
self.assertFalse(brocade_db.get_networks(self.context))
|
|
||||||
|
|
||||||
def test_create_port(self):
|
|
||||||
"""Test brocade specific port db."""
|
|
||||||
|
|
||||||
net_id = str(uuid.uuid4())
|
|
||||||
port_id = str(uuid.uuid4())
|
|
||||||
# port_id is truncated: since the linux-bridge tap device names are
|
|
||||||
# based on truncated port id, this enables port lookups using
|
|
||||||
# tap devices
|
|
||||||
port_id = port_id[0:11]
|
|
||||||
tenant_id = str(uuid.uuid4())
|
|
||||||
admin_state_up = True
|
|
||||||
|
|
||||||
# Create Port
|
|
||||||
|
|
||||||
# To create a port a network must exists, Create a network
|
|
||||||
self.context = context.get_admin_context()
|
|
||||||
brocade_db.create_network(self.context, net_id, TEST_VLAN)
|
|
||||||
|
|
||||||
physical_interface = "em1"
|
|
||||||
brocade_db.create_port(self.context, port_id, net_id,
|
|
||||||
physical_interface,
|
|
||||||
TEST_VLAN, tenant_id, admin_state_up)
|
|
||||||
|
|
||||||
port = brocade_db.get_port(self.context, port_id)
|
|
||||||
self.assertEqual(port['port_id'], port_id)
|
|
||||||
self.assertEqual(port['network_id'], net_id)
|
|
||||||
self.assertEqual(port['physical_interface'], physical_interface)
|
|
||||||
self.assertEqual(int(port['vlan_id']), TEST_VLAN)
|
|
||||||
self.assertEqual(port['tenant_id'], tenant_id)
|
|
||||||
self.assertEqual(port['admin_state_up'], admin_state_up)
|
|
||||||
|
|
||||||
admin_state_up = True
|
|
||||||
brocade_db.update_port_state(self.context, port_id, admin_state_up)
|
|
||||||
port = brocade_db.get_port(self.context, port_id)
|
|
||||||
self.assertEqual(port['admin_state_up'], admin_state_up)
|
|
||||||
|
|
||||||
admin_state_up = False
|
|
||||||
brocade_db.update_port_state(self.context, port_id, admin_state_up)
|
|
||||||
port = brocade_db.get_port(self.context, port_id)
|
|
||||||
self.assertEqual(port['admin_state_up'], admin_state_up)
|
|
||||||
|
|
||||||
admin_state_up = True
|
|
||||||
brocade_db.update_port_state(self.context, port_id, admin_state_up)
|
|
||||||
port = brocade_db.get_port(self.context, port_id)
|
|
||||||
self.assertEqual(port['admin_state_up'], admin_state_up)
|
|
||||||
|
|
||||||
# Delete Port
|
|
||||||
brocade_db.delete_port(self.context, port_id)
|
|
||||||
self.assertFalse(brocade_db.get_ports(self.context))
|
|
|
@ -1,74 +0,0 @@
|
||||||
# Copyright (c) 2012 OpenStack Foundation.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
|
|
||||||
import mock
|
|
||||||
|
|
||||||
from neutron.extensions import portbindings
|
|
||||||
from neutron.openstack.common import importutils
|
|
||||||
from neutron.plugins.brocade import NeutronPlugin as brocade_plugin
|
|
||||||
from neutron.tests.unit import _test_extension_portbindings as test_bindings
|
|
||||||
from neutron.tests.unit import test_db_plugin as test_plugin
|
|
||||||
|
|
||||||
|
|
||||||
PLUGIN_NAME = ('neutron.plugins.brocade.'
|
|
||||||
'NeutronPlugin.BrocadePluginV2')
|
|
||||||
NOS_DRIVER = ('neutron.plugins.brocade.'
|
|
||||||
'nos.fake_nosdriver.NOSdriver')
|
|
||||||
FAKE_IPADDRESS = '2.2.2.2'
|
|
||||||
FAKE_USERNAME = 'user'
|
|
||||||
FAKE_PASSWORD = 'password'
|
|
||||||
FAKE_PHYSICAL_INTERFACE = 'em1'
|
|
||||||
|
|
||||||
|
|
||||||
class BrocadePluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
|
|
||||||
_plugin_name = PLUGIN_NAME
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
|
|
||||||
def mocked_brocade_init(self):
|
|
||||||
|
|
||||||
self._switch = {'address': FAKE_IPADDRESS,
|
|
||||||
'username': FAKE_USERNAME,
|
|
||||||
'password': FAKE_PASSWORD
|
|
||||||
}
|
|
||||||
self._driver = importutils.import_object(NOS_DRIVER)
|
|
||||||
|
|
||||||
with mock.patch.object(brocade_plugin.BrocadePluginV2,
|
|
||||||
'brocade_init', new=mocked_brocade_init):
|
|
||||||
super(BrocadePluginV2TestCase, self).setUp(self._plugin_name)
|
|
||||||
|
|
||||||
|
|
||||||
class TestBrocadeBasicGet(test_plugin.TestBasicGet,
|
|
||||||
BrocadePluginV2TestCase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestBrocadeV2HTTPResponse(test_plugin.TestV2HTTPResponse,
|
|
||||||
BrocadePluginV2TestCase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestBrocadePortsV2(test_plugin.TestPortsV2,
|
|
||||||
BrocadePluginV2TestCase,
|
|
||||||
test_bindings.PortBindingsTestCase):
|
|
||||||
|
|
||||||
VIF_TYPE = portbindings.VIF_TYPE_BRIDGE
|
|
||||||
HAS_PORT_FILTER = True
|
|
||||||
|
|
||||||
|
|
||||||
class TestBrocadeNetworksV2(test_plugin.TestNetworksV2,
|
|
||||||
BrocadePluginV2TestCase):
|
|
||||||
pass
|
|
|
@ -1,73 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
#
|
|
||||||
# Copyright (c) 2013 OpenStack Foundation.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
|
|
||||||
"""
|
|
||||||
Test vlans alloc/dealloc.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from neutron.db import api as db
|
|
||||||
from neutron.openstack.common import context
|
|
||||||
from neutron.plugins.brocade import vlanbm as vlan_bitmap
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
class TestVlanBitmap(base.BaseTestCase):
|
|
||||||
"""exercise Vlan bitmap ."""
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestVlanBitmap, self).setUp()
|
|
||||||
db.configure_db()
|
|
||||||
self.addCleanup(db.clear_db)
|
|
||||||
self.context = context.get_admin_context()
|
|
||||||
self.context.session = db.get_session()
|
|
||||||
|
|
||||||
def test_vlan(self):
|
|
||||||
"""test vlan allocation/de-alloc."""
|
|
||||||
|
|
||||||
self.vbm_ = vlan_bitmap.VlanBitmap(self.context)
|
|
||||||
vlan_id = self.vbm_.get_next_vlan(None)
|
|
||||||
|
|
||||||
# First vlan is always 2
|
|
||||||
self.assertEqual(vlan_id, 2)
|
|
||||||
|
|
||||||
# next vlan is always 3
|
|
||||||
vlan_id = self.vbm_.get_next_vlan(None)
|
|
||||||
self.assertEqual(vlan_id, 3)
|
|
||||||
|
|
||||||
# get a specific vlan i.e. 4
|
|
||||||
vlan_id = self.vbm_.get_next_vlan(4)
|
|
||||||
self.assertEqual(vlan_id, 4)
|
|
||||||
|
|
||||||
# get a specific vlan i.e. 5
|
|
||||||
vlan_id = self.vbm_.get_next_vlan(5)
|
|
||||||
self.assertEqual(vlan_id, 5)
|
|
||||||
|
|
||||||
# Skip 6
|
|
||||||
|
|
||||||
# get a specific vlan i.e. 7
|
|
||||||
vlan_id = self.vbm_.get_next_vlan(7)
|
|
||||||
self.assertEqual(vlan_id, 7)
|
|
||||||
|
|
||||||
# get a specific vlan i.e. 1900
|
|
||||||
vlan_id = self.vbm_.get_next_vlan(1900)
|
|
||||||
self.assertEqual(vlan_id, 1900)
|
|
||||||
|
|
||||||
# Release 4 and get next again
|
|
||||||
self.vbm_.release_vlan(4)
|
|
||||||
vlan_id = self.vbm_.get_next_vlan(None)
|
|
||||||
self.assertEqual(vlan_id, 4)
|
|
|
@ -1,16 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2012 OpenStack Foundation.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
|
@ -1,18 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 Cisco Systems, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Abhishek Raut, Cisco Systems, Inc.
|
|
||||||
#
|
|
|
@ -1,119 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2014 Cisco Systems, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Abhishek Raut, Cisco Systems Inc.
|
|
||||||
# @author: Sourabh Patwardhan, Cisco Systems Inc.
|
|
||||||
|
|
||||||
from neutron.openstack.common import log as logging
|
|
||||||
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
|
|
||||||
from neutron.plugins.cisco.n1kv import n1kv_client
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
_resource_metadata = {'port': ['id', 'macAddress', 'ipAddress', 'subnetId'],
|
|
||||||
'vmnetwork': ['name', 'networkSegmentId',
|
|
||||||
'networkSegment', 'portProfile',
|
|
||||||
'portProfileId', 'tenantId',
|
|
||||||
'portId', 'macAddress',
|
|
||||||
'ipAddress', 'subnetId']}
|
|
||||||
|
|
||||||
|
|
||||||
class TestClient(n1kv_client.Client):
|
|
||||||
|
|
||||||
def __init__(self, **kwargs):
|
|
||||||
self.broken = False
|
|
||||||
self.inject_params = False
|
|
||||||
self.total_profiles = 2
|
|
||||||
super(TestClient, self).__init__()
|
|
||||||
|
|
||||||
def _get_total_profiles(self):
|
|
||||||
return self.total_profiles
|
|
||||||
|
|
||||||
def _do_request(self, method, action, body=None, headers=None):
|
|
||||||
if self.broken:
|
|
||||||
raise c_exc.VSMError(reason='VSM:Internal Server Error')
|
|
||||||
if self.inject_params and body:
|
|
||||||
body['invalidKey'] = 'catchMeIfYouCan'
|
|
||||||
if method == 'POST':
|
|
||||||
return _validate_resource(action, body)
|
|
||||||
elif method == 'GET':
|
|
||||||
if 'virtual-port-profile' in action:
|
|
||||||
return _policy_profile_generator(
|
|
||||||
self._get_total_profiles())
|
|
||||||
else:
|
|
||||||
raise c_exc.VSMError(reason='VSM:Internal Server Error')
|
|
||||||
|
|
||||||
|
|
||||||
class TestClientInvalidRequest(TestClient):
|
|
||||||
|
|
||||||
def __init__(self, **kwargs):
|
|
||||||
super(TestClientInvalidRequest, self).__init__()
|
|
||||||
self.inject_params = True
|
|
||||||
|
|
||||||
|
|
||||||
def _validate_resource(action, body=None):
|
|
||||||
if body:
|
|
||||||
body_set = set(body.keys())
|
|
||||||
else:
|
|
||||||
return
|
|
||||||
if 'vm-network' in action and 'port' not in action:
|
|
||||||
vmnetwork_set = set(_resource_metadata['vmnetwork'])
|
|
||||||
if body_set - vmnetwork_set:
|
|
||||||
raise c_exc.VSMError(reason='Invalid Request')
|
|
||||||
elif 'port' in action:
|
|
||||||
port_set = set(_resource_metadata['port'])
|
|
||||||
if body_set - port_set:
|
|
||||||
raise c_exc.VSMError(reason='Invalid Request')
|
|
||||||
else:
|
|
||||||
return
|
|
||||||
|
|
||||||
|
|
||||||
def _policy_profile_generator(total_profiles):
|
|
||||||
"""
|
|
||||||
Generate policy profile response and return a dictionary.
|
|
||||||
|
|
||||||
:param total_profiles: integer representing total number of profiles to
|
|
||||||
return
|
|
||||||
"""
|
|
||||||
profiles = {}
|
|
||||||
for num in range(1, total_profiles + 1):
|
|
||||||
name = "pp-%s" % num
|
|
||||||
profile_id = "00000000-0000-0000-0000-00000000000%s" % num
|
|
||||||
profiles[name] = {"properties": {"name": name, "id": profile_id}}
|
|
||||||
return profiles
|
|
||||||
|
|
||||||
|
|
||||||
def _policy_profile_generator_xml(total_profiles):
|
|
||||||
"""
|
|
||||||
Generate policy profile response in XML format.
|
|
||||||
|
|
||||||
:param total_profiles: integer representing total number of profiles to
|
|
||||||
return
|
|
||||||
"""
|
|
||||||
xml = ["""<?xml version="1.0" encoding="utf-8"?>
|
|
||||||
<set name="virtual_port_profile_set">"""]
|
|
||||||
template = (
|
|
||||||
'<instance name="%(num)d"'
|
|
||||||
' url="/api/n1k/virtual-port-profile/%(num)s">'
|
|
||||||
'<properties>'
|
|
||||||
'<id>00000000-0000-0000-0000-00000000000%(num)s</id>'
|
|
||||||
'<name>pp-%(num)s</name>'
|
|
||||||
'</properties>'
|
|
||||||
'</instance>'
|
|
||||||
)
|
|
||||||
xml.extend(template % {'num': n} for n in range(1, total_profiles + 1))
|
|
||||||
xml.append("</set>")
|
|
||||||
return ''.join(xml)
|
|
|
@ -1,870 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 Cisco Systems, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Juergen Brendel, Cisco Systems Inc.
|
|
||||||
# @author: Abhishek Raut, Cisco Systems Inc.
|
|
||||||
# @author: Rudrajit Tapadar, Cisco Systems Inc.
|
|
||||||
|
|
||||||
from six import moves
|
|
||||||
from sqlalchemy.orm import exc as s_exc
|
|
||||||
from testtools import matchers
|
|
||||||
|
|
||||||
from neutron.common import exceptions as n_exc
|
|
||||||
from neutron import context
|
|
||||||
from neutron.db import api as db
|
|
||||||
from neutron.db import db_base_plugin_v2
|
|
||||||
from neutron.plugins.cisco.common import cisco_constants
|
|
||||||
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
|
|
||||||
from neutron.plugins.cisco.db import n1kv_db_v2
|
|
||||||
from neutron.plugins.cisco.db import n1kv_models_v2
|
|
||||||
from neutron.tests import base
|
|
||||||
from neutron.tests.unit import test_db_plugin as test_plugin
|
|
||||||
|
|
||||||
|
|
||||||
PHYS_NET = 'physnet1'
|
|
||||||
PHYS_NET_2 = 'physnet2'
|
|
||||||
VLAN_MIN = 10
|
|
||||||
VLAN_MAX = 19
|
|
||||||
VXLAN_MIN = 5000
|
|
||||||
VXLAN_MAX = 5009
|
|
||||||
SEGMENT_RANGE = '200-220'
|
|
||||||
SEGMENT_RANGE_MIN_OVERLAP = '210-230'
|
|
||||||
SEGMENT_RANGE_MAX_OVERLAP = '190-209'
|
|
||||||
SEGMENT_RANGE_OVERLAP = '190-230'
|
|
||||||
TEST_NETWORK_ID = 'abcdefghijklmnopqrstuvwxyz'
|
|
||||||
TEST_NETWORK_ID2 = 'abcdefghijklmnopqrstuvwxy2'
|
|
||||||
TEST_NETWORK_ID3 = 'abcdefghijklmnopqrstuvwxy3'
|
|
||||||
TEST_NETWORK_PROFILE = {'name': 'test_profile',
|
|
||||||
'segment_type': 'vlan',
|
|
||||||
'physical_network': 'physnet1',
|
|
||||||
'segment_range': '10-19'}
|
|
||||||
TEST_NETWORK_PROFILE_2 = {'name': 'test_profile_2',
|
|
||||||
'segment_type': 'vlan',
|
|
||||||
'physical_network': 'physnet1',
|
|
||||||
'segment_range': SEGMENT_RANGE}
|
|
||||||
TEST_NETWORK_PROFILE_VXLAN = {'name': 'test_profile',
|
|
||||||
'segment_type': 'overlay',
|
|
||||||
'sub_type': 'native_vxlan',
|
|
||||||
'segment_range': '5000-5009',
|
|
||||||
'multicast_ip_range': '239.0.0.70-239.0.0.80'}
|
|
||||||
TEST_POLICY_PROFILE = {'id': '4a417990-76fb-11e2-bcfd-0800200c9a66',
|
|
||||||
'name': 'test_policy_profile'}
|
|
||||||
TEST_NETWORK_PROFILE_MULTI_SEGMENT = {'name': 'test_profile',
|
|
||||||
'segment_type': 'multi-segment'}
|
|
||||||
TEST_NETWORK_PROFILE_VLAN_TRUNK = {'name': 'test_profile',
|
|
||||||
'segment_type': 'trunk',
|
|
||||||
'sub_type': 'vlan'}
|
|
||||||
TEST_NETWORK_PROFILE_VXLAN_TRUNK = {'name': 'test_profile',
|
|
||||||
'segment_type': 'trunk',
|
|
||||||
'sub_type': 'overlay'}
|
|
||||||
|
|
||||||
|
|
||||||
def _create_test_network_profile_if_not_there(session,
|
|
||||||
profile=TEST_NETWORK_PROFILE):
|
|
||||||
try:
|
|
||||||
_profile = session.query(n1kv_models_v2.NetworkProfile).filter_by(
|
|
||||||
name=profile['name']).one()
|
|
||||||
except s_exc.NoResultFound:
|
|
||||||
_profile = n1kv_db_v2.create_network_profile(session, profile)
|
|
||||||
return _profile
|
|
||||||
|
|
||||||
|
|
||||||
def _create_test_policy_profile_if_not_there(session,
|
|
||||||
profile=TEST_POLICY_PROFILE):
|
|
||||||
try:
|
|
||||||
_profile = session.query(n1kv_models_v2.PolicyProfile).filter_by(
|
|
||||||
name=profile['name']).one()
|
|
||||||
except s_exc.NoResultFound:
|
|
||||||
_profile = n1kv_db_v2.create_policy_profile(profile)
|
|
||||||
return _profile
|
|
||||||
|
|
||||||
|
|
||||||
class VlanAllocationsTest(base.BaseTestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(VlanAllocationsTest, self).setUp()
|
|
||||||
db.configure_db()
|
|
||||||
self.session = db.get_session()
|
|
||||||
self.net_p = _create_test_network_profile_if_not_there(self.session)
|
|
||||||
n1kv_db_v2.sync_vlan_allocations(self.session, self.net_p)
|
|
||||||
self.addCleanup(db.clear_db)
|
|
||||||
|
|
||||||
def test_sync_vlan_allocations_outside_segment_range(self):
|
|
||||||
self.assertRaises(c_exc.VlanIDNotFound,
|
|
||||||
n1kv_db_v2.get_vlan_allocation,
|
|
||||||
self.session,
|
|
||||||
PHYS_NET,
|
|
||||||
VLAN_MIN - 1)
|
|
||||||
self.assertRaises(c_exc.VlanIDNotFound,
|
|
||||||
n1kv_db_v2.get_vlan_allocation,
|
|
||||||
self.session,
|
|
||||||
PHYS_NET,
|
|
||||||
VLAN_MAX + 1)
|
|
||||||
self.assertRaises(c_exc.VlanIDNotFound,
|
|
||||||
n1kv_db_v2.get_vlan_allocation,
|
|
||||||
self.session,
|
|
||||||
PHYS_NET_2,
|
|
||||||
VLAN_MIN + 20)
|
|
||||||
self.assertRaises(c_exc.VlanIDNotFound,
|
|
||||||
n1kv_db_v2.get_vlan_allocation,
|
|
||||||
self.session,
|
|
||||||
PHYS_NET_2,
|
|
||||||
VLAN_MIN + 20)
|
|
||||||
self.assertRaises(c_exc.VlanIDNotFound,
|
|
||||||
n1kv_db_v2.get_vlan_allocation,
|
|
||||||
self.session,
|
|
||||||
PHYS_NET_2,
|
|
||||||
VLAN_MAX + 20)
|
|
||||||
|
|
||||||
def test_sync_vlan_allocations_unallocated_vlans(self):
|
|
||||||
self.assertFalse(n1kv_db_v2.get_vlan_allocation(self.session,
|
|
||||||
PHYS_NET,
|
|
||||||
VLAN_MIN).allocated)
|
|
||||||
self.assertFalse(n1kv_db_v2.get_vlan_allocation(self.session,
|
|
||||||
PHYS_NET,
|
|
||||||
VLAN_MIN + 1).
|
|
||||||
allocated)
|
|
||||||
self.assertFalse(n1kv_db_v2.get_vlan_allocation(self.session,
|
|
||||||
PHYS_NET,
|
|
||||||
VLAN_MAX - 1).
|
|
||||||
allocated)
|
|
||||||
self.assertFalse(n1kv_db_v2.get_vlan_allocation(self.session,
|
|
||||||
PHYS_NET,
|
|
||||||
VLAN_MAX).allocated)
|
|
||||||
|
|
||||||
def test_vlan_pool(self):
|
|
||||||
vlan_ids = set()
|
|
||||||
for x in moves.xrange(VLAN_MIN, VLAN_MAX + 1):
|
|
||||||
(physical_network, seg_type,
|
|
||||||
vlan_id, m_ip) = n1kv_db_v2.reserve_vlan(self.session, self.net_p)
|
|
||||||
self.assertEqual(physical_network, PHYS_NET)
|
|
||||||
self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1))
|
|
||||||
self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1))
|
|
||||||
vlan_ids.add(vlan_id)
|
|
||||||
|
|
||||||
self.assertRaises(n_exc.NoNetworkAvailable,
|
|
||||||
n1kv_db_v2.reserve_vlan,
|
|
||||||
self.session,
|
|
||||||
self.net_p)
|
|
||||||
|
|
||||||
n1kv_db_v2.release_vlan(self.session, PHYS_NET, vlan_ids.pop())
|
|
||||||
physical_network, seg_type, vlan_id, m_ip = (n1kv_db_v2.reserve_vlan(
|
|
||||||
self.session, self.net_p))
|
|
||||||
self.assertEqual(physical_network, PHYS_NET)
|
|
||||||
self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1))
|
|
||||||
self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1))
|
|
||||||
vlan_ids.add(vlan_id)
|
|
||||||
|
|
||||||
for vlan_id in vlan_ids:
|
|
||||||
n1kv_db_v2.release_vlan(self.session, PHYS_NET, vlan_id)
|
|
||||||
|
|
||||||
def test_specific_vlan_inside_pool(self):
|
|
||||||
vlan_id = VLAN_MIN + 5
|
|
||||||
self.assertFalse(n1kv_db_v2.get_vlan_allocation(self.session,
|
|
||||||
PHYS_NET,
|
|
||||||
vlan_id).allocated)
|
|
||||||
n1kv_db_v2.reserve_specific_vlan(self.session, PHYS_NET, vlan_id)
|
|
||||||
self.assertTrue(n1kv_db_v2.get_vlan_allocation(self.session,
|
|
||||||
PHYS_NET,
|
|
||||||
vlan_id).allocated)
|
|
||||||
|
|
||||||
self.assertRaises(n_exc.VlanIdInUse,
|
|
||||||
n1kv_db_v2.reserve_specific_vlan,
|
|
||||||
self.session,
|
|
||||||
PHYS_NET,
|
|
||||||
vlan_id)
|
|
||||||
|
|
||||||
n1kv_db_v2.release_vlan(self.session, PHYS_NET, vlan_id)
|
|
||||||
self.assertFalse(n1kv_db_v2.get_vlan_allocation(self.session,
|
|
||||||
PHYS_NET,
|
|
||||||
vlan_id).allocated)
|
|
||||||
|
|
||||||
def test_specific_vlan_outside_pool(self):
|
|
||||||
vlan_id = VLAN_MAX + 5
|
|
||||||
self.assertRaises(c_exc.VlanIDNotFound,
|
|
||||||
n1kv_db_v2.get_vlan_allocation,
|
|
||||||
self.session,
|
|
||||||
PHYS_NET,
|
|
||||||
vlan_id)
|
|
||||||
self.assertRaises(c_exc.VlanIDOutsidePool,
|
|
||||||
n1kv_db_v2.reserve_specific_vlan,
|
|
||||||
self.session,
|
|
||||||
PHYS_NET,
|
|
||||||
vlan_id)
|
|
||||||
|
|
||||||
|
|
||||||
class VxlanAllocationsTest(base.BaseTestCase,
|
|
||||||
n1kv_db_v2.NetworkProfile_db_mixin):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(VxlanAllocationsTest, self).setUp()
|
|
||||||
db.configure_db()
|
|
||||||
self.session = db.get_session()
|
|
||||||
self.net_p = _create_test_network_profile_if_not_there(
|
|
||||||
self.session, TEST_NETWORK_PROFILE_VXLAN)
|
|
||||||
n1kv_db_v2.sync_vxlan_allocations(self.session, self.net_p)
|
|
||||||
self.addCleanup(db.clear_db)
|
|
||||||
|
|
||||||
def test_sync_vxlan_allocations_outside_segment_range(self):
|
|
||||||
self.assertRaises(c_exc.VxlanIDNotFound,
|
|
||||||
n1kv_db_v2.get_vxlan_allocation,
|
|
||||||
self.session,
|
|
||||||
VXLAN_MIN - 1)
|
|
||||||
self.assertRaises(c_exc.VxlanIDNotFound,
|
|
||||||
n1kv_db_v2.get_vxlan_allocation,
|
|
||||||
self.session,
|
|
||||||
VXLAN_MAX + 1)
|
|
||||||
|
|
||||||
def test_sync_vxlan_allocations_unallocated_vxlans(self):
|
|
||||||
self.assertFalse(n1kv_db_v2.get_vxlan_allocation(self.session,
|
|
||||||
VXLAN_MIN).allocated)
|
|
||||||
self.assertFalse(n1kv_db_v2.get_vxlan_allocation(self.session,
|
|
||||||
VXLAN_MIN + 1).
|
|
||||||
allocated)
|
|
||||||
self.assertFalse(n1kv_db_v2.get_vxlan_allocation(self.session,
|
|
||||||
VXLAN_MAX - 1).
|
|
||||||
allocated)
|
|
||||||
self.assertFalse(n1kv_db_v2.get_vxlan_allocation(self.session,
|
|
||||||
VXLAN_MAX).allocated)
|
|
||||||
|
|
||||||
def test_vxlan_pool(self):
|
|
||||||
vxlan_ids = set()
|
|
||||||
for x in moves.xrange(VXLAN_MIN, VXLAN_MAX + 1):
|
|
||||||
vxlan = n1kv_db_v2.reserve_vxlan(self.session, self.net_p)
|
|
||||||
vxlan_id = vxlan[2]
|
|
||||||
self.assertThat(vxlan_id, matchers.GreaterThan(VXLAN_MIN - 1))
|
|
||||||
self.assertThat(vxlan_id, matchers.LessThan(VXLAN_MAX + 1))
|
|
||||||
vxlan_ids.add(vxlan_id)
|
|
||||||
|
|
||||||
self.assertRaises(n_exc.NoNetworkAvailable,
|
|
||||||
n1kv_db_v2.reserve_vxlan,
|
|
||||||
self.session,
|
|
||||||
self.net_p)
|
|
||||||
n1kv_db_v2.release_vxlan(self.session, vxlan_ids.pop())
|
|
||||||
vxlan = n1kv_db_v2.reserve_vxlan(self.session, self.net_p)
|
|
||||||
vxlan_id = vxlan[2]
|
|
||||||
self.assertThat(vxlan_id, matchers.GreaterThan(VXLAN_MIN - 1))
|
|
||||||
self.assertThat(vxlan_id, matchers.LessThan(VXLAN_MAX + 1))
|
|
||||||
vxlan_ids.add(vxlan_id)
|
|
||||||
|
|
||||||
for vxlan_id in vxlan_ids:
|
|
||||||
n1kv_db_v2.release_vxlan(self.session, vxlan_id)
|
|
||||||
n1kv_db_v2.delete_network_profile(self.session, self.net_p.id)
|
|
||||||
|
|
||||||
def test_specific_vxlan_inside_pool(self):
|
|
||||||
vxlan_id = VXLAN_MIN + 5
|
|
||||||
self.assertFalse(n1kv_db_v2.get_vxlan_allocation(self.session,
|
|
||||||
vxlan_id).allocated)
|
|
||||||
n1kv_db_v2.reserve_specific_vxlan(self.session, vxlan_id)
|
|
||||||
self.assertTrue(n1kv_db_v2.get_vxlan_allocation(self.session,
|
|
||||||
vxlan_id).allocated)
|
|
||||||
|
|
||||||
self.assertRaises(c_exc.VxlanIDInUse,
|
|
||||||
n1kv_db_v2.reserve_specific_vxlan,
|
|
||||||
self.session,
|
|
||||||
vxlan_id)
|
|
||||||
|
|
||||||
n1kv_db_v2.release_vxlan(self.session, vxlan_id)
|
|
||||||
self.assertFalse(n1kv_db_v2.get_vxlan_allocation(self.session,
|
|
||||||
vxlan_id).allocated)
|
|
||||||
|
|
||||||
def test_specific_vxlan_outside_pool(self):
|
|
||||||
vxlan_id = VXLAN_MAX + 5
|
|
||||||
self.assertRaises(c_exc.VxlanIDNotFound,
|
|
||||||
n1kv_db_v2.get_vxlan_allocation,
|
|
||||||
self.session,
|
|
||||||
vxlan_id)
|
|
||||||
self.assertRaises(c_exc.VxlanIDOutsidePool,
|
|
||||||
n1kv_db_v2.reserve_specific_vxlan,
|
|
||||||
self.session,
|
|
||||||
vxlan_id)
|
|
||||||
|
|
||||||
|
|
||||||
class NetworkBindingsTest(test_plugin.NeutronDbPluginV2TestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(NetworkBindingsTest, self).setUp()
|
|
||||||
db.configure_db()
|
|
||||||
self.session = db.get_session()
|
|
||||||
self.addCleanup(db.clear_db)
|
|
||||||
|
|
||||||
def test_add_network_binding(self):
|
|
||||||
with self.network() as network:
|
|
||||||
TEST_NETWORK_ID = network['network']['id']
|
|
||||||
|
|
||||||
self.assertRaises(c_exc.NetworkBindingNotFound,
|
|
||||||
n1kv_db_v2.get_network_binding,
|
|
||||||
self.session,
|
|
||||||
TEST_NETWORK_ID)
|
|
||||||
|
|
||||||
p = _create_test_network_profile_if_not_there(self.session)
|
|
||||||
n1kv_db_v2.add_network_binding(
|
|
||||||
self.session, TEST_NETWORK_ID, 'vlan',
|
|
||||||
PHYS_NET, 1234, '0.0.0.0', p.id, None)
|
|
||||||
binding = n1kv_db_v2.get_network_binding(
|
|
||||||
self.session, TEST_NETWORK_ID)
|
|
||||||
self.assertIsNotNone(binding)
|
|
||||||
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
|
|
||||||
self.assertEqual(binding.network_type, 'vlan')
|
|
||||||
self.assertEqual(binding.physical_network, PHYS_NET)
|
|
||||||
self.assertEqual(binding.segmentation_id, 1234)
|
|
||||||
|
|
||||||
def test_create_multi_segment_network(self):
|
|
||||||
with self.network() as network:
|
|
||||||
TEST_NETWORK_ID = network['network']['id']
|
|
||||||
|
|
||||||
self.assertRaises(c_exc.NetworkBindingNotFound,
|
|
||||||
n1kv_db_v2.get_network_binding,
|
|
||||||
self.session,
|
|
||||||
TEST_NETWORK_ID)
|
|
||||||
|
|
||||||
p = _create_test_network_profile_if_not_there(
|
|
||||||
self.session,
|
|
||||||
TEST_NETWORK_PROFILE_MULTI_SEGMENT)
|
|
||||||
n1kv_db_v2.add_network_binding(
|
|
||||||
self.session, TEST_NETWORK_ID, 'multi-segment',
|
|
||||||
None, 0, '0.0.0.0', p.id, None)
|
|
||||||
binding = n1kv_db_v2.get_network_binding(
|
|
||||||
self.session, TEST_NETWORK_ID)
|
|
||||||
self.assertIsNotNone(binding)
|
|
||||||
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
|
|
||||||
self.assertEqual(binding.network_type, 'multi-segment')
|
|
||||||
self.assertIsNone(binding.physical_network)
|
|
||||||
self.assertEqual(binding.segmentation_id, 0)
|
|
||||||
|
|
||||||
def test_add_multi_segment_binding(self):
|
|
||||||
with self.network() as network:
|
|
||||||
TEST_NETWORK_ID = network['network']['id']
|
|
||||||
|
|
||||||
self.assertRaises(c_exc.NetworkBindingNotFound,
|
|
||||||
n1kv_db_v2.get_network_binding,
|
|
||||||
self.session,
|
|
||||||
TEST_NETWORK_ID)
|
|
||||||
|
|
||||||
p = _create_test_network_profile_if_not_there(
|
|
||||||
self.session,
|
|
||||||
TEST_NETWORK_PROFILE_MULTI_SEGMENT)
|
|
||||||
n1kv_db_v2.add_network_binding(
|
|
||||||
self.session, TEST_NETWORK_ID, 'multi-segment',
|
|
||||||
None, 0, '0.0.0.0', p.id,
|
|
||||||
[(TEST_NETWORK_ID2, TEST_NETWORK_ID3)])
|
|
||||||
binding = n1kv_db_v2.get_network_binding(
|
|
||||||
self.session, TEST_NETWORK_ID)
|
|
||||||
self.assertIsNotNone(binding)
|
|
||||||
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
|
|
||||||
self.assertEqual(binding.network_type, 'multi-segment')
|
|
||||||
self.assertIsNone(binding.physical_network)
|
|
||||||
self.assertEqual(binding.segmentation_id, 0)
|
|
||||||
ms_binding = (n1kv_db_v2.get_multi_segment_network_binding(
|
|
||||||
self.session, TEST_NETWORK_ID,
|
|
||||||
(TEST_NETWORK_ID2, TEST_NETWORK_ID3)))
|
|
||||||
self.assertIsNotNone(ms_binding)
|
|
||||||
self.assertEqual(ms_binding.multi_segment_id, TEST_NETWORK_ID)
|
|
||||||
self.assertEqual(ms_binding.segment1_id, TEST_NETWORK_ID2)
|
|
||||||
self.assertEqual(ms_binding.segment2_id, TEST_NETWORK_ID3)
|
|
||||||
ms_members = (n1kv_db_v2.get_multi_segment_members(
|
|
||||||
self.session, TEST_NETWORK_ID))
|
|
||||||
self.assertEqual(ms_members,
|
|
||||||
[(TEST_NETWORK_ID2, TEST_NETWORK_ID3)])
|
|
||||||
self.assertTrue(n1kv_db_v2.is_multi_segment_member(
|
|
||||||
self.session, TEST_NETWORK_ID2))
|
|
||||||
self.assertTrue(n1kv_db_v2.is_multi_segment_member(
|
|
||||||
self.session, TEST_NETWORK_ID3))
|
|
||||||
n1kv_db_v2.del_multi_segment_binding(
|
|
||||||
self.session, TEST_NETWORK_ID,
|
|
||||||
[(TEST_NETWORK_ID2, TEST_NETWORK_ID3)])
|
|
||||||
ms_members = (n1kv_db_v2.get_multi_segment_members(
|
|
||||||
self.session, TEST_NETWORK_ID))
|
|
||||||
self.assertEqual(ms_members, [])
|
|
||||||
|
|
||||||
def test_create_vlan_trunk_network(self):
|
|
||||||
with self.network() as network:
|
|
||||||
TEST_NETWORK_ID = network['network']['id']
|
|
||||||
|
|
||||||
self.assertRaises(c_exc.NetworkBindingNotFound,
|
|
||||||
n1kv_db_v2.get_network_binding,
|
|
||||||
self.session,
|
|
||||||
TEST_NETWORK_ID)
|
|
||||||
|
|
||||||
p = _create_test_network_profile_if_not_there(
|
|
||||||
self.session,
|
|
||||||
TEST_NETWORK_PROFILE_VLAN_TRUNK)
|
|
||||||
n1kv_db_v2.add_network_binding(
|
|
||||||
self.session, TEST_NETWORK_ID, 'trunk',
|
|
||||||
None, 0, '0.0.0.0', p.id, None)
|
|
||||||
binding = n1kv_db_v2.get_network_binding(
|
|
||||||
self.session, TEST_NETWORK_ID)
|
|
||||||
self.assertIsNotNone(binding)
|
|
||||||
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
|
|
||||||
self.assertEqual(binding.network_type, 'trunk')
|
|
||||||
self.assertIsNone(binding.physical_network)
|
|
||||||
self.assertEqual(binding.segmentation_id, 0)
|
|
||||||
|
|
||||||
def test_create_vxlan_trunk_network(self):
|
|
||||||
with self.network() as network:
|
|
||||||
TEST_NETWORK_ID = network['network']['id']
|
|
||||||
|
|
||||||
self.assertRaises(c_exc.NetworkBindingNotFound,
|
|
||||||
n1kv_db_v2.get_network_binding,
|
|
||||||
self.session,
|
|
||||||
TEST_NETWORK_ID)
|
|
||||||
|
|
||||||
p = _create_test_network_profile_if_not_there(
|
|
||||||
self.session,
|
|
||||||
TEST_NETWORK_PROFILE_VXLAN_TRUNK)
|
|
||||||
n1kv_db_v2.add_network_binding(
|
|
||||||
self.session, TEST_NETWORK_ID, 'trunk',
|
|
||||||
None, 0, '0.0.0.0', p.id, None)
|
|
||||||
binding = n1kv_db_v2.get_network_binding(
|
|
||||||
self.session, TEST_NETWORK_ID)
|
|
||||||
self.assertIsNotNone(binding)
|
|
||||||
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
|
|
||||||
self.assertEqual(binding.network_type, 'trunk')
|
|
||||||
self.assertIsNone(binding.physical_network)
|
|
||||||
self.assertEqual(binding.segmentation_id, 0)
|
|
||||||
|
|
||||||
def test_add_vlan_trunk_binding(self):
|
|
||||||
with self.network() as network1:
|
|
||||||
with self.network() as network2:
|
|
||||||
TEST_NETWORK_ID = network1['network']['id']
|
|
||||||
|
|
||||||
self.assertRaises(c_exc.NetworkBindingNotFound,
|
|
||||||
n1kv_db_v2.get_network_binding,
|
|
||||||
self.session,
|
|
||||||
TEST_NETWORK_ID)
|
|
||||||
TEST_NETWORK_ID2 = network2['network']['id']
|
|
||||||
self.assertRaises(c_exc.NetworkBindingNotFound,
|
|
||||||
n1kv_db_v2.get_network_binding,
|
|
||||||
self.session,
|
|
||||||
TEST_NETWORK_ID2)
|
|
||||||
p_v = _create_test_network_profile_if_not_there(self.session)
|
|
||||||
n1kv_db_v2.add_network_binding(
|
|
||||||
self.session, TEST_NETWORK_ID2, 'vlan',
|
|
||||||
PHYS_NET, 1234, '0.0.0.0', p_v.id, None)
|
|
||||||
p = _create_test_network_profile_if_not_there(
|
|
||||||
self.session,
|
|
||||||
TEST_NETWORK_PROFILE_VLAN_TRUNK)
|
|
||||||
n1kv_db_v2.add_network_binding(
|
|
||||||
self.session, TEST_NETWORK_ID, 'trunk',
|
|
||||||
None, 0, '0.0.0.0', p.id, [(TEST_NETWORK_ID2, 0)])
|
|
||||||
binding = n1kv_db_v2.get_network_binding(
|
|
||||||
self.session, TEST_NETWORK_ID)
|
|
||||||
self.assertIsNotNone(binding)
|
|
||||||
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
|
|
||||||
self.assertEqual(binding.network_type, 'trunk')
|
|
||||||
self.assertEqual(binding.physical_network, PHYS_NET)
|
|
||||||
self.assertEqual(binding.segmentation_id, 0)
|
|
||||||
t_binding = (n1kv_db_v2.get_trunk_network_binding(
|
|
||||||
self.session, TEST_NETWORK_ID,
|
|
||||||
(TEST_NETWORK_ID2, 0)))
|
|
||||||
self.assertIsNotNone(t_binding)
|
|
||||||
self.assertEqual(t_binding.trunk_segment_id, TEST_NETWORK_ID)
|
|
||||||
self.assertEqual(t_binding.segment_id, TEST_NETWORK_ID2)
|
|
||||||
self.assertEqual(t_binding.dot1qtag, '0')
|
|
||||||
t_members = (n1kv_db_v2.get_trunk_members(
|
|
||||||
self.session, TEST_NETWORK_ID))
|
|
||||||
self.assertEqual(t_members,
|
|
||||||
[(TEST_NETWORK_ID2, '0')])
|
|
||||||
self.assertTrue(n1kv_db_v2.is_trunk_member(
|
|
||||||
self.session, TEST_NETWORK_ID2))
|
|
||||||
n1kv_db_v2.del_trunk_segment_binding(
|
|
||||||
self.session, TEST_NETWORK_ID,
|
|
||||||
[(TEST_NETWORK_ID2, '0')])
|
|
||||||
t_members = (n1kv_db_v2.get_multi_segment_members(
|
|
||||||
self.session, TEST_NETWORK_ID))
|
|
||||||
self.assertEqual(t_members, [])
|
|
||||||
|
|
||||||
def test_add_vxlan_trunk_binding(self):
|
|
||||||
with self.network() as network1:
|
|
||||||
with self.network() as network2:
|
|
||||||
TEST_NETWORK_ID = network1['network']['id']
|
|
||||||
|
|
||||||
self.assertRaises(c_exc.NetworkBindingNotFound,
|
|
||||||
n1kv_db_v2.get_network_binding,
|
|
||||||
self.session,
|
|
||||||
TEST_NETWORK_ID)
|
|
||||||
TEST_NETWORK_ID2 = network2['network']['id']
|
|
||||||
self.assertRaises(c_exc.NetworkBindingNotFound,
|
|
||||||
n1kv_db_v2.get_network_binding,
|
|
||||||
self.session,
|
|
||||||
TEST_NETWORK_ID2)
|
|
||||||
p_v = _create_test_network_profile_if_not_there(
|
|
||||||
self.session, TEST_NETWORK_PROFILE_VXLAN_TRUNK)
|
|
||||||
n1kv_db_v2.add_network_binding(
|
|
||||||
self.session, TEST_NETWORK_ID2, 'overlay',
|
|
||||||
None, 5100, '224.10.10.10', p_v.id, None)
|
|
||||||
p = _create_test_network_profile_if_not_there(
|
|
||||||
self.session,
|
|
||||||
TEST_NETWORK_PROFILE_VXLAN_TRUNK)
|
|
||||||
n1kv_db_v2.add_network_binding(
|
|
||||||
self.session, TEST_NETWORK_ID, 'trunk',
|
|
||||||
None, 0, '0.0.0.0', p.id,
|
|
||||||
[(TEST_NETWORK_ID2, 5)])
|
|
||||||
binding = n1kv_db_v2.get_network_binding(
|
|
||||||
self.session, TEST_NETWORK_ID)
|
|
||||||
self.assertIsNotNone(binding)
|
|
||||||
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
|
|
||||||
self.assertEqual(binding.network_type, 'trunk')
|
|
||||||
self.assertIsNone(binding.physical_network)
|
|
||||||
self.assertEqual(binding.segmentation_id, 0)
|
|
||||||
t_binding = (n1kv_db_v2.get_trunk_network_binding(
|
|
||||||
self.session, TEST_NETWORK_ID,
|
|
||||||
(TEST_NETWORK_ID2, '5')))
|
|
||||||
self.assertIsNotNone(t_binding)
|
|
||||||
self.assertEqual(t_binding.trunk_segment_id, TEST_NETWORK_ID)
|
|
||||||
self.assertEqual(t_binding.segment_id, TEST_NETWORK_ID2)
|
|
||||||
self.assertEqual(t_binding.dot1qtag, '5')
|
|
||||||
t_members = (n1kv_db_v2.get_trunk_members(
|
|
||||||
self.session, TEST_NETWORK_ID))
|
|
||||||
self.assertEqual(t_members,
|
|
||||||
[(TEST_NETWORK_ID2, '5')])
|
|
||||||
self.assertTrue(n1kv_db_v2.is_trunk_member(
|
|
||||||
self.session, TEST_NETWORK_ID2))
|
|
||||||
n1kv_db_v2.del_trunk_segment_binding(
|
|
||||||
self.session, TEST_NETWORK_ID,
|
|
||||||
[(TEST_NETWORK_ID2, '5')])
|
|
||||||
t_members = (n1kv_db_v2.get_multi_segment_members(
|
|
||||||
self.session, TEST_NETWORK_ID))
|
|
||||||
self.assertEqual(t_members, [])
|
|
||||||
|
|
||||||
|
|
||||||
class NetworkProfileTests(base.BaseTestCase,
|
|
||||||
n1kv_db_v2.NetworkProfile_db_mixin):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(NetworkProfileTests, self).setUp()
|
|
||||||
db.configure_db()
|
|
||||||
self.session = db.get_session()
|
|
||||||
self.addCleanup(db.clear_db)
|
|
||||||
|
|
||||||
def test_create_network_profile(self):
|
|
||||||
_db_profile = n1kv_db_v2.create_network_profile(self.session,
|
|
||||||
TEST_NETWORK_PROFILE)
|
|
||||||
self.assertIsNotNone(_db_profile)
|
|
||||||
db_profile = (self.session.query(n1kv_models_v2.NetworkProfile).
|
|
||||||
filter_by(name=TEST_NETWORK_PROFILE['name']).one())
|
|
||||||
self.assertIsNotNone(db_profile)
|
|
||||||
self.assertEqual(_db_profile.id, db_profile.id)
|
|
||||||
self.assertEqual(_db_profile.name, db_profile.name)
|
|
||||||
self.assertEqual(_db_profile.segment_type, db_profile.segment_type)
|
|
||||||
self.assertEqual(_db_profile.segment_range, db_profile.segment_range)
|
|
||||||
self.assertEqual(_db_profile.multicast_ip_index,
|
|
||||||
db_profile.multicast_ip_index)
|
|
||||||
self.assertEqual(_db_profile.multicast_ip_range,
|
|
||||||
db_profile.multicast_ip_range)
|
|
||||||
n1kv_db_v2.delete_network_profile(self.session, _db_profile.id)
|
|
||||||
|
|
||||||
def test_create_multi_segment_network_profile(self):
|
|
||||||
_db_profile = (n1kv_db_v2.create_network_profile(
|
|
||||||
self.session, TEST_NETWORK_PROFILE_MULTI_SEGMENT))
|
|
||||||
self.assertIsNotNone(_db_profile)
|
|
||||||
db_profile = (
|
|
||||||
self.session.query(
|
|
||||||
n1kv_models_v2.NetworkProfile).filter_by(
|
|
||||||
name=TEST_NETWORK_PROFILE_MULTI_SEGMENT['name'])
|
|
||||||
.one())
|
|
||||||
self.assertIsNotNone(db_profile)
|
|
||||||
self.assertEqual(_db_profile.id, db_profile.id)
|
|
||||||
self.assertEqual(_db_profile.name, db_profile.name)
|
|
||||||
self.assertEqual(_db_profile.segment_type, db_profile.segment_type)
|
|
||||||
self.assertEqual(_db_profile.segment_range, db_profile.segment_range)
|
|
||||||
self.assertEqual(_db_profile.multicast_ip_index,
|
|
||||||
db_profile.multicast_ip_index)
|
|
||||||
self.assertEqual(_db_profile.multicast_ip_range,
|
|
||||||
db_profile.multicast_ip_range)
|
|
||||||
n1kv_db_v2.delete_network_profile(self.session, _db_profile.id)
|
|
||||||
|
|
||||||
def test_create_vlan_trunk_network_profile(self):
|
|
||||||
_db_profile = (n1kv_db_v2.create_network_profile(
|
|
||||||
self.session, TEST_NETWORK_PROFILE_VLAN_TRUNK))
|
|
||||||
self.assertIsNotNone(_db_profile)
|
|
||||||
db_profile = (self.session.query(n1kv_models_v2.NetworkProfile).
|
|
||||||
filter_by(name=TEST_NETWORK_PROFILE_VLAN_TRUNK['name']).
|
|
||||||
one())
|
|
||||||
self.assertIsNotNone(db_profile)
|
|
||||||
self.assertEqual(_db_profile.id, db_profile.id)
|
|
||||||
self.assertEqual(_db_profile.name, db_profile.name)
|
|
||||||
self.assertEqual(_db_profile.segment_type, db_profile.segment_type)
|
|
||||||
self.assertEqual(_db_profile.segment_range, db_profile.segment_range)
|
|
||||||
self.assertEqual(_db_profile.multicast_ip_index,
|
|
||||||
db_profile.multicast_ip_index)
|
|
||||||
self.assertEqual(_db_profile.multicast_ip_range,
|
|
||||||
db_profile.multicast_ip_range)
|
|
||||||
self.assertEqual(_db_profile.sub_type, db_profile.sub_type)
|
|
||||||
n1kv_db_v2.delete_network_profile(self.session, _db_profile.id)
|
|
||||||
|
|
||||||
def test_create_vxlan_trunk_network_profile(self):
|
|
||||||
_db_profile = (n1kv_db_v2.create_network_profile(
|
|
||||||
self.session, TEST_NETWORK_PROFILE_VXLAN_TRUNK))
|
|
||||||
self.assertIsNotNone(_db_profile)
|
|
||||||
db_profile = (self.session.query(n1kv_models_v2.NetworkProfile).
|
|
||||||
filter_by(name=TEST_NETWORK_PROFILE_VXLAN_TRUNK['name']).
|
|
||||||
one())
|
|
||||||
self.assertIsNotNone(db_profile)
|
|
||||||
self.assertEqual(_db_profile.id, db_profile.id)
|
|
||||||
self.assertEqual(_db_profile.name, db_profile.name)
|
|
||||||
self.assertEqual(_db_profile.segment_type, db_profile.segment_type)
|
|
||||||
self.assertEqual(_db_profile.segment_range, db_profile.segment_range)
|
|
||||||
self.assertEqual(_db_profile.multicast_ip_index,
|
|
||||||
db_profile.multicast_ip_index)
|
|
||||||
self.assertEqual(_db_profile.multicast_ip_range,
|
|
||||||
db_profile.multicast_ip_range)
|
|
||||||
self.assertEqual(_db_profile.sub_type, db_profile.sub_type)
|
|
||||||
n1kv_db_v2.delete_network_profile(self.session, _db_profile.id)
|
|
||||||
|
|
||||||
def test_create_network_profile_overlap(self):
|
|
||||||
_db_profile = n1kv_db_v2.create_network_profile(self.session,
|
|
||||||
TEST_NETWORK_PROFILE_2)
|
|
||||||
ctx = context.get_admin_context()
|
|
||||||
TEST_NETWORK_PROFILE_2['name'] = 'net-profile-min-overlap'
|
|
||||||
TEST_NETWORK_PROFILE_2['segment_range'] = SEGMENT_RANGE_MIN_OVERLAP
|
|
||||||
test_net_profile = {'network_profile': TEST_NETWORK_PROFILE_2}
|
|
||||||
self.assertRaises(n_exc.InvalidInput,
|
|
||||||
self.create_network_profile,
|
|
||||||
ctx,
|
|
||||||
test_net_profile)
|
|
||||||
|
|
||||||
TEST_NETWORK_PROFILE_2['name'] = 'net-profile-max-overlap'
|
|
||||||
TEST_NETWORK_PROFILE_2['segment_range'] = SEGMENT_RANGE_MAX_OVERLAP
|
|
||||||
test_net_profile = {'network_profile': TEST_NETWORK_PROFILE_2}
|
|
||||||
self.assertRaises(n_exc.InvalidInput,
|
|
||||||
self.create_network_profile,
|
|
||||||
ctx,
|
|
||||||
test_net_profile)
|
|
||||||
|
|
||||||
TEST_NETWORK_PROFILE_2['name'] = 'net-profile-overlap'
|
|
||||||
TEST_NETWORK_PROFILE_2['segment_range'] = SEGMENT_RANGE_OVERLAP
|
|
||||||
test_net_profile = {'network_profile': TEST_NETWORK_PROFILE_2}
|
|
||||||
self.assertRaises(n_exc.InvalidInput,
|
|
||||||
self.create_network_profile,
|
|
||||||
ctx,
|
|
||||||
test_net_profile)
|
|
||||||
n1kv_db_v2.delete_network_profile(self.session, _db_profile.id)
|
|
||||||
|
|
||||||
def test_delete_network_profile(self):
|
|
||||||
try:
|
|
||||||
profile = (self.session.query(n1kv_models_v2.NetworkProfile).
|
|
||||||
filter_by(name=TEST_NETWORK_PROFILE['name']).one())
|
|
||||||
except s_exc.NoResultFound:
|
|
||||||
profile = n1kv_db_v2.create_network_profile(self.session,
|
|
||||||
TEST_NETWORK_PROFILE)
|
|
||||||
|
|
||||||
n1kv_db_v2.delete_network_profile(self.session, profile.id)
|
|
||||||
try:
|
|
||||||
self.session.query(n1kv_models_v2.NetworkProfile).filter_by(
|
|
||||||
name=TEST_NETWORK_PROFILE['name']).one()
|
|
||||||
except s_exc.NoResultFound:
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
self.fail("Network Profile (%s) was not deleted" %
|
|
||||||
TEST_NETWORK_PROFILE['name'])
|
|
||||||
|
|
||||||
def test_update_network_profile(self):
|
|
||||||
TEST_PROFILE_1 = {'name': 'test_profile_1'}
|
|
||||||
profile = _create_test_network_profile_if_not_there(self.session)
|
|
||||||
updated_profile = n1kv_db_v2.update_network_profile(self.session,
|
|
||||||
profile.id,
|
|
||||||
TEST_PROFILE_1)
|
|
||||||
self.assertEqual(updated_profile.name, TEST_PROFILE_1['name'])
|
|
||||||
n1kv_db_v2.delete_network_profile(self.session, profile.id)
|
|
||||||
|
|
||||||
def test_get_network_profile(self):
|
|
||||||
profile = n1kv_db_v2.create_network_profile(self.session,
|
|
||||||
TEST_NETWORK_PROFILE)
|
|
||||||
got_profile = n1kv_db_v2.get_network_profile(self.session, profile.id)
|
|
||||||
self.assertEqual(profile.id, got_profile.id)
|
|
||||||
self.assertEqual(profile.name, got_profile.name)
|
|
||||||
n1kv_db_v2.delete_network_profile(self.session, profile.id)
|
|
||||||
|
|
||||||
def test_get_network_profiles(self):
|
|
||||||
test_profiles = [{'name': 'test_profile1',
|
|
||||||
'segment_type': 'vlan',
|
|
||||||
'physical_network': 'phys1',
|
|
||||||
'segment_range': '200-210'},
|
|
||||||
{'name': 'test_profile2',
|
|
||||||
'segment_type': 'vlan',
|
|
||||||
'physical_network': 'phys1',
|
|
||||||
'segment_range': '211-220'},
|
|
||||||
{'name': 'test_profile3',
|
|
||||||
'segment_type': 'vlan',
|
|
||||||
'physical_network': 'phys1',
|
|
||||||
'segment_range': '221-230'},
|
|
||||||
{'name': 'test_profile4',
|
|
||||||
'segment_type': 'vlan',
|
|
||||||
'physical_network': 'phys1',
|
|
||||||
'segment_range': '231-240'},
|
|
||||||
{'name': 'test_profile5',
|
|
||||||
'segment_type': 'vlan',
|
|
||||||
'physical_network': 'phys1',
|
|
||||||
'segment_range': '241-250'},
|
|
||||||
{'name': 'test_profile6',
|
|
||||||
'segment_type': 'vlan',
|
|
||||||
'physical_network': 'phys1',
|
|
||||||
'segment_range': '251-260'},
|
|
||||||
{'name': 'test_profile7',
|
|
||||||
'segment_type': 'vlan',
|
|
||||||
'physical_network': 'phys1',
|
|
||||||
'segment_range': '261-270'}]
|
|
||||||
[n1kv_db_v2.create_network_profile(self.session, p)
|
|
||||||
for p in test_profiles]
|
|
||||||
# TODO(abhraut): Fix this test to work with real tenant_td
|
|
||||||
profiles = n1kv_db_v2._get_network_profiles(db_session=self.session)
|
|
||||||
self.assertEqual(len(test_profiles), len(list(profiles)))
|
|
||||||
|
|
||||||
|
|
||||||
class PolicyProfileTests(base.BaseTestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(PolicyProfileTests, self).setUp()
|
|
||||||
db.configure_db()
|
|
||||||
self.session = db.get_session()
|
|
||||||
self.addCleanup(db.clear_db)
|
|
||||||
|
|
||||||
def test_create_policy_profile(self):
|
|
||||||
_db_profile = n1kv_db_v2.create_policy_profile(TEST_POLICY_PROFILE)
|
|
||||||
self.assertIsNotNone(_db_profile)
|
|
||||||
db_profile = (self.session.query(n1kv_models_v2.PolicyProfile).
|
|
||||||
filter_by(name=TEST_POLICY_PROFILE['name']).one)()
|
|
||||||
self.assertIsNotNone(db_profile)
|
|
||||||
self.assertTrue(_db_profile.id == db_profile.id)
|
|
||||||
self.assertTrue(_db_profile.name == db_profile.name)
|
|
||||||
|
|
||||||
def test_delete_policy_profile(self):
|
|
||||||
profile = _create_test_policy_profile_if_not_there(self.session)
|
|
||||||
n1kv_db_v2.delete_policy_profile(profile.id)
|
|
||||||
try:
|
|
||||||
self.session.query(n1kv_models_v2.PolicyProfile).filter_by(
|
|
||||||
name=TEST_POLICY_PROFILE['name']).one()
|
|
||||||
except s_exc.NoResultFound:
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
self.fail("Policy Profile (%s) was not deleted" %
|
|
||||||
TEST_POLICY_PROFILE['name'])
|
|
||||||
|
|
||||||
def test_update_policy_profile(self):
|
|
||||||
TEST_PROFILE_1 = {'name': 'test_profile_1'}
|
|
||||||
profile = _create_test_policy_profile_if_not_there(self.session)
|
|
||||||
updated_profile = n1kv_db_v2.update_policy_profile(self.session,
|
|
||||||
profile.id,
|
|
||||||
TEST_PROFILE_1)
|
|
||||||
self.assertEqual(updated_profile.name, TEST_PROFILE_1['name'])
|
|
||||||
|
|
||||||
def test_get_policy_profile(self):
|
|
||||||
profile = _create_test_policy_profile_if_not_there(self.session)
|
|
||||||
got_profile = n1kv_db_v2.get_policy_profile(self.session, profile.id)
|
|
||||||
self.assertEqual(profile.id, got_profile.id)
|
|
||||||
self.assertEqual(profile.name, got_profile.name)
|
|
||||||
|
|
||||||
|
|
||||||
class ProfileBindingTests(base.BaseTestCase,
|
|
||||||
n1kv_db_v2.NetworkProfile_db_mixin,
|
|
||||||
db_base_plugin_v2.CommonDbMixin):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(ProfileBindingTests, self).setUp()
|
|
||||||
db.configure_db()
|
|
||||||
self.session = db.get_session()
|
|
||||||
self.addCleanup(db.clear_db)
|
|
||||||
|
|
||||||
def _create_test_binding_if_not_there(self, tenant_id, profile_id,
|
|
||||||
profile_type):
|
|
||||||
try:
|
|
||||||
_binding = (self.session.query(n1kv_models_v2.ProfileBinding).
|
|
||||||
filter_by(profile_type=profile_type,
|
|
||||||
tenant_id=tenant_id,
|
|
||||||
profile_id=profile_id).one())
|
|
||||||
except s_exc.NoResultFound:
|
|
||||||
_binding = n1kv_db_v2.create_profile_binding(self.session,
|
|
||||||
tenant_id,
|
|
||||||
profile_id,
|
|
||||||
profile_type)
|
|
||||||
return _binding
|
|
||||||
|
|
||||||
def test_create_profile_binding(self):
|
|
||||||
test_tenant_id = "d434dd90-76ec-11e2-bcfd-0800200c9a66"
|
|
||||||
test_profile_id = "dd7b9741-76ec-11e2-bcfd-0800200c9a66"
|
|
||||||
test_profile_type = "network"
|
|
||||||
n1kv_db_v2.create_profile_binding(self.session,
|
|
||||||
test_tenant_id,
|
|
||||||
test_profile_id,
|
|
||||||
test_profile_type)
|
|
||||||
try:
|
|
||||||
self.session.query(n1kv_models_v2.ProfileBinding).filter_by(
|
|
||||||
profile_type=test_profile_type,
|
|
||||||
tenant_id=test_tenant_id,
|
|
||||||
profile_id=test_profile_id).one()
|
|
||||||
except s_exc.MultipleResultsFound:
|
|
||||||
self.fail("Bindings must be unique")
|
|
||||||
except s_exc.NoResultFound:
|
|
||||||
self.fail("Could not create Profile Binding")
|
|
||||||
|
|
||||||
def test_get_profile_binding(self):
|
|
||||||
test_tenant_id = "d434dd90-76ec-11e2-bcfd-0800200c9a66"
|
|
||||||
test_profile_id = "dd7b9741-76ec-11e2-bcfd-0800200c9a66"
|
|
||||||
test_profile_type = "network"
|
|
||||||
self._create_test_binding_if_not_there(test_tenant_id,
|
|
||||||
test_profile_id,
|
|
||||||
test_profile_type)
|
|
||||||
binding = n1kv_db_v2.get_profile_binding(self.session,
|
|
||||||
test_tenant_id,
|
|
||||||
test_profile_id)
|
|
||||||
self.assertEqual(binding.tenant_id, test_tenant_id)
|
|
||||||
self.assertEqual(binding.profile_id, test_profile_id)
|
|
||||||
self.assertEqual(binding.profile_type, test_profile_type)
|
|
||||||
|
|
||||||
def test_get_profile_binding_not_found(self):
|
|
||||||
self.assertRaises(
|
|
||||||
c_exc.ProfileTenantBindingNotFound,
|
|
||||||
n1kv_db_v2.get_profile_binding, self.session, "123", "456")
|
|
||||||
|
|
||||||
def test_delete_profile_binding(self):
|
|
||||||
test_tenant_id = "d434dd90-76ec-11e2-bcfd-0800200c9a66"
|
|
||||||
test_profile_id = "dd7b9741-76ec-11e2-bcfd-0800200c9a66"
|
|
||||||
test_profile_type = "network"
|
|
||||||
self._create_test_binding_if_not_there(test_tenant_id,
|
|
||||||
test_profile_id,
|
|
||||||
test_profile_type)
|
|
||||||
n1kv_db_v2.delete_profile_binding(self.session,
|
|
||||||
test_tenant_id,
|
|
||||||
test_profile_id)
|
|
||||||
q = (self.session.query(n1kv_models_v2.ProfileBinding).filter_by(
|
|
||||||
profile_type=test_profile_type,
|
|
||||||
tenant_id=test_tenant_id,
|
|
||||||
profile_id=test_profile_id))
|
|
||||||
self.assertFalse(q.count())
|
|
||||||
|
|
||||||
def test_default_tenant_replace(self):
|
|
||||||
ctx = context.get_admin_context()
|
|
||||||
ctx.tenant_id = "d434dd90-76ec-11e2-bcfd-0800200c9a66"
|
|
||||||
test_profile_id = "AAAAAAAA-76ec-11e2-bcfd-0800200c9a66"
|
|
||||||
test_profile_type = "policy"
|
|
||||||
n1kv_db_v2.create_profile_binding(self.session,
|
|
||||||
cisco_constants.TENANT_ID_NOT_SET,
|
|
||||||
test_profile_id,
|
|
||||||
test_profile_type)
|
|
||||||
network_profile = {"network_profile": TEST_NETWORK_PROFILE}
|
|
||||||
self.create_network_profile(ctx, network_profile)
|
|
||||||
binding = n1kv_db_v2.get_profile_binding(self.session,
|
|
||||||
ctx.tenant_id,
|
|
||||||
test_profile_id)
|
|
||||||
self.assertRaises(
|
|
||||||
c_exc.ProfileTenantBindingNotFound,
|
|
||||||
n1kv_db_v2.get_profile_binding,
|
|
||||||
self.session,
|
|
||||||
cisco_constants.TENANT_ID_NOT_SET,
|
|
||||||
test_profile_id)
|
|
||||||
self.assertNotEqual(binding.tenant_id,
|
|
||||||
cisco_constants.TENANT_ID_NOT_SET)
|
|
|
@ -1,709 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 Cisco Systems, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Juergen Brendel, Cisco Systems Inc.
|
|
||||||
# @author: Abhishek Raut, Cisco Systems Inc.
|
|
||||||
# @author: Sourabh Patwardhan, Cisco Systems Inc.
|
|
||||||
|
|
||||||
import mock
|
|
||||||
|
|
||||||
from neutron.api import extensions as neutron_extensions
|
|
||||||
from neutron.api.v2 import attributes
|
|
||||||
from neutron import context
|
|
||||||
import neutron.db.api as db
|
|
||||||
from neutron.extensions import portbindings
|
|
||||||
from neutron import manager
|
|
||||||
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
|
|
||||||
from neutron.plugins.cisco.db import n1kv_db_v2
|
|
||||||
from neutron.plugins.cisco.db import network_db_v2 as cdb
|
|
||||||
from neutron.plugins.cisco import extensions
|
|
||||||
from neutron.plugins.cisco.extensions import n1kv
|
|
||||||
from neutron.plugins.cisco.extensions import network_profile
|
|
||||||
from neutron.plugins.cisco.n1kv import n1kv_client
|
|
||||||
from neutron.plugins.cisco.n1kv import n1kv_neutron_plugin
|
|
||||||
from neutron.tests.unit import _test_extension_portbindings as test_bindings
|
|
||||||
from neutron.tests.unit.cisco.n1kv import fake_client
|
|
||||||
from neutron.tests.unit import test_api_v2
|
|
||||||
from neutron.tests.unit import test_db_plugin as test_plugin
|
|
||||||
from neutron.tests.unit import test_l3_plugin
|
|
||||||
from neutron.tests.unit import test_l3_schedulers
|
|
||||||
|
|
||||||
|
|
||||||
PHYS_NET = 'some-phys-net'
|
|
||||||
VLAN_MIN = 100
|
|
||||||
VLAN_MAX = 110
|
|
||||||
|
|
||||||
|
|
||||||
class FakeResponse(object):
|
|
||||||
|
|
||||||
"""
|
|
||||||
This object is returned by mocked requests lib instead of normal response.
|
|
||||||
|
|
||||||
Initialize it with the status code, header and buffer contents you wish to
|
|
||||||
return.
|
|
||||||
|
|
||||||
"""
|
|
||||||
def __init__(self, status, response_text, headers):
|
|
||||||
self.buffer = response_text
|
|
||||||
self.status_code = status
|
|
||||||
self.headers = headers
|
|
||||||
|
|
||||||
def json(self, *args, **kwargs):
|
|
||||||
return self.buffer
|
|
||||||
|
|
||||||
|
|
||||||
def _fake_setup_vsm(self):
|
|
||||||
"""Fake establish Communication with Cisco Nexus1000V VSM."""
|
|
||||||
self.agent_vsm = True
|
|
||||||
self._populate_policy_profiles()
|
|
||||||
|
|
||||||
|
|
||||||
class NetworkProfileTestExtensionManager(object):
|
|
||||||
|
|
||||||
def get_resources(self):
|
|
||||||
# Add the resources to the global attribute map
|
|
||||||
# This is done here as the setup process won't
|
|
||||||
# initialize the main API router which extends
|
|
||||||
# the global attribute map
|
|
||||||
attributes.RESOURCE_ATTRIBUTE_MAP.update(
|
|
||||||
network_profile.RESOURCE_ATTRIBUTE_MAP)
|
|
||||||
return network_profile.Network_profile.get_resources()
|
|
||||||
|
|
||||||
def get_actions(self):
|
|
||||||
return []
|
|
||||||
|
|
||||||
def get_request_extensions(self):
|
|
||||||
return []
|
|
||||||
|
|
||||||
|
|
||||||
class N1kvPluginTestCase(test_plugin.NeutronDbPluginV2TestCase):
|
|
||||||
|
|
||||||
_plugin_name = ('neutron.plugins.cisco.n1kv.'
|
|
||||||
'n1kv_neutron_plugin.N1kvNeutronPluginV2')
|
|
||||||
|
|
||||||
tenant_id = "some_tenant"
|
|
||||||
|
|
||||||
DEFAULT_RESP_BODY = ""
|
|
||||||
DEFAULT_RESP_CODE = 200
|
|
||||||
DEFAULT_CONTENT_TYPE = ""
|
|
||||||
fmt = "json"
|
|
||||||
|
|
||||||
def _make_test_policy_profile(self, name='service_profile'):
|
|
||||||
"""
|
|
||||||
Create a policy profile record for testing purpose.
|
|
||||||
|
|
||||||
:param name: string representing the name of the policy profile to
|
|
||||||
create. Default argument value chosen to correspond to the
|
|
||||||
default name specified in config.py file.
|
|
||||||
"""
|
|
||||||
uuid = test_api_v2._uuid()
|
|
||||||
profile = {'id': uuid,
|
|
||||||
'name': name}
|
|
||||||
return n1kv_db_v2.create_policy_profile(profile)
|
|
||||||
|
|
||||||
def _make_test_profile(self,
|
|
||||||
name='default_network_profile',
|
|
||||||
segment_range='386-400'):
|
|
||||||
"""
|
|
||||||
Create a profile record for testing purposes.
|
|
||||||
|
|
||||||
:param name: string representing the name of the network profile to
|
|
||||||
create. Default argument value chosen to correspond to the
|
|
||||||
default name specified in config.py file.
|
|
||||||
:param segment_range: string representing the segment range for network
|
|
||||||
profile.
|
|
||||||
"""
|
|
||||||
db_session = db.get_session()
|
|
||||||
profile = {'name': name,
|
|
||||||
'segment_type': 'vlan',
|
|
||||||
'physical_network': PHYS_NET,
|
|
||||||
'tenant_id': self.tenant_id,
|
|
||||||
'segment_range': segment_range}
|
|
||||||
net_p = n1kv_db_v2.create_network_profile(db_session, profile)
|
|
||||||
n1kv_db_v2.sync_vlan_allocations(db_session, net_p)
|
|
||||||
return net_p
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
"""
|
|
||||||
Setup method for n1kv plugin tests.
|
|
||||||
|
|
||||||
First step is to define an acceptable response from the VSM to
|
|
||||||
our requests. This needs to be done BEFORE the setUp() function
|
|
||||||
of the super-class is called.
|
|
||||||
|
|
||||||
This default here works for many cases. If you need something
|
|
||||||
extra, please define your own setUp() function in your test class,
|
|
||||||
and set your DEFAULT_RESPONSE value also BEFORE calling the
|
|
||||||
setUp() of the super-function (this one here). If you have set
|
|
||||||
a value already, it will not be overwritten by this code.
|
|
||||||
|
|
||||||
"""
|
|
||||||
if not self.DEFAULT_RESP_BODY:
|
|
||||||
self.DEFAULT_RESP_BODY = {
|
|
||||||
"icehouse-pp": {"properties": {"name": "icehouse-pp",
|
|
||||||
"id": "some-uuid-1"}},
|
|
||||||
"havana_pp": {"properties": {"name": "havana_pp",
|
|
||||||
"id": "some-uuid-2"}},
|
|
||||||
"dhcp_pp": {"properties": {"name": "dhcp_pp",
|
|
||||||
"id": "some-uuid-3"}},
|
|
||||||
}
|
|
||||||
# Creating a mock HTTP connection object for requests lib. The N1KV
|
|
||||||
# client interacts with the VSM via HTTP. Since we don't have a VSM
|
|
||||||
# running in the unit tests, we need to 'fake' it by patching the HTTP
|
|
||||||
# library itself. We install a patch for a fake HTTP connection class.
|
|
||||||
# Using __name__ to avoid having to enter the full module path.
|
|
||||||
http_patcher = mock.patch(n1kv_client.requests.__name__ + ".request")
|
|
||||||
FakeHttpConnection = http_patcher.start()
|
|
||||||
# Now define the return values for a few functions that may be called
|
|
||||||
# on any instance of the fake HTTP connection class.
|
|
||||||
self.resp_headers = {"content-type": "application/json"}
|
|
||||||
FakeHttpConnection.return_value = (FakeResponse(
|
|
||||||
self.DEFAULT_RESP_CODE,
|
|
||||||
self.DEFAULT_RESP_BODY,
|
|
||||||
self.resp_headers))
|
|
||||||
|
|
||||||
# Patch some internal functions in a few other parts of the system.
|
|
||||||
# These help us move along, without having to mock up even more systems
|
|
||||||
# in the background.
|
|
||||||
|
|
||||||
# Return a dummy VSM IP address
|
|
||||||
mock.patch(n1kv_client.__name__ + ".Client._get_vsm_hosts",
|
|
||||||
new=lambda self: "127.0.0.1").start()
|
|
||||||
|
|
||||||
# Return dummy user profiles
|
|
||||||
mock.patch(cdb.__name__ + ".get_credential_name",
|
|
||||||
new=lambda self: {"user_name": "admin",
|
|
||||||
"password": "admin_password"}).start()
|
|
||||||
|
|
||||||
n1kv_neutron_plugin.N1kvNeutronPluginV2._setup_vsm = _fake_setup_vsm
|
|
||||||
|
|
||||||
neutron_extensions.append_api_extensions_path(extensions.__path__)
|
|
||||||
ext_mgr = NetworkProfileTestExtensionManager()
|
|
||||||
|
|
||||||
# Save the original RESOURCE_ATTRIBUTE_MAP
|
|
||||||
self.saved_attr_map = {}
|
|
||||||
for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.items():
|
|
||||||
self.saved_attr_map[resource] = attrs.copy()
|
|
||||||
# Update the RESOURCE_ATTRIBUTE_MAP with n1kv specific extended attrs.
|
|
||||||
attributes.RESOURCE_ATTRIBUTE_MAP["networks"].update(
|
|
||||||
n1kv.EXTENDED_ATTRIBUTES_2_0["networks"])
|
|
||||||
attributes.RESOURCE_ATTRIBUTE_MAP["ports"].update(
|
|
||||||
n1kv.EXTENDED_ATTRIBUTES_2_0["ports"])
|
|
||||||
self.addCleanup(self.restore_resource_attribute_map)
|
|
||||||
self.addCleanup(db.clear_db)
|
|
||||||
super(N1kvPluginTestCase, self).setUp(self._plugin_name,
|
|
||||||
ext_mgr=ext_mgr)
|
|
||||||
# Create some of the database entries that we require.
|
|
||||||
self._make_test_profile()
|
|
||||||
self._make_test_policy_profile()
|
|
||||||
|
|
||||||
def restore_resource_attribute_map(self):
|
|
||||||
# Restore the original RESOURCE_ATTRIBUTE_MAP
|
|
||||||
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
|
|
||||||
|
|
||||||
def test_plugin(self):
|
|
||||||
self._make_network('json',
|
|
||||||
'some_net',
|
|
||||||
True,
|
|
||||||
tenant_id=self.tenant_id,
|
|
||||||
set_context=True)
|
|
||||||
|
|
||||||
req = self.new_list_request('networks', params="fields=tenant_id")
|
|
||||||
req.environ['neutron.context'] = context.Context('', self.tenant_id)
|
|
||||||
res = req.get_response(self.api)
|
|
||||||
self.assertEqual(res.status_int, 200)
|
|
||||||
body = self.deserialize('json', res)
|
|
||||||
self.assertIn('tenant_id', body['networks'][0])
|
|
||||||
|
|
||||||
|
|
||||||
class TestN1kvNetworkProfiles(N1kvPluginTestCase):
|
|
||||||
def _prepare_net_profile_data(self, segment_type):
|
|
||||||
netp = {'network_profile': {'name': 'netp1',
|
|
||||||
'segment_type': segment_type,
|
|
||||||
'tenant_id': self.tenant_id}}
|
|
||||||
if segment_type == 'vlan':
|
|
||||||
netp['network_profile']['segment_range'] = '100-110'
|
|
||||||
netp['network_profile']['physical_network'] = PHYS_NET
|
|
||||||
elif segment_type == 'overlay':
|
|
||||||
netp['network_profile']['segment_range'] = '10000-10010'
|
|
||||||
netp['network_profile']['sub_type'] = 'enhanced' or 'native_vxlan'
|
|
||||||
netp['network_profile']['multicast_ip_range'] = ("224.1.1.1-"
|
|
||||||
"224.1.1.10")
|
|
||||||
elif segment_type == 'trunk':
|
|
||||||
netp['network_profile']['sub_type'] = 'vlan'
|
|
||||||
return netp
|
|
||||||
|
|
||||||
def test_create_network_profile_vlan(self):
|
|
||||||
data = self._prepare_net_profile_data('vlan')
|
|
||||||
net_p_req = self.new_create_request('network_profiles', data)
|
|
||||||
res = net_p_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(res.status_int, 201)
|
|
||||||
|
|
||||||
def test_create_network_profile_overlay(self):
|
|
||||||
data = self._prepare_net_profile_data('overlay')
|
|
||||||
net_p_req = self.new_create_request('network_profiles', data)
|
|
||||||
res = net_p_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(res.status_int, 201)
|
|
||||||
|
|
||||||
def test_create_network_profile_trunk(self):
|
|
||||||
data = self._prepare_net_profile_data('trunk')
|
|
||||||
net_p_req = self.new_create_request('network_profiles', data)
|
|
||||||
res = net_p_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(res.status_int, 201)
|
|
||||||
|
|
||||||
def test_create_network_profile_trunk_missing_subtype(self):
|
|
||||||
data = self._prepare_net_profile_data('trunk')
|
|
||||||
data['network_profile'].pop('sub_type')
|
|
||||||
net_p_req = self.new_create_request('network_profiles', data)
|
|
||||||
res = net_p_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(res.status_int, 400)
|
|
||||||
|
|
||||||
def test_create_network_profile_overlay_unreasonable_seg_range(self):
|
|
||||||
data = self._prepare_net_profile_data('overlay')
|
|
||||||
data['network_profile']['segment_range'] = '10000-100000000001'
|
|
||||||
net_p_req = self.new_create_request('network_profiles', data)
|
|
||||||
res = net_p_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(res.status_int, 400)
|
|
||||||
|
|
||||||
def test_update_network_profile_plugin(self):
|
|
||||||
net_p_dict = self._prepare_net_profile_data('overlay')
|
|
||||||
net_p_req = self.new_create_request('network_profiles', net_p_dict)
|
|
||||||
net_p = self.deserialize(self.fmt,
|
|
||||||
net_p_req.get_response(self.ext_api))
|
|
||||||
data = {'network_profile': {'name': 'netp2'}}
|
|
||||||
update_req = self.new_update_request('network_profiles',
|
|
||||||
data,
|
|
||||||
net_p['network_profile']['id'])
|
|
||||||
update_res = update_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(update_res.status_int, 200)
|
|
||||||
|
|
||||||
def test_update_network_profile_physical_network_fail(self):
|
|
||||||
net_p = self._make_test_profile(name='netp1')
|
|
||||||
data = {'network_profile': {'physical_network': PHYS_NET}}
|
|
||||||
net_p_req = self.new_update_request('network_profiles',
|
|
||||||
data,
|
|
||||||
net_p['id'])
|
|
||||||
res = net_p_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(res.status_int, 400)
|
|
||||||
|
|
||||||
def test_update_network_profile_segment_type_fail(self):
|
|
||||||
net_p = self._make_test_profile(name='netp1')
|
|
||||||
data = {'network_profile': {'segment_type': 'overlay'}}
|
|
||||||
net_p_req = self.new_update_request('network_profiles',
|
|
||||||
data,
|
|
||||||
net_p['id'])
|
|
||||||
res = net_p_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(res.status_int, 400)
|
|
||||||
|
|
||||||
def test_update_network_profile_sub_type_fail(self):
|
|
||||||
net_p_dict = self._prepare_net_profile_data('overlay')
|
|
||||||
net_p_req = self.new_create_request('network_profiles', net_p_dict)
|
|
||||||
net_p = self.deserialize(self.fmt,
|
|
||||||
net_p_req.get_response(self.ext_api))
|
|
||||||
data = {'network_profile': {'sub_type': 'vlan'}}
|
|
||||||
update_req = self.new_update_request('network_profiles',
|
|
||||||
data,
|
|
||||||
net_p['network_profile']['id'])
|
|
||||||
update_res = update_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(update_res.status_int, 400)
|
|
||||||
|
|
||||||
def test_update_network_profiles_with_networks_fail(self):
|
|
||||||
net_p = self._make_test_profile(name='netp1')
|
|
||||||
data = {'network_profile': {'segment_range': '200-210'}}
|
|
||||||
update_req = self.new_update_request('network_profiles',
|
|
||||||
data,
|
|
||||||
net_p['id'])
|
|
||||||
update_res = update_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(update_res.status_int, 200)
|
|
||||||
net_data = {'network': {'name': 'net1',
|
|
||||||
n1kv.PROFILE_ID: net_p['id'],
|
|
||||||
'tenant_id': 'some_tenant'}}
|
|
||||||
network_req = self.new_create_request('networks', net_data)
|
|
||||||
network_res = network_req.get_response(self.api)
|
|
||||||
self.assertEqual(network_res.status_int, 201)
|
|
||||||
data = {'network_profile': {'segment_range': '300-310'}}
|
|
||||||
update_req = self.new_update_request('network_profiles',
|
|
||||||
data,
|
|
||||||
net_p['id'])
|
|
||||||
update_res = update_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(update_res.status_int, 409)
|
|
||||||
|
|
||||||
def test_create_overlay_network_profile_invalid_multicast_fail(self):
|
|
||||||
net_p_dict = self._prepare_net_profile_data('overlay')
|
|
||||||
data = {'network_profile': {'sub_type': 'native_vxlan',
|
|
||||||
'multicast_ip_range': '1.1.1.1'}}
|
|
||||||
net_p_req = self.new_create_request('network_profiles', data,
|
|
||||||
net_p_dict)
|
|
||||||
res = net_p_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(res.status_int, 400)
|
|
||||||
|
|
||||||
def test_create_overlay_network_profile_no_multicast_fail(self):
|
|
||||||
net_p_dict = self._prepare_net_profile_data('overlay')
|
|
||||||
data = {'network_profile': {'sub_type': 'native_vxlan',
|
|
||||||
'multicast_ip_range': ''}}
|
|
||||||
net_p_req = self.new_create_request('network_profiles', data,
|
|
||||||
net_p_dict)
|
|
||||||
res = net_p_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(res.status_int, 400)
|
|
||||||
|
|
||||||
def test_create_overlay_network_profile_wrong_split_multicast_fail(self):
|
|
||||||
net_p_dict = self._prepare_net_profile_data('overlay')
|
|
||||||
data = {'network_profile': {
|
|
||||||
'sub_type': 'native_vxlan',
|
|
||||||
'multicast_ip_range': '224.1.1.1.224.1.1.3'}}
|
|
||||||
net_p_req = self.new_create_request('network_profiles', data,
|
|
||||||
net_p_dict)
|
|
||||||
res = net_p_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(res.status_int, 400)
|
|
||||||
|
|
||||||
def test_create_overlay_network_profile_invalid_minip_multicast_fail(self):
|
|
||||||
net_p_dict = self._prepare_net_profile_data('overlay')
|
|
||||||
data = {'network_profile': {
|
|
||||||
'sub_type': 'native_vxlan',
|
|
||||||
'multicast_ip_range': '10.0.0.1-224.1.1.3'}}
|
|
||||||
net_p_req = self.new_create_request('network_profiles', data,
|
|
||||||
net_p_dict)
|
|
||||||
res = net_p_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(res.status_int, 400)
|
|
||||||
|
|
||||||
def test_create_overlay_network_profile_invalid_maxip_multicast_fail(self):
|
|
||||||
net_p_dict = self._prepare_net_profile_data('overlay')
|
|
||||||
data = {'network_profile': {
|
|
||||||
'sub_type': 'native_vxlan',
|
|
||||||
'multicast_ip_range': '224.1.1.1-20.0.0.1'}}
|
|
||||||
net_p_req = self.new_create_request('network_profiles', data,
|
|
||||||
net_p_dict)
|
|
||||||
res = net_p_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(res.status_int, 400)
|
|
||||||
|
|
||||||
def test_create_overlay_network_profile_correct_multicast_pass(self):
|
|
||||||
data = self._prepare_net_profile_data('overlay')
|
|
||||||
net_p_req = self.new_create_request('network_profiles', data)
|
|
||||||
res = net_p_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(res.status_int, 201)
|
|
||||||
|
|
||||||
def test_update_overlay_network_profile_correct_multicast_pass(self):
|
|
||||||
data = self._prepare_net_profile_data('overlay')
|
|
||||||
net_p_req = self.new_create_request('network_profiles', data)
|
|
||||||
res = net_p_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(res.status_int, 201)
|
|
||||||
net_p = self.deserialize(self.fmt, res)
|
|
||||||
data = {'network_profile': {'multicast_ip_range':
|
|
||||||
'224.0.1.0-224.0.1.100'}}
|
|
||||||
update_req = self.new_update_request('network_profiles',
|
|
||||||
data,
|
|
||||||
net_p['network_profile']['id'])
|
|
||||||
update_res = update_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(update_res.status_int, 200)
|
|
||||||
|
|
||||||
def test_create_overlay_network_profile_reservedip_multicast_fail(self):
|
|
||||||
net_p_dict = self._prepare_net_profile_data('overlay')
|
|
||||||
data = {'network_profile': {'multicast_ip_range':
|
|
||||||
'224.0.0.100-224.0.1.100'}}
|
|
||||||
net_p_req = self.new_create_request('network_profiles', data,
|
|
||||||
net_p_dict)
|
|
||||||
res = net_p_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(res.status_int, 400)
|
|
||||||
|
|
||||||
def test_update_overlay_network_profile_reservedip_multicast_fail(self):
|
|
||||||
data = self._prepare_net_profile_data('overlay')
|
|
||||||
net_p_req = self.new_create_request('network_profiles', data)
|
|
||||||
res = net_p_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(res.status_int, 201)
|
|
||||||
net_p = self.deserialize(self.fmt, res)
|
|
||||||
data = {'network_profile': {'multicast_ip_range':
|
|
||||||
'224.0.0.11-224.0.0.111'}}
|
|
||||||
update_req = self.new_update_request('network_profiles',
|
|
||||||
data,
|
|
||||||
net_p['network_profile']['id'])
|
|
||||||
update_res = update_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(update_res.status_int, 400)
|
|
||||||
|
|
||||||
def test_update_vlan_network_profile_multicast_fail(self):
|
|
||||||
net_p = self._make_test_profile(name='netp1')
|
|
||||||
data = {'network_profile': {'multicast_ip_range':
|
|
||||||
'224.0.1.0-224.0.1.100'}}
|
|
||||||
update_req = self.new_update_request('network_profiles',
|
|
||||||
data,
|
|
||||||
net_p['id'])
|
|
||||||
update_res = update_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(update_res.status_int, 400)
|
|
||||||
|
|
||||||
def test_update_trunk_network_profile_segment_range_fail(self):
|
|
||||||
data = self._prepare_net_profile_data('trunk')
|
|
||||||
net_p_req = self.new_create_request('network_profiles', data)
|
|
||||||
res = net_p_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(res.status_int, 201)
|
|
||||||
net_p = self.deserialize(self.fmt, res)
|
|
||||||
data = {'network_profile': {'segment_range':
|
|
||||||
'100-200'}}
|
|
||||||
update_req = self.new_update_request('network_profiles',
|
|
||||||
data,
|
|
||||||
net_p['network_profile']['id'])
|
|
||||||
update_res = update_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(update_res.status_int, 400)
|
|
||||||
|
|
||||||
def test_update_trunk_network_profile_multicast_fail(self):
|
|
||||||
data = self._prepare_net_profile_data('trunk')
|
|
||||||
net_p_req = self.new_create_request('network_profiles', data)
|
|
||||||
res = net_p_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(res.status_int, 201)
|
|
||||||
net_p = self.deserialize(self.fmt, res)
|
|
||||||
data = {'network_profile': {'multicast_ip_range':
|
|
||||||
'224.0.1.0-224.0.1.100'}}
|
|
||||||
update_req = self.new_update_request('network_profiles',
|
|
||||||
data,
|
|
||||||
net_p['network_profile']['id'])
|
|
||||||
update_res = update_req.get_response(self.ext_api)
|
|
||||||
self.assertEqual(update_res.status_int, 400)
|
|
||||||
|
|
||||||
def test_create_network_profile_populate_vlan_segment_pool(self):
|
|
||||||
db_session = db.get_session()
|
|
||||||
net_p_dict = self._prepare_net_profile_data('vlan')
|
|
||||||
net_p_req = self.new_create_request('network_profiles', net_p_dict)
|
|
||||||
self.deserialize(self.fmt,
|
|
||||||
net_p_req.get_response(self.ext_api))
|
|
||||||
for vlan in range(VLAN_MIN, VLAN_MAX + 1):
|
|
||||||
self.assertIsNotNone(n1kv_db_v2.get_vlan_allocation(db_session,
|
|
||||||
PHYS_NET,
|
|
||||||
vlan))
|
|
||||||
self.assertFalse(n1kv_db_v2.get_vlan_allocation(db_session,
|
|
||||||
PHYS_NET,
|
|
||||||
vlan).allocated)
|
|
||||||
self.assertRaises(c_exc.VlanIDNotFound,
|
|
||||||
n1kv_db_v2.get_vlan_allocation,
|
|
||||||
db_session,
|
|
||||||
PHYS_NET,
|
|
||||||
VLAN_MIN - 1)
|
|
||||||
self.assertRaises(c_exc.VlanIDNotFound,
|
|
||||||
n1kv_db_v2.get_vlan_allocation,
|
|
||||||
db_session,
|
|
||||||
PHYS_NET,
|
|
||||||
VLAN_MAX + 1)
|
|
||||||
|
|
||||||
def test_delete_network_profile_with_network_fail(self):
|
|
||||||
net_p = self._make_test_profile(name='netp1')
|
|
||||||
net_data = {'network': {'name': 'net1',
|
|
||||||
n1kv.PROFILE_ID: net_p['id'],
|
|
||||||
'tenant_id': 'some_tenant'}}
|
|
||||||
network_req = self.new_create_request('networks', net_data)
|
|
||||||
network_res = network_req.get_response(self.api)
|
|
||||||
self.assertEqual(network_res.status_int, 201)
|
|
||||||
self._delete('network_profiles', net_p['id'],
|
|
||||||
expected_code=409)
|
|
||||||
|
|
||||||
def test_delete_network_profile_deallocate_vlan_segment_pool(self):
|
|
||||||
db_session = db.get_session()
|
|
||||||
net_p_dict = self._prepare_net_profile_data('vlan')
|
|
||||||
net_p_req = self.new_create_request('network_profiles', net_p_dict)
|
|
||||||
net_p = self.deserialize(self.fmt,
|
|
||||||
net_p_req.get_response(self.ext_api))
|
|
||||||
self.assertIsNotNone(n1kv_db_v2.get_vlan_allocation(db_session,
|
|
||||||
PHYS_NET,
|
|
||||||
VLAN_MIN))
|
|
||||||
self._delete('network_profiles', net_p['network_profile']['id'])
|
|
||||||
for vlan in range(VLAN_MIN, VLAN_MAX + 1):
|
|
||||||
self.assertRaises(c_exc.VlanIDNotFound,
|
|
||||||
n1kv_db_v2.get_vlan_allocation,
|
|
||||||
db_session,
|
|
||||||
PHYS_NET,
|
|
||||||
vlan)
|
|
||||||
|
|
||||||
|
|
||||||
class TestN1kvBasicGet(test_plugin.TestBasicGet,
|
|
||||||
N1kvPluginTestCase):
|
|
||||||
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestN1kvHTTPResponse(test_plugin.TestV2HTTPResponse,
|
|
||||||
N1kvPluginTestCase):
|
|
||||||
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestN1kvPorts(test_plugin.TestPortsV2,
|
|
||||||
N1kvPluginTestCase,
|
|
||||||
test_bindings.PortBindingsTestCase):
|
|
||||||
VIF_TYPE = portbindings.VIF_TYPE_OVS
|
|
||||||
HAS_PORT_FILTER = False
|
|
||||||
|
|
||||||
def test_create_port_with_default_n1kv_policy_profile_id(self):
|
|
||||||
"""Test port create without passing policy profile id."""
|
|
||||||
with self.port() as port:
|
|
||||||
db_session = db.get_session()
|
|
||||||
pp = n1kv_db_v2.get_policy_profile(
|
|
||||||
db_session, port['port'][n1kv.PROFILE_ID])
|
|
||||||
self.assertEqual(pp['name'], 'service_profile')
|
|
||||||
|
|
||||||
def test_create_port_with_n1kv_policy_profile_id(self):
|
|
||||||
"""Test port create with policy profile id."""
|
|
||||||
profile_obj = self._make_test_policy_profile(name='test_profile')
|
|
||||||
with self.network() as network:
|
|
||||||
data = {'port': {n1kv.PROFILE_ID: profile_obj.id,
|
|
||||||
'tenant_id': self.tenant_id,
|
|
||||||
'network_id': network['network']['id']}}
|
|
||||||
port_req = self.new_create_request('ports', data)
|
|
||||||
port = self.deserialize(self.fmt,
|
|
||||||
port_req.get_response(self.api))
|
|
||||||
self.assertEqual(port['port'][n1kv.PROFILE_ID],
|
|
||||||
profile_obj.id)
|
|
||||||
self._delete('ports', port['port']['id'])
|
|
||||||
|
|
||||||
def test_update_port_with_n1kv_policy_profile_id(self):
|
|
||||||
"""Test port update failure while updating policy profile id."""
|
|
||||||
with self.port() as port:
|
|
||||||
data = {'port': {n1kv.PROFILE_ID: 'some-profile-uuid'}}
|
|
||||||
port_req = self.new_update_request('ports',
|
|
||||||
data,
|
|
||||||
port['port']['id'])
|
|
||||||
res = port_req.get_response(self.api)
|
|
||||||
# Port update should fail to update policy profile id.
|
|
||||||
self.assertEqual(res.status_int, 400)
|
|
||||||
|
|
||||||
def test_create_first_port_invalid_parameters_fail(self):
|
|
||||||
"""Test parameters for first port create sent to the VSM."""
|
|
||||||
profile_obj = self._make_test_policy_profile(name='test_profile')
|
|
||||||
with self.network() as network:
|
|
||||||
client_patch = mock.patch(n1kv_client.__name__ + ".Client",
|
|
||||||
new=fake_client.TestClientInvalidRequest)
|
|
||||||
client_patch.start()
|
|
||||||
data = {'port': {n1kv.PROFILE_ID: profile_obj.id,
|
|
||||||
'tenant_id': self.tenant_id,
|
|
||||||
'network_id': network['network']['id'],
|
|
||||||
}}
|
|
||||||
port_req = self.new_create_request('ports', data)
|
|
||||||
res = port_req.get_response(self.api)
|
|
||||||
self.assertEqual(res.status_int, 500)
|
|
||||||
client_patch.stop()
|
|
||||||
|
|
||||||
def test_create_next_port_invalid_parameters_fail(self):
|
|
||||||
"""Test parameters for subsequent port create sent to the VSM."""
|
|
||||||
with self.port() as port:
|
|
||||||
client_patch = mock.patch(n1kv_client.__name__ + ".Client",
|
|
||||||
new=fake_client.TestClientInvalidRequest)
|
|
||||||
client_patch.start()
|
|
||||||
data = {'port': {n1kv.PROFILE_ID: port['port']['n1kv:profile_id'],
|
|
||||||
'tenant_id': port['port']['tenant_id'],
|
|
||||||
'network_id': port['port']['network_id']}}
|
|
||||||
port_req = self.new_create_request('ports', data)
|
|
||||||
res = port_req.get_response(self.api)
|
|
||||||
self.assertEqual(res.status_int, 500)
|
|
||||||
client_patch.stop()
|
|
||||||
|
|
||||||
|
|
||||||
class TestN1kvPolicyProfiles(N1kvPluginTestCase):
|
|
||||||
def test_populate_policy_profile(self):
|
|
||||||
client_patch = mock.patch(n1kv_client.__name__ + ".Client",
|
|
||||||
new=fake_client.TestClient)
|
|
||||||
client_patch.start()
|
|
||||||
instance = n1kv_neutron_plugin.N1kvNeutronPluginV2()
|
|
||||||
instance._populate_policy_profiles()
|
|
||||||
db_session = db.get_session()
|
|
||||||
profile = n1kv_db_v2.get_policy_profile(
|
|
||||||
db_session, '00000000-0000-0000-0000-000000000001')
|
|
||||||
self.assertEqual('pp-1', profile['name'])
|
|
||||||
client_patch.stop()
|
|
||||||
|
|
||||||
def test_populate_policy_profile_delete(self):
|
|
||||||
# Patch the Client class with the TestClient class
|
|
||||||
with mock.patch(n1kv_client.__name__ + ".Client",
|
|
||||||
new=fake_client.TestClient):
|
|
||||||
# Patch the _get_total_profiles() method to return a custom value
|
|
||||||
with mock.patch(fake_client.__name__ +
|
|
||||||
'.TestClient._get_total_profiles') as obj_inst:
|
|
||||||
# Return 3 policy profiles
|
|
||||||
obj_inst.return_value = 3
|
|
||||||
plugin = manager.NeutronManager.get_plugin()
|
|
||||||
plugin._populate_policy_profiles()
|
|
||||||
db_session = db.get_session()
|
|
||||||
profile = n1kv_db_v2.get_policy_profile(
|
|
||||||
db_session, '00000000-0000-0000-0000-000000000001')
|
|
||||||
# Verify that DB contains only 3 policy profiles
|
|
||||||
self.assertEqual('pp-1', profile['name'])
|
|
||||||
profile = n1kv_db_v2.get_policy_profile(
|
|
||||||
db_session, '00000000-0000-0000-0000-000000000002')
|
|
||||||
self.assertEqual('pp-2', profile['name'])
|
|
||||||
profile = n1kv_db_v2.get_policy_profile(
|
|
||||||
db_session, '00000000-0000-0000-0000-000000000003')
|
|
||||||
self.assertEqual('pp-3', profile['name'])
|
|
||||||
self.assertRaises(c_exc.PolicyProfileIdNotFound,
|
|
||||||
n1kv_db_v2.get_policy_profile,
|
|
||||||
db_session,
|
|
||||||
'00000000-0000-0000-0000-000000000004')
|
|
||||||
# Return 2 policy profiles
|
|
||||||
obj_inst.return_value = 2
|
|
||||||
plugin._populate_policy_profiles()
|
|
||||||
# Verify that the third policy profile is deleted
|
|
||||||
self.assertRaises(c_exc.PolicyProfileIdNotFound,
|
|
||||||
n1kv_db_v2.get_policy_profile,
|
|
||||||
db_session,
|
|
||||||
'00000000-0000-0000-0000-000000000003')
|
|
||||||
|
|
||||||
|
|
||||||
class TestN1kvNetworks(test_plugin.TestNetworksV2,
|
|
||||||
N1kvPluginTestCase):
|
|
||||||
|
|
||||||
def _prepare_net_data(self, net_profile_id):
|
|
||||||
return {'network': {'name': 'net1',
|
|
||||||
n1kv.PROFILE_ID: net_profile_id,
|
|
||||||
'tenant_id': self.tenant_id}}
|
|
||||||
|
|
||||||
def test_create_network_with_default_n1kv_network_profile_id(self):
|
|
||||||
"""Test network create without passing network profile id."""
|
|
||||||
with self.network() as network:
|
|
||||||
db_session = db.get_session()
|
|
||||||
np = n1kv_db_v2.get_network_profile(
|
|
||||||
db_session, network['network'][n1kv.PROFILE_ID])
|
|
||||||
self.assertEqual(np['name'], 'default_network_profile')
|
|
||||||
|
|
||||||
def test_create_network_with_n1kv_network_profile_id(self):
|
|
||||||
"""Test network create with network profile id."""
|
|
||||||
profile_obj = self._make_test_profile(name='test_profile')
|
|
||||||
data = self._prepare_net_data(profile_obj.id)
|
|
||||||
network_req = self.new_create_request('networks', data)
|
|
||||||
network = self.deserialize(self.fmt,
|
|
||||||
network_req.get_response(self.api))
|
|
||||||
self.assertEqual(network['network'][n1kv.PROFILE_ID],
|
|
||||||
profile_obj.id)
|
|
||||||
|
|
||||||
def test_update_network_with_n1kv_network_profile_id(self):
|
|
||||||
"""Test network update failure while updating network profile id."""
|
|
||||||
with self.network() as network:
|
|
||||||
data = {'network': {n1kv.PROFILE_ID: 'some-profile-uuid'}}
|
|
||||||
network_req = self.new_update_request('networks',
|
|
||||||
data,
|
|
||||||
network['network']['id'])
|
|
||||||
res = network_req.get_response(self.api)
|
|
||||||
# Network update should fail to update network profile id.
|
|
||||||
self.assertEqual(res.status_int, 400)
|
|
||||||
|
|
||||||
|
|
||||||
class TestN1kvSubnets(test_plugin.TestSubnetsV2,
|
|
||||||
N1kvPluginTestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestN1kvSubnets, self).setUp()
|
|
||||||
|
|
||||||
|
|
||||||
class TestN1kvL3Test(test_l3_plugin.L3NatExtensionTestCase):
|
|
||||||
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestN1kvL3SchedulersTest(test_l3_schedulers.L3SchedulerTestCase):
|
|
||||||
|
|
||||||
pass
|
|
|
@ -1,72 +0,0 @@
|
||||||
# Copyright (c) 2013 Cisco Systems Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import mock
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from neutron.plugins.cisco.common import config as cisco_config
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
class TestCiscoNexusPluginConfig(base.BaseTestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
# Point neutron config file to: neutron/tests/etc/neutron.conf.test
|
|
||||||
self.config_parse()
|
|
||||||
|
|
||||||
super(TestCiscoNexusPluginConfig, self).setUp()
|
|
||||||
|
|
||||||
def test_config_parse_error(self):
|
|
||||||
"""Check that config error is raised upon config parser failure."""
|
|
||||||
with mock.patch.object(cfg, 'MultiConfigParser') as parser:
|
|
||||||
parser.return_value.read.return_value = []
|
|
||||||
self.assertRaises(cfg.Error, cisco_config.CiscoConfigOptions)
|
|
||||||
|
|
||||||
def test_create_device_dictionary(self):
|
|
||||||
"""Test creation of the device dictionary based on nexus config."""
|
|
||||||
test_config = {
|
|
||||||
'NEXUS_SWITCH:1.1.1.1': {
|
|
||||||
'username': ['admin'],
|
|
||||||
'password': ['mySecretPassword'],
|
|
||||||
'ssh_port': [22],
|
|
||||||
'compute1': ['1/1'],
|
|
||||||
'compute2': ['1/2'],
|
|
||||||
},
|
|
||||||
'NEXUS_SWITCH:2.2.2.2': {
|
|
||||||
'username': ['admin'],
|
|
||||||
'password': ['mySecretPassword'],
|
|
||||||
'ssh_port': [22],
|
|
||||||
'compute3': ['1/1'],
|
|
||||||
'compute4': ['1/2'],
|
|
||||||
},
|
|
||||||
}
|
|
||||||
expected_dev_dict = {
|
|
||||||
('NEXUS_SWITCH', '1.1.1.1', 'username'): 'admin',
|
|
||||||
('NEXUS_SWITCH', '1.1.1.1', 'password'): 'mySecretPassword',
|
|
||||||
('NEXUS_SWITCH', '1.1.1.1', 'ssh_port'): 22,
|
|
||||||
('NEXUS_SWITCH', '1.1.1.1', 'compute1'): '1/1',
|
|
||||||
('NEXUS_SWITCH', '1.1.1.1', 'compute2'): '1/2',
|
|
||||||
('NEXUS_SWITCH', '2.2.2.2', 'username'): 'admin',
|
|
||||||
('NEXUS_SWITCH', '2.2.2.2', 'password'): 'mySecretPassword',
|
|
||||||
('NEXUS_SWITCH', '2.2.2.2', 'ssh_port'): 22,
|
|
||||||
('NEXUS_SWITCH', '2.2.2.2', 'compute3'): '1/1',
|
|
||||||
('NEXUS_SWITCH', '2.2.2.2', 'compute4'): '1/2',
|
|
||||||
}
|
|
||||||
with mock.patch.object(cfg, 'MultiConfigParser') as parser:
|
|
||||||
parser.return_value.read.return_value = cfg.CONF.config_file
|
|
||||||
parser.return_value.parsed = [test_config]
|
|
||||||
cisco_config.CiscoConfigOptions()
|
|
||||||
self.assertEqual(cisco_config.device_dictionary,
|
|
||||||
expected_dev_dict)
|
|
|
@ -1,291 +0,0 @@
|
||||||
# Copyright (c) 2013 OpenStack Foundation
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import collections
|
|
||||||
import mock
|
|
||||||
import testtools
|
|
||||||
|
|
||||||
from neutron.db import api as db
|
|
||||||
from neutron.plugins.cisco.common import cisco_constants
|
|
||||||
from neutron.plugins.cisco.common import cisco_credentials_v2
|
|
||||||
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
|
|
||||||
from neutron.plugins.cisco.common import config as config
|
|
||||||
from neutron.plugins.cisco.db import network_db_v2 as cdb
|
|
||||||
from neutron.plugins.cisco import network_plugin
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
class CiscoNetworkDbTest(base.BaseTestCase):
|
|
||||||
|
|
||||||
"""Base class for Cisco network database unit tests."""
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(CiscoNetworkDbTest, self).setUp()
|
|
||||||
db.configure_db()
|
|
||||||
|
|
||||||
# The Cisco network plugin includes a thin layer of QoS and
|
|
||||||
# credential API methods which indirectly call Cisco QoS and
|
|
||||||
# credential database access methods. For better code coverage,
|
|
||||||
# this test suite will make calls to the QoS and credential database
|
|
||||||
# access methods indirectly through the network plugin. The network
|
|
||||||
# plugin's init function can be mocked out for this purpose.
|
|
||||||
def new_network_plugin_init(instance):
|
|
||||||
pass
|
|
||||||
with mock.patch.object(network_plugin.PluginV2,
|
|
||||||
'__init__', new=new_network_plugin_init):
|
|
||||||
self._network_plugin = network_plugin.PluginV2()
|
|
||||||
|
|
||||||
self.addCleanup(db.clear_db)
|
|
||||||
|
|
||||||
|
|
||||||
class CiscoNetworkQosDbTest(CiscoNetworkDbTest):
|
|
||||||
|
|
||||||
"""Unit tests for Cisco network QoS database model."""
|
|
||||||
|
|
||||||
QosObj = collections.namedtuple('QosObj', 'tenant qname desc')
|
|
||||||
|
|
||||||
def _qos_test_obj(self, tnum, qnum, desc=None):
|
|
||||||
"""Create a Qos test object from a pair of numbers."""
|
|
||||||
if desc is None:
|
|
||||||
desc = 'test qos %s-%s' % (str(tnum), str(qnum))
|
|
||||||
tenant = 'tenant_%s' % str(tnum)
|
|
||||||
qname = 'qos_%s' % str(qnum)
|
|
||||||
return self.QosObj(tenant, qname, desc)
|
|
||||||
|
|
||||||
def _assert_equal(self, qos, qos_obj):
|
|
||||||
self.assertEqual(qos.tenant_id, qos_obj.tenant)
|
|
||||||
self.assertEqual(qos.qos_name, qos_obj.qname)
|
|
||||||
self.assertEqual(qos.qos_desc, qos_obj.desc)
|
|
||||||
|
|
||||||
def test_qos_add_remove(self):
|
|
||||||
qos11 = self._qos_test_obj(1, 1)
|
|
||||||
qos = self._network_plugin.create_qos(qos11.tenant, qos11.qname,
|
|
||||||
qos11.desc)
|
|
||||||
self._assert_equal(qos, qos11)
|
|
||||||
qos_id = qos.qos_id
|
|
||||||
qos = self._network_plugin.delete_qos(qos11.tenant, qos_id)
|
|
||||||
self._assert_equal(qos, qos11)
|
|
||||||
qos = self._network_plugin.delete_qos(qos11.tenant, qos_id)
|
|
||||||
self.assertIsNone(qos)
|
|
||||||
|
|
||||||
def test_qos_add_dup(self):
|
|
||||||
qos22 = self._qos_test_obj(2, 2)
|
|
||||||
qos = self._network_plugin.create_qos(qos22.tenant, qos22.qname,
|
|
||||||
qos22.desc)
|
|
||||||
self._assert_equal(qos, qos22)
|
|
||||||
qos_id = qos.qos_id
|
|
||||||
with testtools.ExpectedException(c_exc.QosNameAlreadyExists):
|
|
||||||
self._network_plugin.create_qos(qos22.tenant, qos22.qname,
|
|
||||||
"duplicate 22")
|
|
||||||
qos = self._network_plugin.delete_qos(qos22.tenant, qos_id)
|
|
||||||
self._assert_equal(qos, qos22)
|
|
||||||
qos = self._network_plugin.delete_qos(qos22.tenant, qos_id)
|
|
||||||
self.assertIsNone(qos)
|
|
||||||
|
|
||||||
def test_qos_get(self):
|
|
||||||
qos11 = self._qos_test_obj(1, 1)
|
|
||||||
qos11_id = self._network_plugin.create_qos(qos11.tenant, qos11.qname,
|
|
||||||
qos11.desc).qos_id
|
|
||||||
qos21 = self._qos_test_obj(2, 1)
|
|
||||||
qos21_id = self._network_plugin.create_qos(qos21.tenant, qos21.qname,
|
|
||||||
qos21.desc).qos_id
|
|
||||||
qos22 = self._qos_test_obj(2, 2)
|
|
||||||
qos22_id = self._network_plugin.create_qos(qos22.tenant, qos22.qname,
|
|
||||||
qos22.desc).qos_id
|
|
||||||
|
|
||||||
qos = self._network_plugin.get_qos_details(qos11.tenant, qos11_id)
|
|
||||||
self._assert_equal(qos, qos11)
|
|
||||||
qos = self._network_plugin.get_qos_details(qos21.tenant, qos21_id)
|
|
||||||
self._assert_equal(qos, qos21)
|
|
||||||
qos = self._network_plugin.get_qos_details(qos21.tenant, qos22_id)
|
|
||||||
self._assert_equal(qos, qos22)
|
|
||||||
|
|
||||||
with testtools.ExpectedException(c_exc.QosNotFound):
|
|
||||||
self._network_plugin.get_qos_details(qos11.tenant, "dummyQosId")
|
|
||||||
with testtools.ExpectedException(c_exc.QosNotFound):
|
|
||||||
self._network_plugin.get_qos_details(qos11.tenant, qos21_id)
|
|
||||||
with testtools.ExpectedException(c_exc.QosNotFound):
|
|
||||||
self._network_plugin.get_qos_details(qos21.tenant, qos11_id)
|
|
||||||
|
|
||||||
qos_all_t1 = self._network_plugin.get_all_qoss(qos11.tenant)
|
|
||||||
self.assertEqual(len(qos_all_t1), 1)
|
|
||||||
qos_all_t2 = self._network_plugin.get_all_qoss(qos21.tenant)
|
|
||||||
self.assertEqual(len(qos_all_t2), 2)
|
|
||||||
qos_all_t3 = self._network_plugin.get_all_qoss("tenant3")
|
|
||||||
self.assertEqual(len(qos_all_t3), 0)
|
|
||||||
|
|
||||||
def test_qos_update(self):
|
|
||||||
qos11 = self._qos_test_obj(1, 1)
|
|
||||||
qos11_id = self._network_plugin.create_qos(qos11.tenant, qos11.qname,
|
|
||||||
qos11.desc).qos_id
|
|
||||||
self._network_plugin.rename_qos(qos11.tenant, qos11_id,
|
|
||||||
new_name=None)
|
|
||||||
new_qname = "new qos name"
|
|
||||||
new_qos = self._network_plugin.rename_qos(qos11.tenant, qos11_id,
|
|
||||||
new_qname)
|
|
||||||
expected_qobj = self.QosObj(qos11.tenant, new_qname, qos11.desc)
|
|
||||||
self._assert_equal(new_qos, expected_qobj)
|
|
||||||
new_qos = self._network_plugin.get_qos_details(qos11.tenant, qos11_id)
|
|
||||||
self._assert_equal(new_qos, expected_qobj)
|
|
||||||
with testtools.ExpectedException(c_exc.QosNotFound):
|
|
||||||
self._network_plugin.rename_qos(qos11.tenant, "dummyQosId",
|
|
||||||
new_name=None)
|
|
||||||
|
|
||||||
|
|
||||||
class CiscoNetworkCredentialDbTest(CiscoNetworkDbTest):
|
|
||||||
|
|
||||||
"""Unit tests for Cisco network credentials database model."""
|
|
||||||
|
|
||||||
CredObj = collections.namedtuple('CredObj', 'cname usr pwd ctype')
|
|
||||||
|
|
||||||
def _cred_test_obj(self, tnum, cnum):
|
|
||||||
"""Create a Credential test object from a pair of numbers."""
|
|
||||||
cname = 'credential_%s_%s' % (str(tnum), str(cnum))
|
|
||||||
usr = 'User_%s_%s' % (str(tnum), str(cnum))
|
|
||||||
pwd = 'Password_%s_%s' % (str(tnum), str(cnum))
|
|
||||||
ctype = 'ctype_%s' % str(tnum)
|
|
||||||
return self.CredObj(cname, usr, pwd, ctype)
|
|
||||||
|
|
||||||
def _assert_equal(self, credential, cred_obj):
|
|
||||||
self.assertEqual(credential.type, cred_obj.ctype)
|
|
||||||
self.assertEqual(credential.credential_name, cred_obj.cname)
|
|
||||||
self.assertEqual(credential.user_name, cred_obj.usr)
|
|
||||||
self.assertEqual(credential.password, cred_obj.pwd)
|
|
||||||
|
|
||||||
def test_credential_add_remove(self):
|
|
||||||
cred11 = self._cred_test_obj(1, 1)
|
|
||||||
cred = cdb.add_credential(
|
|
||||||
cred11.cname, cred11.usr, cred11.pwd, cred11.ctype)
|
|
||||||
self._assert_equal(cred, cred11)
|
|
||||||
cred_id = cred.credential_id
|
|
||||||
cred = cdb.remove_credential(cred_id)
|
|
||||||
self._assert_equal(cred, cred11)
|
|
||||||
cred = cdb.remove_credential(cred_id)
|
|
||||||
self.assertIsNone(cred)
|
|
||||||
|
|
||||||
def test_credential_add_dup(self):
|
|
||||||
cred22 = self._cred_test_obj(2, 2)
|
|
||||||
cred = cdb.add_credential(
|
|
||||||
cred22.cname, cred22.usr, cred22.pwd, cred22.ctype)
|
|
||||||
self._assert_equal(cred, cred22)
|
|
||||||
cred_id = cred.credential_id
|
|
||||||
with testtools.ExpectedException(c_exc.CredentialAlreadyExists):
|
|
||||||
cdb.add_credential(
|
|
||||||
cred22.cname, cred22.usr, cred22.pwd, cred22.ctype)
|
|
||||||
cred = cdb.remove_credential(cred_id)
|
|
||||||
self._assert_equal(cred, cred22)
|
|
||||||
cred = cdb.remove_credential(cred_id)
|
|
||||||
self.assertIsNone(cred)
|
|
||||||
|
|
||||||
def test_credential_get_id(self):
|
|
||||||
cred11 = self._cred_test_obj(1, 1)
|
|
||||||
cred11_id = cdb.add_credential(
|
|
||||||
cred11.cname, cred11.usr, cred11.pwd, cred11.ctype).credential_id
|
|
||||||
cred21 = self._cred_test_obj(2, 1)
|
|
||||||
cred21_id = cdb.add_credential(
|
|
||||||
cred21.cname, cred21.usr, cred21.pwd, cred21.ctype).credential_id
|
|
||||||
cred22 = self._cred_test_obj(2, 2)
|
|
||||||
cred22_id = cdb.add_credential(
|
|
||||||
cred22.cname, cred22.usr, cred22.pwd, cred22.ctype).credential_id
|
|
||||||
|
|
||||||
cred = self._network_plugin.get_credential_details(cred11_id)
|
|
||||||
self._assert_equal(cred, cred11)
|
|
||||||
cred = self._network_plugin.get_credential_details(cred21_id)
|
|
||||||
self._assert_equal(cred, cred21)
|
|
||||||
cred = self._network_plugin.get_credential_details(cred22_id)
|
|
||||||
self._assert_equal(cred, cred22)
|
|
||||||
|
|
||||||
with testtools.ExpectedException(c_exc.CredentialNotFound):
|
|
||||||
self._network_plugin.get_credential_details("dummyCredentialId")
|
|
||||||
|
|
||||||
cred_all_t1 = self._network_plugin.get_all_credentials()
|
|
||||||
self.assertEqual(len(cred_all_t1), 3)
|
|
||||||
|
|
||||||
def test_credential_get_name(self):
|
|
||||||
cred11 = self._cred_test_obj(1, 1)
|
|
||||||
cred11_id = cdb.add_credential(
|
|
||||||
cred11.cname, cred11.usr, cred11.pwd, cred11.ctype).credential_id
|
|
||||||
cred21 = self._cred_test_obj(2, 1)
|
|
||||||
cred21_id = cdb.add_credential(
|
|
||||||
cred21.cname, cred21.usr, cred21.pwd, cred21.ctype).credential_id
|
|
||||||
cred22 = self._cred_test_obj(2, 2)
|
|
||||||
cred22_id = cdb.add_credential(
|
|
||||||
cred22.cname, cred22.usr, cred22.pwd, cred22.ctype).credential_id
|
|
||||||
self.assertNotEqual(cred11_id, cred21_id)
|
|
||||||
self.assertNotEqual(cred11_id, cred22_id)
|
|
||||||
self.assertNotEqual(cred21_id, cred22_id)
|
|
||||||
|
|
||||||
cred = cdb.get_credential_name(cred11.cname)
|
|
||||||
self._assert_equal(cred, cred11)
|
|
||||||
cred = cdb.get_credential_name(cred21.cname)
|
|
||||||
self._assert_equal(cred, cred21)
|
|
||||||
cred = cdb.get_credential_name(cred22.cname)
|
|
||||||
self._assert_equal(cred, cred22)
|
|
||||||
|
|
||||||
with testtools.ExpectedException(c_exc.CredentialNameNotFound):
|
|
||||||
cdb.get_credential_name("dummyCredentialName")
|
|
||||||
|
|
||||||
def test_credential_update(self):
|
|
||||||
cred11 = self._cred_test_obj(1, 1)
|
|
||||||
cred11_id = cdb.add_credential(
|
|
||||||
cred11.cname, cred11.usr, cred11.pwd, cred11.ctype).credential_id
|
|
||||||
self._network_plugin.rename_credential(cred11_id, new_name=None,
|
|
||||||
new_password=None)
|
|
||||||
new_usr = "new user name"
|
|
||||||
new_pwd = "new password"
|
|
||||||
new_credential = self._network_plugin.rename_credential(
|
|
||||||
cred11_id, new_usr, new_pwd)
|
|
||||||
expected_cred = self.CredObj(
|
|
||||||
cred11.cname, new_usr, new_pwd, cred11.ctype)
|
|
||||||
self._assert_equal(new_credential, expected_cred)
|
|
||||||
new_credential = self._network_plugin.get_credential_details(
|
|
||||||
cred11_id)
|
|
||||||
self._assert_equal(new_credential, expected_cred)
|
|
||||||
with testtools.ExpectedException(c_exc.CredentialNotFound):
|
|
||||||
self._network_plugin.rename_credential(
|
|
||||||
"dummyCredentialId", new_usr, new_pwd)
|
|
||||||
|
|
||||||
def test_get_credential_not_found_exception(self):
|
|
||||||
self.assertRaises(c_exc.CredentialNotFound,
|
|
||||||
self._network_plugin.get_credential_details,
|
|
||||||
"dummyCredentialId")
|
|
||||||
|
|
||||||
|
|
||||||
class CiscoCredentialStoreTest(base.BaseTestCase):
|
|
||||||
|
|
||||||
"""Cisco Credential Store unit tests."""
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(CiscoCredentialStoreTest, self).setUp()
|
|
||||||
db.configure_db()
|
|
||||||
self.addCleanup(db.clear_db)
|
|
||||||
|
|
||||||
def test_cred_store_init_duplicate_creds_ignored(self):
|
|
||||||
"""Check that with multi store instances, dup creds are ignored."""
|
|
||||||
# Create a device dictionary containing credentials for 1 switch.
|
|
||||||
dev_dict = {
|
|
||||||
('dev_id', '1.1.1.1', cisco_constants.USERNAME): 'user_1',
|
|
||||||
('dev_id', '1.1.1.1', cisco_constants.PASSWORD): 'password_1',
|
|
||||||
('dev_id', '1.1.1.1', 'host_a'): '1/1',
|
|
||||||
('dev_id', '1.1.1.1', 'host_b'): '1/2',
|
|
||||||
('dev_id', '1.1.1.1', 'host_c'): '1/3',
|
|
||||||
}
|
|
||||||
with mock.patch.object(config, 'get_device_dictionary',
|
|
||||||
return_value=dev_dict):
|
|
||||||
# Create and initialize 2 instances of credential store.
|
|
||||||
cisco_credentials_v2.Store().initialize()
|
|
||||||
cisco_credentials_v2.Store().initialize()
|
|
||||||
# There should be only 1 switch credential in the database.
|
|
||||||
self.assertEqual(len(cdb.get_all_credentials()), 1)
|
|
File diff suppressed because it is too large
Load Diff
|
@ -1,239 +0,0 @@
|
||||||
# Copyright (c) 2013 OpenStack Foundation
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import collections
|
|
||||||
import mock
|
|
||||||
import testtools
|
|
||||||
|
|
||||||
from neutron.db import api as db
|
|
||||||
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
|
|
||||||
from neutron.plugins.cisco.common import config
|
|
||||||
from neutron.plugins.cisco.db import nexus_db_v2 as nxdb
|
|
||||||
from neutron.plugins.cisco.nexus import cisco_nexus_plugin_v2
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
class CiscoNexusDbTest(base.BaseTestCase):
|
|
||||||
|
|
||||||
"""Unit tests for cisco.db.nexus_models_v2.NexusPortBinding model."""
|
|
||||||
|
|
||||||
NpbObj = collections.namedtuple('NpbObj', 'port vlan switch instance')
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(CiscoNexusDbTest, self).setUp()
|
|
||||||
db.configure_db()
|
|
||||||
self.session = db.get_session()
|
|
||||||
self.addCleanup(db.clear_db)
|
|
||||||
|
|
||||||
def _npb_test_obj(self, pnum, vnum, switch=None, instance=None):
|
|
||||||
"""Create a Nexus port binding test object from a pair of numbers."""
|
|
||||||
if pnum is 'router':
|
|
||||||
port = pnum
|
|
||||||
else:
|
|
||||||
port = '1/%s' % str(pnum)
|
|
||||||
vlan = str(vnum)
|
|
||||||
if switch is None:
|
|
||||||
switch = '10.9.8.7'
|
|
||||||
if instance is None:
|
|
||||||
instance = 'instance_%s_%s' % (str(pnum), str(vnum))
|
|
||||||
return self.NpbObj(port, vlan, switch, instance)
|
|
||||||
|
|
||||||
def _assert_equal(self, npb, npb_obj):
|
|
||||||
self.assertEqual(npb.port_id, npb_obj.port)
|
|
||||||
self.assertEqual(int(npb.vlan_id), int(npb_obj.vlan))
|
|
||||||
self.assertEqual(npb.switch_ip, npb_obj.switch)
|
|
||||||
self.assertEqual(npb.instance_id, npb_obj.instance)
|
|
||||||
|
|
||||||
def _add_to_db(self, npbs):
|
|
||||||
for npb in npbs:
|
|
||||||
nxdb.add_nexusport_binding(
|
|
||||||
npb.port, npb.vlan, npb.switch, npb.instance)
|
|
||||||
|
|
||||||
def test_nexusportbinding_add_remove(self):
|
|
||||||
npb11 = self._npb_test_obj(10, 100)
|
|
||||||
npb = nxdb.add_nexusport_binding(
|
|
||||||
npb11.port, npb11.vlan, npb11.switch, npb11.instance)
|
|
||||||
self._assert_equal(npb, npb11)
|
|
||||||
npb = nxdb.remove_nexusport_binding(
|
|
||||||
npb11.port, npb11.vlan, npb11.switch, npb11.instance)
|
|
||||||
self.assertEqual(len(npb), 1)
|
|
||||||
self._assert_equal(npb[0], npb11)
|
|
||||||
with testtools.ExpectedException(c_exc.NexusPortBindingNotFound):
|
|
||||||
nxdb.remove_nexusport_binding(
|
|
||||||
npb11.port, npb11.vlan, npb11.switch, npb11.instance)
|
|
||||||
|
|
||||||
def test_nexusportbinding_get(self):
|
|
||||||
npb11 = self._npb_test_obj(10, 100)
|
|
||||||
npb21 = self._npb_test_obj(20, 100)
|
|
||||||
npb22 = self._npb_test_obj(20, 200)
|
|
||||||
self._add_to_db([npb11, npb21, npb22])
|
|
||||||
|
|
||||||
npb = nxdb.get_nexusport_binding(
|
|
||||||
npb11.port, npb11.vlan, npb11.switch, npb11.instance)
|
|
||||||
self.assertEqual(len(npb), 1)
|
|
||||||
self._assert_equal(npb[0], npb11)
|
|
||||||
npb = nxdb.get_nexusport_binding(
|
|
||||||
npb21.port, npb21.vlan, npb21.switch, npb21.instance)
|
|
||||||
self.assertEqual(len(npb), 1)
|
|
||||||
self._assert_equal(npb[0], npb21)
|
|
||||||
npb = nxdb.get_nexusport_binding(
|
|
||||||
npb22.port, npb22.vlan, npb22.switch, npb22.instance)
|
|
||||||
self.assertEqual(len(npb), 1)
|
|
||||||
self._assert_equal(npb[0], npb22)
|
|
||||||
|
|
||||||
with testtools.ExpectedException(c_exc.NexusPortBindingNotFound):
|
|
||||||
nxdb.get_nexusport_binding(
|
|
||||||
npb21.port, npb21.vlan, npb21.switch, "dummyInstance")
|
|
||||||
|
|
||||||
def test_nexusvlanbinding_get(self):
|
|
||||||
npb11 = self._npb_test_obj(10, 100)
|
|
||||||
npb21 = self._npb_test_obj(20, 100)
|
|
||||||
npb22 = self._npb_test_obj(20, 200)
|
|
||||||
self._add_to_db([npb11, npb21, npb22])
|
|
||||||
|
|
||||||
npb_all_v100 = nxdb.get_nexusvlan_binding(npb11.vlan, npb11.switch)
|
|
||||||
self.assertEqual(len(npb_all_v100), 2)
|
|
||||||
npb_v200 = nxdb.get_nexusvlan_binding(npb22.vlan, npb22.switch)
|
|
||||||
self.assertEqual(len(npb_v200), 1)
|
|
||||||
self._assert_equal(npb_v200[0], npb22)
|
|
||||||
|
|
||||||
with testtools.ExpectedException(c_exc.NexusPortBindingNotFound):
|
|
||||||
nxdb.get_nexusvlan_binding(npb21.vlan, "dummySwitch")
|
|
||||||
|
|
||||||
def test_nexusvmbinding_get(self):
|
|
||||||
npb11 = self._npb_test_obj(10, 100)
|
|
||||||
npb21 = self._npb_test_obj(20, 100)
|
|
||||||
npb22 = self._npb_test_obj(20, 200)
|
|
||||||
self._add_to_db([npb11, npb21, npb22])
|
|
||||||
|
|
||||||
npb = nxdb.get_nexusvm_bindings(npb21.vlan, npb21.instance)[0]
|
|
||||||
self._assert_equal(npb, npb21)
|
|
||||||
npb = nxdb.get_nexusvm_bindings(npb22.vlan, npb22.instance)[0]
|
|
||||||
self._assert_equal(npb, npb22)
|
|
||||||
|
|
||||||
with testtools.ExpectedException(c_exc.NexusPortBindingNotFound):
|
|
||||||
nxdb.get_nexusvm_bindings(npb21.vlan, "dummyInstance")
|
|
||||||
|
|
||||||
def test_nexusportvlanswitchbinding_get(self):
|
|
||||||
npb11 = self._npb_test_obj(10, 100)
|
|
||||||
npb21 = self._npb_test_obj(20, 100)
|
|
||||||
self._add_to_db([npb11, npb21])
|
|
||||||
|
|
||||||
npb = nxdb.get_port_vlan_switch_binding(
|
|
||||||
npb11.port, npb11.vlan, npb11.switch)
|
|
||||||
self.assertEqual(len(npb), 1)
|
|
||||||
self._assert_equal(npb[0], npb11)
|
|
||||||
|
|
||||||
with testtools.ExpectedException(c_exc.NexusPortBindingNotFound):
|
|
||||||
nxdb.get_port_vlan_switch_binding(
|
|
||||||
npb21.port, npb21.vlan, "dummySwitch")
|
|
||||||
|
|
||||||
def test_nexusportswitchbinding_get(self):
|
|
||||||
npb11 = self._npb_test_obj(10, 100)
|
|
||||||
npb21 = self._npb_test_obj(20, 100, switch='2.2.2.2')
|
|
||||||
npb22 = self._npb_test_obj(20, 200, switch='2.2.2.2')
|
|
||||||
self._add_to_db([npb11, npb21, npb22])
|
|
||||||
|
|
||||||
npb = nxdb.get_port_switch_bindings(npb11.port, npb11.switch)
|
|
||||||
self.assertEqual(len(npb), 1)
|
|
||||||
self._assert_equal(npb[0], npb11)
|
|
||||||
npb_all_p20 = nxdb.get_port_switch_bindings(npb21.port, npb21.switch)
|
|
||||||
self.assertEqual(len(npb_all_p20), 2)
|
|
||||||
|
|
||||||
npb = nxdb.get_port_switch_bindings(npb21.port, "dummySwitch")
|
|
||||||
self.assertIsNone(npb)
|
|
||||||
|
|
||||||
def test_nexussvibinding_get(self):
|
|
||||||
npbr1 = self._npb_test_obj('router', 100)
|
|
||||||
npb21 = self._npb_test_obj(20, 100)
|
|
||||||
self._add_to_db([npbr1, npb21])
|
|
||||||
|
|
||||||
npb_svi = nxdb.get_nexussvi_bindings()
|
|
||||||
self.assertEqual(len(npb_svi), 1)
|
|
||||||
self._assert_equal(npb_svi[0], npbr1)
|
|
||||||
|
|
||||||
npbr2 = self._npb_test_obj('router', 200)
|
|
||||||
self._add_to_db([npbr2])
|
|
||||||
npb_svi = nxdb.get_nexussvi_bindings()
|
|
||||||
self.assertEqual(len(npb_svi), 2)
|
|
||||||
|
|
||||||
def test_nexussviswitch_find(self):
|
|
||||||
"""Test Nexus switch selection for SVI placement."""
|
|
||||||
# Configure 2 Nexus switches
|
|
||||||
nexus_switches = {
|
|
||||||
('1.1.1.1', 'username'): 'admin',
|
|
||||||
('1.1.1.1', 'password'): 'password1',
|
|
||||||
('1.1.1.1', 'host1'): '1/1',
|
|
||||||
('2.2.2.2', 'username'): 'admin',
|
|
||||||
('2.2.2.2', 'password'): 'password2',
|
|
||||||
('2.2.2.2', 'host2'): '1/1',
|
|
||||||
}
|
|
||||||
nexus_plugin = cisco_nexus_plugin_v2.NexusPlugin()
|
|
||||||
nexus_plugin._client = mock.Mock()
|
|
||||||
nexus_plugin._client.nexus_switches = nexus_switches
|
|
||||||
|
|
||||||
# Set the Cisco config module's first configured device IP address
|
|
||||||
# according to the preceding switch config
|
|
||||||
with mock.patch.object(config, 'first_device_ip', new='1.1.1.1'):
|
|
||||||
|
|
||||||
# Enable round-robin mode with no SVIs configured on any of the
|
|
||||||
# Nexus switches (i.e. no entries in the SVI database). The
|
|
||||||
# plugin should select the first switch in the configuration.
|
|
||||||
config.CONF.set_override('svi_round_robin', True, 'CISCO')
|
|
||||||
switch_ip = nexus_plugin._find_switch_for_svi()
|
|
||||||
self.assertEqual(switch_ip, '1.1.1.1')
|
|
||||||
|
|
||||||
# Keep round-robin mode enabled, and add entries to the SVI
|
|
||||||
# database. The plugin should select the switch with the least
|
|
||||||
# number of entries in the SVI database.
|
|
||||||
vlan = 100
|
|
||||||
npbr11 = self._npb_test_obj('router', vlan, switch='1.1.1.1',
|
|
||||||
instance='instance11')
|
|
||||||
npbr12 = self._npb_test_obj('router', vlan, switch='1.1.1.1',
|
|
||||||
instance='instance12')
|
|
||||||
npbr21 = self._npb_test_obj('router', vlan, switch='2.2.2.2',
|
|
||||||
instance='instance21')
|
|
||||||
self._add_to_db([npbr11, npbr12, npbr21])
|
|
||||||
switch_ip = nexus_plugin._find_switch_for_svi()
|
|
||||||
self.assertEqual(switch_ip, '2.2.2.2')
|
|
||||||
|
|
||||||
# Disable round-robin mode. The plugin should select the
|
|
||||||
# first switch in the configuration.
|
|
||||||
config.CONF.clear_override('svi_round_robin', 'CISCO')
|
|
||||||
switch_ip = nexus_plugin._find_switch_for_svi()
|
|
||||||
self.assertEqual(switch_ip, '1.1.1.1')
|
|
||||||
|
|
||||||
def test_nexusbinding_update(self):
|
|
||||||
npb11 = self._npb_test_obj(10, 100, switch='1.1.1.1', instance='test')
|
|
||||||
npb21 = self._npb_test_obj(20, 100, switch='1.1.1.1', instance='test')
|
|
||||||
self._add_to_db([npb11, npb21])
|
|
||||||
|
|
||||||
npb_all_v100 = nxdb.get_nexusvlan_binding(npb11.vlan, '1.1.1.1')
|
|
||||||
self.assertEqual(len(npb_all_v100), 2)
|
|
||||||
|
|
||||||
npb22 = self._npb_test_obj(20, 200, switch='1.1.1.1', instance='test')
|
|
||||||
npb = nxdb.update_nexusport_binding(npb21.port, 200)
|
|
||||||
self._assert_equal(npb, npb22)
|
|
||||||
|
|
||||||
npb_all_v100 = nxdb.get_nexusvlan_binding(npb11.vlan, '1.1.1.1')
|
|
||||||
self.assertEqual(len(npb_all_v100), 1)
|
|
||||||
self._assert_equal(npb_all_v100[0], npb11)
|
|
||||||
|
|
||||||
npb = nxdb.update_nexusport_binding(npb21.port, 0)
|
|
||||||
self.assertIsNone(npb)
|
|
||||||
|
|
||||||
npb33 = self._npb_test_obj(30, 300, switch='1.1.1.1', instance='test')
|
|
||||||
with testtools.ExpectedException(c_exc.NexusPortBindingNotFound):
|
|
||||||
nxdb.update_nexusport_binding(npb33.port, 200)
|
|
|
@ -1,301 +0,0 @@
|
||||||
# Copyright (c) 2012 OpenStack Foundation.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import contextlib
|
|
||||||
import mock
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from neutron.db import api as db
|
|
||||||
from neutron.extensions import providernet as provider
|
|
||||||
from neutron.openstack.common import importutils
|
|
||||||
from neutron.plugins.cisco.common import cisco_constants as const
|
|
||||||
from neutron.plugins.cisco.common import cisco_exceptions as cisco_exc
|
|
||||||
from neutron.plugins.cisco.common import config as cisco_config
|
|
||||||
from neutron.plugins.cisco.db import network_db_v2 as cdb
|
|
||||||
from neutron.plugins.cisco.nexus import cisco_nexus_plugin_v2
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
NEXUS_IP_ADDRESS = '1.1.1.1'
|
|
||||||
HOSTNAME1 = 'testhost1'
|
|
||||||
HOSTNAME2 = 'testhost2'
|
|
||||||
HOSTNAME3 = 'testhost3'
|
|
||||||
INSTANCE1 = 'testvm1'
|
|
||||||
INSTANCE2 = 'testvm2'
|
|
||||||
INSTANCE3 = 'testvm3'
|
|
||||||
NEXUS_PORT1 = '1/10'
|
|
||||||
NEXUS_PORT2 = '1/20'
|
|
||||||
NEXUS_PC_IP_ADDRESS = '2.2.2.2'
|
|
||||||
NEXUS_PORTCHANNELS = 'portchannel:2'
|
|
||||||
PC_HOSTNAME = 'testpchost'
|
|
||||||
NEXUS_SSH_PORT = '22'
|
|
||||||
NEXUS_DRIVER = ('neutron.plugins.cisco.nexus.'
|
|
||||||
'cisco_nexus_network_driver_v2.CiscoNEXUSDriver')
|
|
||||||
NET_ATTRS = [const.NET_ID,
|
|
||||||
const.NET_NAME,
|
|
||||||
const.NET_VLAN_NAME,
|
|
||||||
const.NET_VLAN_ID]
|
|
||||||
|
|
||||||
|
|
||||||
class TestCiscoNexusPlugin(base.BaseTestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
"""Set up function."""
|
|
||||||
super(TestCiscoNexusPlugin, self).setUp()
|
|
||||||
self.tenant_id = "test_tenant_cisco1"
|
|
||||||
self.net_name = "test_network_cisco1"
|
|
||||||
self.net_id = 7
|
|
||||||
self.vlan_name = "q-" + str(self.net_id) + "vlan"
|
|
||||||
self.vlan_id = 267
|
|
||||||
self.second_tenant_id = "test_tenant_2"
|
|
||||||
self.second_net_name = "test_network_cisco2"
|
|
||||||
self.second_net_id = 5
|
|
||||||
self.second_vlan_name = "q-" + str(self.second_net_id) + "vlan"
|
|
||||||
self.second_vlan_id = 265
|
|
||||||
self._pchostname = PC_HOSTNAME
|
|
||||||
|
|
||||||
self.attachment1 = {
|
|
||||||
const.TENANT_ID: self.tenant_id,
|
|
||||||
const.INSTANCE_ID: INSTANCE1,
|
|
||||||
const.HOST_NAME: HOSTNAME1,
|
|
||||||
}
|
|
||||||
self.attachment2 = {
|
|
||||||
const.TENANT_ID: self.second_tenant_id,
|
|
||||||
const.INSTANCE_ID: INSTANCE2,
|
|
||||||
const.HOST_NAME: HOSTNAME2,
|
|
||||||
}
|
|
||||||
self.attachment3 = {
|
|
||||||
const.TENANT_ID: self.second_tenant_id,
|
|
||||||
const.INSTANCE_ID: INSTANCE3,
|
|
||||||
const.HOST_NAME: HOSTNAME3,
|
|
||||||
}
|
|
||||||
self.network1 = {
|
|
||||||
const.NET_ID: self.net_id,
|
|
||||||
const.NET_NAME: self.net_name,
|
|
||||||
const.NET_VLAN_NAME: self.vlan_name,
|
|
||||||
const.NET_VLAN_ID: self.vlan_id,
|
|
||||||
}
|
|
||||||
self.network2 = {
|
|
||||||
const.NET_ID: self.second_net_id,
|
|
||||||
const.NET_NAME: self.second_net_name,
|
|
||||||
const.NET_VLAN_NAME: self.second_vlan_name,
|
|
||||||
const.NET_VLAN_ID: self.second_vlan_id,
|
|
||||||
}
|
|
||||||
self.network3 = {
|
|
||||||
const.NET_ID: 8,
|
|
||||||
const.NET_NAME: 'vpc_net',
|
|
||||||
const.NET_VLAN_NAME: 'q-268',
|
|
||||||
const.NET_VLAN_ID: '268',
|
|
||||||
}
|
|
||||||
self.delete_port_args_1 = [
|
|
||||||
self.attachment1[const.INSTANCE_ID],
|
|
||||||
self.network1[const.NET_VLAN_ID],
|
|
||||||
]
|
|
||||||
self.providernet = {
|
|
||||||
const.NET_ID: 9,
|
|
||||||
const.NET_NAME: 'pnet1',
|
|
||||||
const.NET_VLAN_NAME: 'p-300',
|
|
||||||
const.NET_VLAN_ID: 300,
|
|
||||||
provider.NETWORK_TYPE: 'vlan',
|
|
||||||
provider.PHYSICAL_NETWORK: self.net_name + '200:299',
|
|
||||||
provider.SEGMENTATION_ID: 300,
|
|
||||||
}
|
|
||||||
|
|
||||||
def new_nexus_init(self):
|
|
||||||
self._client = importutils.import_object(NEXUS_DRIVER)
|
|
||||||
self._client.nexus_switches = {
|
|
||||||
(NEXUS_IP_ADDRESS, HOSTNAME1): NEXUS_PORT1,
|
|
||||||
(NEXUS_IP_ADDRESS, 'ssh_port'): NEXUS_SSH_PORT,
|
|
||||||
(NEXUS_IP_ADDRESS, HOSTNAME2): NEXUS_PORT2,
|
|
||||||
(NEXUS_IP_ADDRESS, 'ssh_port'): NEXUS_SSH_PORT,
|
|
||||||
(NEXUS_PC_IP_ADDRESS, 'ssh_port'): NEXUS_SSH_PORT,
|
|
||||||
}
|
|
||||||
self._nexus_switches = {
|
|
||||||
('NEXUS_SWITCH', NEXUS_IP_ADDRESS, HOSTNAME1): NEXUS_PORT1,
|
|
||||||
('NEXUS_SWITCH', NEXUS_IP_ADDRESS, HOSTNAME2): NEXUS_PORT2,
|
|
||||||
('NEXUS_SWITCH', NEXUS_PC_IP_ADDRESS, HOSTNAME3):
|
|
||||||
NEXUS_PORTCHANNELS,
|
|
||||||
('NEXUS_SWITCH', NEXUS_PC_IP_ADDRESS, 'ssh_port'):
|
|
||||||
NEXUS_SSH_PORT,
|
|
||||||
('NEXUS_SWITCH', NEXUS_IP_ADDRESS, HOSTNAME3):
|
|
||||||
NEXUS_PORTCHANNELS,
|
|
||||||
('NEXUS_SWITCH', NEXUS_IP_ADDRESS, 'ssh_port'): NEXUS_SSH_PORT,
|
|
||||||
}
|
|
||||||
self._client.credentials = {
|
|
||||||
NEXUS_IP_ADDRESS: {
|
|
||||||
'username': 'admin',
|
|
||||||
'password': 'pass1234'
|
|
||||||
},
|
|
||||||
NEXUS_PC_IP_ADDRESS: {
|
|
||||||
'username': 'admin',
|
|
||||||
'password': 'password'
|
|
||||||
},
|
|
||||||
}
|
|
||||||
db.configure_db()
|
|
||||||
|
|
||||||
self.addCleanup(db.clear_db)
|
|
||||||
# Use a mock netconf client
|
|
||||||
self.mock_ncclient = mock.Mock()
|
|
||||||
|
|
||||||
with contextlib.nested(
|
|
||||||
mock.patch.dict('sys.modules', {'ncclient': self.mock_ncclient}),
|
|
||||||
mock.patch.object(cisco_nexus_plugin_v2.NexusPlugin,
|
|
||||||
'__init__', new=new_nexus_init)
|
|
||||||
):
|
|
||||||
self._cisco_nexus_plugin = cisco_nexus_plugin_v2.NexusPlugin()
|
|
||||||
|
|
||||||
# Set the Cisco config module's first configured device IP address
|
|
||||||
# according to the preceding switch config.
|
|
||||||
mock.patch.object(cisco_config, 'first_device_ip',
|
|
||||||
new=NEXUS_IP_ADDRESS).start()
|
|
||||||
|
|
||||||
def test_create_delete_networks(self):
|
|
||||||
"""Tests creation of two new Virtual Networks."""
|
|
||||||
new_net_dict = self._cisco_nexus_plugin.create_network(
|
|
||||||
self.network1, self.attachment1)
|
|
||||||
for attr in NET_ATTRS:
|
|
||||||
self.assertEqual(new_net_dict[attr], self.network1[attr])
|
|
||||||
|
|
||||||
expected_instance_id = self._cisco_nexus_plugin.delete_port(
|
|
||||||
INSTANCE1, self.vlan_id)
|
|
||||||
|
|
||||||
self.assertEqual(expected_instance_id, INSTANCE1)
|
|
||||||
|
|
||||||
new_net_dict = self._cisco_nexus_plugin.create_network(
|
|
||||||
self.network2, self.attachment1)
|
|
||||||
for attr in NET_ATTRS:
|
|
||||||
self.assertEqual(new_net_dict[attr], self.network2[attr])
|
|
||||||
|
|
||||||
expected_instance_id = self._cisco_nexus_plugin.delete_port(
|
|
||||||
INSTANCE1, self.second_vlan_id)
|
|
||||||
|
|
||||||
self.assertEqual(expected_instance_id, INSTANCE1)
|
|
||||||
|
|
||||||
def _create_delete_providernet(self, auto_create, auto_trunk):
|
|
||||||
cfg.CONF.set_override(
|
|
||||||
'provider_vlan_auto_create', auto_create, 'CISCO')
|
|
||||||
cfg.CONF.set_override(
|
|
||||||
'provider_vlan_auto_trunk', auto_trunk, 'CISCO')
|
|
||||||
with mock.patch.object(cdb, 'is_provider_vlan',
|
|
||||||
return_value=True) as mock_db:
|
|
||||||
# Create a provider network
|
|
||||||
new_net_dict = self._cisco_nexus_plugin.create_network(
|
|
||||||
self.providernet, self.attachment1)
|
|
||||||
self.assertEqual(mock_db.call_count, 1)
|
|
||||||
for attr in NET_ATTRS:
|
|
||||||
self.assertEqual(new_net_dict[attr], self.providernet[attr])
|
|
||||||
# Delete the provider network
|
|
||||||
instance_id = self._cisco_nexus_plugin.delete_port(
|
|
||||||
self.attachment1[const.INSTANCE_ID],
|
|
||||||
self.providernet[const.NET_VLAN_ID])
|
|
||||||
self.assertEqual(instance_id,
|
|
||||||
self.attachment1[const.INSTANCE_ID])
|
|
||||||
|
|
||||||
def test_create_delete_providernet(self):
|
|
||||||
self._create_delete_providernet(auto_create=True, auto_trunk=True)
|
|
||||||
|
|
||||||
def test_create_delete_provider_vlan_network_cfg_auto_man(self):
|
|
||||||
self._create_delete_providernet(auto_create=True, auto_trunk=False)
|
|
||||||
|
|
||||||
def test_create_delete_provider_vlan_network_cfg_man_auto(self):
|
|
||||||
self._create_delete_providernet(auto_create=False, auto_trunk=True)
|
|
||||||
|
|
||||||
def test_create_delete_provider_vlan_network_cfg_man_man(self):
|
|
||||||
self._create_delete_providernet(auto_create=False, auto_trunk=False)
|
|
||||||
|
|
||||||
def test_create_delete_network_portchannel(self):
|
|
||||||
"""Tests creation of a network over a portchannel."""
|
|
||||||
new_net_dict = self._cisco_nexus_plugin.create_network(
|
|
||||||
self.network3, self.attachment3)
|
|
||||||
self.assertEqual(new_net_dict[const.NET_ID],
|
|
||||||
self.network3[const.NET_ID])
|
|
||||||
self.assertEqual(new_net_dict[const.NET_NAME],
|
|
||||||
self.network3[const.NET_NAME])
|
|
||||||
self.assertEqual(new_net_dict[const.NET_VLAN_NAME],
|
|
||||||
self.network3[const.NET_VLAN_NAME])
|
|
||||||
self.assertEqual(new_net_dict[const.NET_VLAN_ID],
|
|
||||||
self.network3[const.NET_VLAN_ID])
|
|
||||||
|
|
||||||
self._cisco_nexus_plugin.delete_port(
|
|
||||||
INSTANCE3, self.network3[const.NET_VLAN_ID]
|
|
||||||
)
|
|
||||||
|
|
||||||
def _add_router_interface(self):
|
|
||||||
"""Add a router interface using fixed (canned) parameters."""
|
|
||||||
vlan_name = self.vlan_name
|
|
||||||
vlan_id = self.vlan_id
|
|
||||||
gateway_ip = '10.0.0.1/24'
|
|
||||||
router_id = '00000R1'
|
|
||||||
subnet_id = '00001'
|
|
||||||
return self._cisco_nexus_plugin.add_router_interface(
|
|
||||||
vlan_name, vlan_id, subnet_id, gateway_ip, router_id)
|
|
||||||
|
|
||||||
def _remove_router_interface(self):
|
|
||||||
"""Remove a router interface created with _add_router_interface."""
|
|
||||||
vlan_id = self.vlan_id
|
|
||||||
router_id = '00000R1'
|
|
||||||
return self._cisco_nexus_plugin.remove_router_interface(vlan_id,
|
|
||||||
router_id)
|
|
||||||
|
|
||||||
def test_nexus_add_remove_router_interface(self):
|
|
||||||
"""Tests addition of a router interface."""
|
|
||||||
self.assertTrue(self._add_router_interface())
|
|
||||||
self.assertEqual(self._remove_router_interface(), '00000R1')
|
|
||||||
|
|
||||||
def test_nexus_dup_add_router_interface(self):
|
|
||||||
"""Tests a duplicate add of a router interface."""
|
|
||||||
self._add_router_interface()
|
|
||||||
try:
|
|
||||||
self.assertRaises(
|
|
||||||
cisco_exc.SubnetInterfacePresent,
|
|
||||||
self._add_router_interface)
|
|
||||||
finally:
|
|
||||||
self._remove_router_interface()
|
|
||||||
|
|
||||||
def test_nexus_no_svi_switch_exception(self):
|
|
||||||
"""Tests failure to find a Nexus switch for SVI placement."""
|
|
||||||
# Clear the Nexus switches dictionary.
|
|
||||||
with mock.patch.dict(self._cisco_nexus_plugin._client.nexus_switches,
|
|
||||||
{}, clear=True):
|
|
||||||
# Clear the first Nexus IP address discovered in config
|
|
||||||
with mock.patch.object(cisco_config, 'first_device_ip',
|
|
||||||
new=None):
|
|
||||||
self.assertRaises(cisco_exc.NoNexusSviSwitch,
|
|
||||||
self._add_router_interface)
|
|
||||||
|
|
||||||
def test_nexus_add_port_after_router_interface(self):
|
|
||||||
"""Tests creating a port after a router interface.
|
|
||||||
|
|
||||||
Test creating a port after an SVI router interface has
|
|
||||||
been created. Only a trunk call should be invoked and the
|
|
||||||
plugin should not attempt to recreate the vlan.
|
|
||||||
"""
|
|
||||||
self._add_router_interface()
|
|
||||||
# Create a network on the switch
|
|
||||||
self._cisco_nexus_plugin.create_network(
|
|
||||||
self.network1, self.attachment1)
|
|
||||||
|
|
||||||
# Grab a list of all mock calls from ncclient
|
|
||||||
last_cfgs = (self.mock_ncclient.manager.connect.return_value.
|
|
||||||
edit_config.mock_calls)
|
|
||||||
|
|
||||||
# The last ncclient call should be for trunking and the second
|
|
||||||
# to last call should be creating the SVI interface
|
|
||||||
last_cfg = last_cfgs[-1][2]['config']
|
|
||||||
self.assertIn('allowed', last_cfg)
|
|
||||||
|
|
||||||
slast_cfg = last_cfgs[-2][2]['config']
|
|
||||||
self.assertIn('10.0.0.1/24', slast_cfg)
|
|
|
@ -1,63 +0,0 @@
|
||||||
# Copyright 2014 Cisco Systems, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import sys
|
|
||||||
|
|
||||||
import mock
|
|
||||||
|
|
||||||
from neutron import context
|
|
||||||
from neutron.plugins.cisco.common import cisco_constants as const
|
|
||||||
from neutron.plugins.cisco.common import config as cisco_config
|
|
||||||
from neutron.plugins.cisco.models import virt_phy_sw_v2
|
|
||||||
from neutron.plugins.cisco.nexus import cisco_nexus_plugin_v2
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
class TestCiscoPluginModel(base.BaseTestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
# Point config file to: neutron/tests/etc/neutron.conf.test
|
|
||||||
self.config_parse()
|
|
||||||
|
|
||||||
super(TestCiscoPluginModel, self).setUp()
|
|
||||||
|
|
||||||
def test_non_nexus_device_driver(self):
|
|
||||||
"""Tests handling of an non-Nexus device driver being configured."""
|
|
||||||
with mock.patch.dict(sys.modules, {'mock_driver': mock.Mock()}):
|
|
||||||
cisco_config.CONF.set_override('nexus_driver',
|
|
||||||
'mock_driver.Non_Nexus_Driver',
|
|
||||||
'CISCO')
|
|
||||||
# Plugin model instance should have is_nexus_plugin set to False
|
|
||||||
model = virt_phy_sw_v2.VirtualPhysicalSwitchModelV2()
|
|
||||||
self.assertFalse(model.is_nexus_plugin)
|
|
||||||
|
|
||||||
# Model's _invoke_nexus_for_net_create should just return False
|
|
||||||
user_id = 'user_id'
|
|
||||||
tenant_id = 'tenant_id'
|
|
||||||
ctx = context.Context(user_id, tenant_id)
|
|
||||||
self.assertFalse(model._invoke_nexus_for_net_create(
|
|
||||||
ctx, tenant_id, net_id='net_id',
|
|
||||||
instance_id='instance_id', host_id='host_id'))
|
|
||||||
|
|
||||||
def test_nexus_plugin_calls_ignored_if_plugin_not_loaded(self):
|
|
||||||
"""Verifies Nexus plugin calls are ignored if plugin is not loaded."""
|
|
||||||
cisco_config.CONF.set_override(const.NEXUS_PLUGIN,
|
|
||||||
None, 'CISCO_PLUGINS')
|
|
||||||
with mock.patch.object(cisco_nexus_plugin_v2.NexusPlugin,
|
|
||||||
'create_network') as mock_create_network:
|
|
||||||
model = virt_phy_sw_v2.VirtualPhysicalSwitchModelV2()
|
|
||||||
model._invoke_plugin_per_device(model, const.NEXUS_PLUGIN,
|
|
||||||
'create_network')
|
|
||||||
self.assertFalse(mock_create_network.called)
|
|
|
@ -1,15 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 OpenStack Foundation
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
File diff suppressed because it is too large
Load Diff
|
@ -1,15 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 OpenStack Foundation
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
File diff suppressed because it is too large
Load Diff
|
@ -1,15 +0,0 @@
|
||||||
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
|
|
||||||
#
|
|
||||||
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
|
@ -1,291 +0,0 @@
|
||||||
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
|
|
||||||
#
|
|
||||||
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import contextlib
|
|
||||||
import logging
|
|
||||||
|
|
||||||
import webob.exc
|
|
||||||
|
|
||||||
from neutron.api import extensions
|
|
||||||
from neutron.common import config
|
|
||||||
from neutron import context
|
|
||||||
import neutron.extensions
|
|
||||||
from neutron.extensions import metering
|
|
||||||
from neutron.plugins.common import constants
|
|
||||||
from neutron.services.metering import metering_plugin
|
|
||||||
from neutron.tests.unit import test_db_plugin
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
|
|
||||||
DB_METERING_PLUGIN_KLASS = (
|
|
||||||
"neutron.services.metering."
|
|
||||||
"metering_plugin.MeteringPlugin"
|
|
||||||
)
|
|
||||||
|
|
||||||
extensions_path = ':'.join(neutron.extensions.__path__)
|
|
||||||
|
|
||||||
|
|
||||||
class MeteringPluginDbTestCaseMixin(object):
|
|
||||||
def _create_metering_label(self, fmt, name, description, **kwargs):
|
|
||||||
data = {'metering_label': {'name': name,
|
|
||||||
'tenant_id': kwargs.get('tenant_id',
|
|
||||||
'test_tenant'),
|
|
||||||
'description': description}}
|
|
||||||
req = self.new_create_request('metering-labels', data,
|
|
||||||
fmt)
|
|
||||||
|
|
||||||
if kwargs.get('set_context') and 'tenant_id' in kwargs:
|
|
||||||
# create a specific auth context for this request
|
|
||||||
req.environ['neutron.context'] = (
|
|
||||||
context.Context('', kwargs['tenant_id'],
|
|
||||||
is_admin=kwargs.get('is_admin', True)))
|
|
||||||
|
|
||||||
return req.get_response(self.ext_api)
|
|
||||||
|
|
||||||
def _make_metering_label(self, fmt, name, description, **kwargs):
|
|
||||||
res = self._create_metering_label(fmt, name, description, **kwargs)
|
|
||||||
if res.status_int >= 400:
|
|
||||||
raise webob.exc.HTTPClientError(code=res.status_int)
|
|
||||||
return self.deserialize(fmt, res)
|
|
||||||
|
|
||||||
def _create_metering_label_rule(self, fmt, metering_label_id, direction,
|
|
||||||
remote_ip_prefix, excluded, **kwargs):
|
|
||||||
data = {'metering_label_rule':
|
|
||||||
{'metering_label_id': metering_label_id,
|
|
||||||
'tenant_id': kwargs.get('tenant_id', 'test_tenant'),
|
|
||||||
'direction': direction,
|
|
||||||
'excluded': excluded,
|
|
||||||
'remote_ip_prefix': remote_ip_prefix}}
|
|
||||||
req = self.new_create_request('metering-label-rules',
|
|
||||||
data, fmt)
|
|
||||||
|
|
||||||
if kwargs.get('set_context') and 'tenant_id' in kwargs:
|
|
||||||
# create a specific auth context for this request
|
|
||||||
req.environ['neutron.context'] = (
|
|
||||||
context.Context('', kwargs['tenant_id']))
|
|
||||||
|
|
||||||
return req.get_response(self.ext_api)
|
|
||||||
|
|
||||||
def _make_metering_label_rule(self, fmt, metering_label_id, direction,
|
|
||||||
remote_ip_prefix, excluded, **kwargs):
|
|
||||||
res = self._create_metering_label_rule(fmt, metering_label_id,
|
|
||||||
direction, remote_ip_prefix,
|
|
||||||
excluded, **kwargs)
|
|
||||||
if res.status_int >= 400:
|
|
||||||
raise webob.exc.HTTPClientError(code=res.status_int)
|
|
||||||
return self.deserialize(fmt, res)
|
|
||||||
|
|
||||||
@contextlib.contextmanager
|
|
||||||
def metering_label(self, name='label', description='desc',
|
|
||||||
fmt=None, no_delete=False, **kwargs):
|
|
||||||
if not fmt:
|
|
||||||
fmt = self.fmt
|
|
||||||
metering_label = self._make_metering_label(fmt, name,
|
|
||||||
description, **kwargs)
|
|
||||||
yield metering_label
|
|
||||||
if not no_delete:
|
|
||||||
self._delete('metering-labels',
|
|
||||||
metering_label['metering_label']['id'])
|
|
||||||
|
|
||||||
@contextlib.contextmanager
|
|
||||||
def metering_label_rule(self, metering_label_id=None, direction='ingress',
|
|
||||||
remote_ip_prefix='10.0.0.0/24',
|
|
||||||
excluded='false', fmt=None, no_delete=False):
|
|
||||||
if not fmt:
|
|
||||||
fmt = self.fmt
|
|
||||||
metering_label_rule = self._make_metering_label_rule(fmt,
|
|
||||||
metering_label_id,
|
|
||||||
direction,
|
|
||||||
remote_ip_prefix,
|
|
||||||
excluded)
|
|
||||||
yield metering_label_rule
|
|
||||||
if not no_delete:
|
|
||||||
self._delete('metering-label-rules',
|
|
||||||
metering_label_rule['metering_label_rule']['id'])
|
|
||||||
|
|
||||||
|
|
||||||
class MeteringPluginDbTestCase(test_db_plugin.NeutronDbPluginV2TestCase,
|
|
||||||
MeteringPluginDbTestCaseMixin):
|
|
||||||
fmt = 'json'
|
|
||||||
|
|
||||||
resource_prefix_map = dict(
|
|
||||||
(k.replace('_', '-'), constants.COMMON_PREFIXES[constants.METERING])
|
|
||||||
for k in metering.RESOURCE_ATTRIBUTE_MAP.keys()
|
|
||||||
)
|
|
||||||
|
|
||||||
def setUp(self, plugin=None):
|
|
||||||
service_plugins = {'metering_plugin_name': DB_METERING_PLUGIN_KLASS}
|
|
||||||
|
|
||||||
super(MeteringPluginDbTestCase, self).setUp(
|
|
||||||
plugin=plugin,
|
|
||||||
service_plugins=service_plugins
|
|
||||||
)
|
|
||||||
|
|
||||||
self.plugin = metering_plugin.MeteringPlugin()
|
|
||||||
ext_mgr = extensions.PluginAwareExtensionManager(
|
|
||||||
extensions_path,
|
|
||||||
{constants.METERING: self.plugin}
|
|
||||||
)
|
|
||||||
app = config.load_paste_app('extensions_test_app')
|
|
||||||
self.ext_api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
|
|
||||||
|
|
||||||
def test_create_metering_label(self):
|
|
||||||
name = 'my label'
|
|
||||||
description = 'my metering label'
|
|
||||||
keys = [('name', name,), ('description', description)]
|
|
||||||
with self.metering_label(name, description) as metering_label:
|
|
||||||
for k, v, in keys:
|
|
||||||
self.assertEqual(metering_label['metering_label'][k], v)
|
|
||||||
|
|
||||||
def test_delete_metering_label(self):
|
|
||||||
name = 'my label'
|
|
||||||
description = 'my metering label'
|
|
||||||
|
|
||||||
with self.metering_label(name, description,
|
|
||||||
no_delete=True) as metering_label:
|
|
||||||
metering_label_id = metering_label['metering_label']['id']
|
|
||||||
self._delete('metering-labels', metering_label_id, 204)
|
|
||||||
|
|
||||||
def test_list_metering_label(self):
|
|
||||||
name = 'my label'
|
|
||||||
description = 'my metering label'
|
|
||||||
|
|
||||||
with contextlib.nested(
|
|
||||||
self.metering_label(name, description),
|
|
||||||
self.metering_label(name, description)) as metering_label:
|
|
||||||
|
|
||||||
self._test_list_resources('metering-label', metering_label)
|
|
||||||
|
|
||||||
def test_create_metering_label_rule(self):
|
|
||||||
name = 'my label'
|
|
||||||
description = 'my metering label'
|
|
||||||
|
|
||||||
with self.metering_label(name, description) as metering_label:
|
|
||||||
metering_label_id = metering_label['metering_label']['id']
|
|
||||||
|
|
||||||
direction = 'egress'
|
|
||||||
remote_ip_prefix = '192.168.0.0/24'
|
|
||||||
excluded = True
|
|
||||||
|
|
||||||
keys = [('metering_label_id', metering_label_id),
|
|
||||||
('direction', direction),
|
|
||||||
('excluded', excluded),
|
|
||||||
('remote_ip_prefix', remote_ip_prefix)]
|
|
||||||
with self.metering_label_rule(metering_label_id,
|
|
||||||
direction,
|
|
||||||
remote_ip_prefix,
|
|
||||||
excluded) as label_rule:
|
|
||||||
for k, v, in keys:
|
|
||||||
self.assertEqual(label_rule['metering_label_rule'][k], v)
|
|
||||||
|
|
||||||
def test_delete_metering_label_rule(self):
|
|
||||||
name = 'my label'
|
|
||||||
description = 'my metering label'
|
|
||||||
|
|
||||||
with self.metering_label(name, description) as metering_label:
|
|
||||||
metering_label_id = metering_label['metering_label']['id']
|
|
||||||
|
|
||||||
direction = 'egress'
|
|
||||||
remote_ip_prefix = '192.168.0.0/24'
|
|
||||||
excluded = True
|
|
||||||
|
|
||||||
with self.metering_label_rule(metering_label_id,
|
|
||||||
direction,
|
|
||||||
remote_ip_prefix,
|
|
||||||
excluded,
|
|
||||||
no_delete=True) as label_rule:
|
|
||||||
rule_id = label_rule['metering_label_rule']['id']
|
|
||||||
self._delete('metering-label-rules', rule_id, 204)
|
|
||||||
|
|
||||||
def test_list_metering_label_rule(self):
|
|
||||||
name = 'my label'
|
|
||||||
description = 'my metering label'
|
|
||||||
|
|
||||||
with self.metering_label(name, description) as metering_label:
|
|
||||||
metering_label_id = metering_label['metering_label']['id']
|
|
||||||
|
|
||||||
direction = 'egress'
|
|
||||||
remote_ip_prefix = '192.168.0.0/24'
|
|
||||||
excluded = True
|
|
||||||
|
|
||||||
with contextlib.nested(
|
|
||||||
self.metering_label_rule(metering_label_id,
|
|
||||||
direction,
|
|
||||||
remote_ip_prefix,
|
|
||||||
excluded),
|
|
||||||
self.metering_label_rule(metering_label_id,
|
|
||||||
'ingress',
|
|
||||||
remote_ip_prefix,
|
|
||||||
excluded)) as metering_label_rule:
|
|
||||||
|
|
||||||
self._test_list_resources('metering-label-rule',
|
|
||||||
metering_label_rule)
|
|
||||||
|
|
||||||
def test_create_metering_label_rules(self):
|
|
||||||
name = 'my label'
|
|
||||||
description = 'my metering label'
|
|
||||||
|
|
||||||
with self.metering_label(name, description) as metering_label:
|
|
||||||
metering_label_id = metering_label['metering_label']['id']
|
|
||||||
|
|
||||||
direction = 'egress'
|
|
||||||
remote_ip_prefix = '192.168.0.0/24'
|
|
||||||
excluded = True
|
|
||||||
|
|
||||||
with contextlib.nested(
|
|
||||||
self.metering_label_rule(metering_label_id,
|
|
||||||
direction,
|
|
||||||
remote_ip_prefix,
|
|
||||||
excluded),
|
|
||||||
self.metering_label_rule(metering_label_id,
|
|
||||||
direction,
|
|
||||||
'0.0.0.0/0',
|
|
||||||
False)) as metering_label_rule:
|
|
||||||
|
|
||||||
self._test_list_resources('metering-label-rule',
|
|
||||||
metering_label_rule)
|
|
||||||
|
|
||||||
def test_create_metering_label_rule_two_labels(self):
|
|
||||||
name1 = 'my label 1'
|
|
||||||
name2 = 'my label 2'
|
|
||||||
description = 'my metering label'
|
|
||||||
|
|
||||||
with self.metering_label(name1, description) as metering_label1:
|
|
||||||
metering_label_id1 = metering_label1['metering_label']['id']
|
|
||||||
|
|
||||||
with self.metering_label(name2, description) as metering_label2:
|
|
||||||
metering_label_id2 = metering_label2['metering_label']['id']
|
|
||||||
|
|
||||||
direction = 'egress'
|
|
||||||
remote_ip_prefix = '192.168.0.0/24'
|
|
||||||
excluded = True
|
|
||||||
|
|
||||||
with contextlib.nested(
|
|
||||||
self.metering_label_rule(metering_label_id1,
|
|
||||||
direction,
|
|
||||||
remote_ip_prefix,
|
|
||||||
excluded),
|
|
||||||
self.metering_label_rule(metering_label_id2,
|
|
||||||
direction,
|
|
||||||
remote_ip_prefix,
|
|
||||||
excluded)) as metering_label_rule:
|
|
||||||
|
|
||||||
self._test_list_resources('metering-label-rule',
|
|
||||||
metering_label_rule)
|
|
||||||
|
|
||||||
|
|
||||||
class TestMeteringDbXML(MeteringPluginDbTestCase):
|
|
||||||
fmt = 'xml'
|
|
|
@ -1,86 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
#
|
|
||||||
# Copyright (c) 2013 OpenStack Foundation.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import mock
|
|
||||||
|
|
||||||
from neutron import context
|
|
||||||
from neutron.db import agents_db
|
|
||||||
from neutron.db import api as db
|
|
||||||
from neutron.db import db_base_plugin_v2 as base_plugin
|
|
||||||
from neutron.openstack.common.db import exception as exc
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
class FakePlugin(base_plugin.NeutronDbPluginV2, agents_db.AgentDbMixin):
|
|
||||||
"""A fake plugin class containing all DB methods."""
|
|
||||||
|
|
||||||
|
|
||||||
class TestAgentsDbMixin(base.BaseTestCase):
|
|
||||||
def setUp(self):
|
|
||||||
super(TestAgentsDbMixin, self).setUp()
|
|
||||||
|
|
||||||
self.context = context.get_admin_context()
|
|
||||||
self.plugin = FakePlugin()
|
|
||||||
self.addCleanup(db.clear_db)
|
|
||||||
|
|
||||||
self.agent_status = {
|
|
||||||
'agent_type': 'Open vSwitch agent',
|
|
||||||
'binary': 'neutron-openvswitch-agent',
|
|
||||||
'host': 'overcloud-notcompute',
|
|
||||||
'topic': 'N/A'
|
|
||||||
}
|
|
||||||
|
|
||||||
def _assert_ref_fields_are_equal(self, reference, result):
|
|
||||||
"""Compare (key, value) pairs of a reference dict with the result
|
|
||||||
|
|
||||||
Note: the result MAY have additional keys
|
|
||||||
"""
|
|
||||||
|
|
||||||
for field, value in reference.items():
|
|
||||||
self.assertEqual(value, result[field], field)
|
|
||||||
|
|
||||||
def test_create_or_update_agent_new_entry(self):
|
|
||||||
self.plugin.create_or_update_agent(self.context, self.agent_status)
|
|
||||||
|
|
||||||
agent = self.plugin.get_agents(self.context)[0]
|
|
||||||
self._assert_ref_fields_are_equal(self.agent_status, agent)
|
|
||||||
|
|
||||||
def test_create_or_update_agent_existing_entry(self):
|
|
||||||
self.plugin.create_or_update_agent(self.context, self.agent_status)
|
|
||||||
self.plugin.create_or_update_agent(self.context, self.agent_status)
|
|
||||||
self.plugin.create_or_update_agent(self.context, self.agent_status)
|
|
||||||
|
|
||||||
agents = self.plugin.get_agents(self.context)
|
|
||||||
self.assertEqual(len(agents), 1)
|
|
||||||
|
|
||||||
agent = agents[0]
|
|
||||||
self._assert_ref_fields_are_equal(self.agent_status, agent)
|
|
||||||
|
|
||||||
def test_create_or_update_agent_concurrent_insert(self):
|
|
||||||
# NOTE(rpodolyaka): emulate violation of the unique constraint caused
|
|
||||||
# by a concurrent insert. Ensure we make another
|
|
||||||
# attempt on fail
|
|
||||||
with mock.patch('sqlalchemy.orm.Session.add') as add_mock:
|
|
||||||
add_mock.side_effect = [
|
|
||||||
exc.DBDuplicateEntry(columns=['agent_type', 'host']),
|
|
||||||
None
|
|
||||||
]
|
|
||||||
|
|
||||||
self.plugin.create_or_update_agent(self.context, self.agent_status)
|
|
||||||
|
|
||||||
self.assertEqual(add_mock.call_count, 2,
|
|
||||||
"Agent entry creation hasn't been retried")
|
|
|
@ -1,143 +0,0 @@
|
||||||
# Copyright (c) 2014 OpenStack Foundation.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
#
|
|
||||||
# @author: Sergio Cazzolato, Intel
|
|
||||||
|
|
||||||
from neutron.common import exceptions
|
|
||||||
from neutron import context
|
|
||||||
from neutron.db import api as db
|
|
||||||
from neutron.db import db_base_plugin_v2 as base_plugin
|
|
||||||
from neutron.db import quota_db
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
class FakePlugin(base_plugin.NeutronDbPluginV2, quota_db.DbQuotaDriver):
|
|
||||||
"""A fake plugin class containing all DB methods."""
|
|
||||||
|
|
||||||
|
|
||||||
class TestResource(object):
|
|
||||||
"""Describe a test resource for quota checking."""
|
|
||||||
|
|
||||||
def __init__(self, name, default):
|
|
||||||
self.name = name
|
|
||||||
self.quota = default
|
|
||||||
|
|
||||||
@property
|
|
||||||
def default(self):
|
|
||||||
return self.quota
|
|
||||||
|
|
||||||
PROJECT = 'prj_test'
|
|
||||||
RESOURCE = 'res_test'
|
|
||||||
|
|
||||||
|
|
||||||
class TestDbQuotaDriver(base.BaseTestCase):
|
|
||||||
def setUp(self):
|
|
||||||
super(TestDbQuotaDriver, self).setUp()
|
|
||||||
self.plugin = FakePlugin()
|
|
||||||
self.context = context.get_admin_context()
|
|
||||||
self.addCleanup(db.clear_db)
|
|
||||||
|
|
||||||
def test_create_quota_limit(self):
|
|
||||||
defaults = {RESOURCE: TestResource(RESOURCE, 4)}
|
|
||||||
|
|
||||||
self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
|
|
||||||
quotas = self.plugin.get_tenant_quotas(self.context, defaults, PROJECT)
|
|
||||||
self.assertEqual(2, quotas[RESOURCE])
|
|
||||||
|
|
||||||
def test_update_quota_limit(self):
|
|
||||||
defaults = {RESOURCE: TestResource(RESOURCE, 4)}
|
|
||||||
|
|
||||||
self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
|
|
||||||
self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 3)
|
|
||||||
quotas = self.plugin.get_tenant_quotas(self.context, defaults, PROJECT)
|
|
||||||
self.assertEqual(3, quotas[RESOURCE])
|
|
||||||
|
|
||||||
def test_delete_tenant_quota_restores_default_limit(self):
|
|
||||||
defaults = {RESOURCE: TestResource(RESOURCE, 4)}
|
|
||||||
|
|
||||||
self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
|
|
||||||
self.plugin.delete_tenant_quota(self.context, PROJECT)
|
|
||||||
quotas = self.plugin.get_tenant_quotas(self.context, defaults, PROJECT)
|
|
||||||
self.assertEqual(4, quotas[RESOURCE])
|
|
||||||
|
|
||||||
def test_get_all_quotas(self):
|
|
||||||
project_1 = 'prj_test_1'
|
|
||||||
project_2 = 'prj_test_2'
|
|
||||||
resource_1 = 'res_test_1'
|
|
||||||
resource_2 = 'res_test_2'
|
|
||||||
|
|
||||||
resources = {resource_1: TestResource(resource_1, 1),
|
|
||||||
resource_2: TestResource(resource_2, 1)}
|
|
||||||
|
|
||||||
self.plugin.update_quota_limit(self.context, project_1, resource_1, 2)
|
|
||||||
self.plugin.update_quota_limit(self.context, project_2, resource_2, 2)
|
|
||||||
quotas = self.plugin.get_all_quotas(self.context, resources)
|
|
||||||
|
|
||||||
self.assertEqual(2, len(quotas))
|
|
||||||
|
|
||||||
self.assertEqual(3, len(quotas[0]))
|
|
||||||
self.assertEqual(project_1, quotas[0]['tenant_id'])
|
|
||||||
self.assertEqual(2, quotas[0][resource_1])
|
|
||||||
self.assertEqual(1, quotas[0][resource_2])
|
|
||||||
|
|
||||||
self.assertEqual(3, len(quotas[1]))
|
|
||||||
self.assertEqual(project_2, quotas[1]['tenant_id'])
|
|
||||||
self.assertEqual(1, quotas[1][resource_1])
|
|
||||||
self.assertEqual(2, quotas[1][resource_2])
|
|
||||||
|
|
||||||
def test_limit_check(self):
|
|
||||||
resources = {RESOURCE: TestResource(RESOURCE, 2)}
|
|
||||||
values = {RESOURCE: 1}
|
|
||||||
|
|
||||||
self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
|
|
||||||
self.plugin.limit_check(self.context, PROJECT, resources, values)
|
|
||||||
|
|
||||||
def test_limit_check_over_quota(self):
|
|
||||||
resources = {RESOURCE: TestResource(RESOURCE, 2)}
|
|
||||||
values = {RESOURCE: 3}
|
|
||||||
|
|
||||||
self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
|
|
||||||
|
|
||||||
self.assertRaises(exceptions.OverQuota, self.plugin.limit_check,
|
|
||||||
context.get_admin_context(), PROJECT, resources,
|
|
||||||
values)
|
|
||||||
|
|
||||||
def test_limit_check_equals_to_quota(self):
|
|
||||||
resources = {RESOURCE: TestResource(RESOURCE, 2)}
|
|
||||||
values = {RESOURCE: 2}
|
|
||||||
|
|
||||||
self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
|
|
||||||
self.plugin.limit_check(self.context, PROJECT, resources, values)
|
|
||||||
|
|
||||||
def test_limit_check_value_lower_than_zero(self):
|
|
||||||
resources = {RESOURCE: TestResource(RESOURCE, 2)}
|
|
||||||
values = {RESOURCE: -1}
|
|
||||||
|
|
||||||
self.plugin.update_quota_limit(self.context, PROJECT, RESOURCE, 2)
|
|
||||||
self.assertRaises(exceptions.InvalidQuotaValue,
|
|
||||||
self.plugin.limit_check, context.get_admin_context(),
|
|
||||||
PROJECT, resources, values)
|
|
||||||
|
|
||||||
def test_limit_check_wrong_values_size(self):
|
|
||||||
resource_1 = 'res_test_1'
|
|
||||||
resource_2 = 'res_test_2'
|
|
||||||
|
|
||||||
resources = {resource_1: TestResource(resource_1, 2)}
|
|
||||||
values = {resource_1: 1, resource_2: 1}
|
|
||||||
|
|
||||||
self.plugin.update_quota_limit(self.context, PROJECT, resource_1, 2)
|
|
||||||
self.assertRaises(exceptions.QuotaResourceUnknown,
|
|
||||||
self.plugin.limit_check, context.get_admin_context(),
|
|
||||||
PROJECT, resources, values)
|
|
|
@ -1,17 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
#
|
|
||||||
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
# @author: Swaminathan Vasudevan, Hewlett-Packard.
|
|
File diff suppressed because it is too large
Load Diff
|
@ -1,139 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2012 OpenStack Foundation.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
from neutron.api import extensions
|
|
||||||
from neutron.api.v2 import base
|
|
||||||
from neutron.common import exceptions
|
|
||||||
from neutron.db import servicetype_db
|
|
||||||
from neutron.extensions import servicetype
|
|
||||||
from neutron import manager
|
|
||||||
from neutron.openstack.common import uuidutils
|
|
||||||
from neutron.plugins.common import constants
|
|
||||||
from neutron.services import service_base
|
|
||||||
|
|
||||||
|
|
||||||
DUMMY_PLUGIN_NAME = "dummy_plugin"
|
|
||||||
RESOURCE_NAME = "dummy"
|
|
||||||
COLLECTION_NAME = "%ss" % RESOURCE_NAME
|
|
||||||
|
|
||||||
# Attribute Map for dummy resource
|
|
||||||
RESOURCE_ATTRIBUTE_MAP = {
|
|
||||||
COLLECTION_NAME: {
|
|
||||||
'id': {'allow_post': False, 'allow_put': False,
|
|
||||||
'validate': {'type:uuid': None},
|
|
||||||
'is_visible': True},
|
|
||||||
'name': {'allow_post': True, 'allow_put': True,
|
|
||||||
'validate': {'type:string': None},
|
|
||||||
'is_visible': True, 'default': ''},
|
|
||||||
'tenant_id': {'allow_post': True, 'allow_put': False,
|
|
||||||
'required_by_policy': True,
|
|
||||||
'is_visible': True},
|
|
||||||
'service_type': {'allow_post': True,
|
|
||||||
'allow_put': False,
|
|
||||||
'validate': {'type:servicetype_ref': None},
|
|
||||||
'is_visible': True,
|
|
||||||
'default': None}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
class Dummy(object):
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def get_name(cls):
|
|
||||||
return "dummy"
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def get_alias(cls):
|
|
||||||
return "dummy"
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def get_description(cls):
|
|
||||||
return "Dummy stuff"
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def get_namespace(cls):
|
|
||||||
return "http://docs.openstack.org/ext/neutron/dummy/api/v1.0"
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def get_updated(cls):
|
|
||||||
return "2012-11-20T10:00:00-00:00"
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def get_resources(cls):
|
|
||||||
"""Returns Extended Resource for dummy management."""
|
|
||||||
q_mgr = manager.NeutronManager.get_instance()
|
|
||||||
dummy_inst = q_mgr.get_service_plugins()['DUMMY']
|
|
||||||
controller = base.create_resource(
|
|
||||||
COLLECTION_NAME, RESOURCE_NAME, dummy_inst,
|
|
||||||
RESOURCE_ATTRIBUTE_MAP[COLLECTION_NAME])
|
|
||||||
return [extensions.ResourceExtension(COLLECTION_NAME,
|
|
||||||
controller)]
|
|
||||||
|
|
||||||
|
|
||||||
class DummyServicePlugin(service_base.ServicePluginBase):
|
|
||||||
"""This is a simple plugin for managing instantes of a fictional 'dummy'
|
|
||||||
service. This plugin is provided as a proof-of-concept of how
|
|
||||||
advanced service might leverage the service type extension.
|
|
||||||
Ideally, instances of real advanced services, such as load balancing
|
|
||||||
or VPN will adopt a similar solution.
|
|
||||||
"""
|
|
||||||
|
|
||||||
supported_extension_aliases = ['dummy', servicetype.EXT_ALIAS]
|
|
||||||
agent_notifiers = {'dummy': 'dummy_agent_notifier'}
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
self.svctype_mgr = servicetype_db.ServiceTypeManager.get_instance()
|
|
||||||
self.dummys = {}
|
|
||||||
|
|
||||||
def get_plugin_type(self):
|
|
||||||
return constants.DUMMY
|
|
||||||
|
|
||||||
def get_plugin_name(self):
|
|
||||||
return DUMMY_PLUGIN_NAME
|
|
||||||
|
|
||||||
def get_plugin_description(self):
|
|
||||||
return "Neutron Dummy Service Plugin"
|
|
||||||
|
|
||||||
def get_dummys(self, context, filters, fields):
|
|
||||||
return self.dummys.values()
|
|
||||||
|
|
||||||
def get_dummy(self, context, id, fields):
|
|
||||||
try:
|
|
||||||
return self.dummys[id]
|
|
||||||
except KeyError:
|
|
||||||
raise exceptions.NotFound()
|
|
||||||
|
|
||||||
def create_dummy(self, context, dummy):
|
|
||||||
d = dummy['dummy']
|
|
||||||
d['id'] = uuidutils.generate_uuid()
|
|
||||||
self.dummys[d['id']] = d
|
|
||||||
self.svctype_mgr.increase_service_type_refcount(context,
|
|
||||||
d['service_type'])
|
|
||||||
return d
|
|
||||||
|
|
||||||
def update_dummy(self, context, id, dummy):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def delete_dummy(self, context, id):
|
|
||||||
try:
|
|
||||||
svc_type_id = self.dummys[id]['service_type']
|
|
||||||
del self.dummys[id]
|
|
||||||
self.svctype_mgr.decrease_service_type_refcount(context,
|
|
||||||
svc_type_id)
|
|
||||||
except KeyError:
|
|
||||||
raise exceptions.NotFound()
|
|
|
@ -1,18 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 Embrane, Inc.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Ivar Lazzaro, Embrane, Inc.
|
|
|
@ -1,31 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 Embrane, Inc.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Ivar Lazzaro, Embrane, Inc.
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from neutron.plugins.embrane.common import config # noqa
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
class ConfigurationTest(base.BaseTestCase):
|
|
||||||
|
|
||||||
def test_defaults(self):
|
|
||||||
self.assertEqual('admin', cfg.CONF.heleos.admin_username)
|
|
||||||
self.assertEqual('default', cfg.CONF.heleos.resource_pool_id)
|
|
||||||
self.assertTrue(cfg.CONF.heleos.async_requests)
|
|
|
@ -1,41 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 Embrane, Inc.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Ivar Lazzaro, Embrane, Inc.
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from neutron.db import api as db
|
|
||||||
from neutron.plugins.embrane.common import config # noqa
|
|
||||||
from neutron.tests.unit import test_extension_extraroute as extraroute_test
|
|
||||||
from neutron.tests.unit import test_l3_plugin as router_test
|
|
||||||
|
|
||||||
PLUGIN_NAME = ('neutron.plugins.embrane.plugins.embrane_fake_plugin.'
|
|
||||||
'EmbraneFakePlugin')
|
|
||||||
|
|
||||||
|
|
||||||
class TestEmbraneL3NatDBTestCase(router_test.L3NatDBIntTestCase):
|
|
||||||
_plugin_name = PLUGIN_NAME
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
cfg.CONF.set_override('admin_password', "admin123", 'heleos')
|
|
||||||
self.addCleanup(db.clear_db)
|
|
||||||
super(TestEmbraneL3NatDBTestCase, self).setUp()
|
|
||||||
|
|
||||||
|
|
||||||
class ExtraRouteDBTestCase(extraroute_test.ExtraRouteDBIntTestCase):
|
|
||||||
_plugin_name = PLUGIN_NAME
|
|
|
@ -1,82 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 Embrane, Inc.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Ivar Lazzaro, Embrane, Inc.
|
|
||||||
import sys
|
|
||||||
|
|
||||||
import mock
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from neutron.db import api as db
|
|
||||||
from neutron.plugins.embrane.common import config # noqa
|
|
||||||
from neutron.tests.unit import test_db_plugin as test_plugin
|
|
||||||
|
|
||||||
PLUGIN_NAME = ('neutron.plugins.embrane.plugins.embrane_fake_plugin.'
|
|
||||||
'EmbraneFakePlugin')
|
|
||||||
|
|
||||||
|
|
||||||
class EmbranePluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
|
|
||||||
_plugin_name = PLUGIN_NAME
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
cfg.CONF.set_override('admin_password', "admin123", 'heleos')
|
|
||||||
p = mock.patch.dict(sys.modules, {'heleosapi': mock.Mock()})
|
|
||||||
p.start()
|
|
||||||
self.addCleanup(db.clear_db)
|
|
||||||
# dict patches must be explicitly stopped
|
|
||||||
self.addCleanup(p.stop)
|
|
||||||
super(EmbranePluginV2TestCase, self).setUp(self._plugin_name)
|
|
||||||
|
|
||||||
|
|
||||||
class TestEmbraneBasicGet(test_plugin.TestBasicGet, EmbranePluginV2TestCase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestEmbraneV2HTTPResponse(test_plugin.TestV2HTTPResponse,
|
|
||||||
EmbranePluginV2TestCase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestEmbranePortsV2(test_plugin.TestPortsV2, EmbranePluginV2TestCase):
|
|
||||||
|
|
||||||
def test_create_ports_bulk_emulated_plugin_failure(self):
|
|
||||||
self.skip("Temporary skipping due to incompatibility with the"
|
|
||||||
" plugin dynamic class type")
|
|
||||||
|
|
||||||
def test_recycle_expired_previously_run_within_context(self):
|
|
||||||
self.skip("Temporary skipping due to incompatibility with the"
|
|
||||||
" plugin dynamic class type")
|
|
||||||
|
|
||||||
def test_recycle_held_ip_address(self):
|
|
||||||
self.skip("Temporary skipping due to incompatibility with the"
|
|
||||||
" plugin dynamic class type")
|
|
||||||
|
|
||||||
|
|
||||||
class TestEmbraneNetworksV2(test_plugin.TestNetworksV2,
|
|
||||||
EmbranePluginV2TestCase):
|
|
||||||
|
|
||||||
def test_create_networks_bulk_emulated_plugin_failure(self):
|
|
||||||
self.skip("Temporary skipping due to incompatibility with the"
|
|
||||||
" plugin dynamic class type")
|
|
||||||
|
|
||||||
|
|
||||||
class TestEmbraneSubnetsV2(test_plugin.TestSubnetsV2,
|
|
||||||
EmbranePluginV2TestCase):
|
|
||||||
|
|
||||||
def test_create_subnets_bulk_emulated_plugin_failure(self):
|
|
||||||
self.skip("Temporary skipping due to incompatibility with the"
|
|
||||||
" plugin dynamic class type")
|
|
|
@ -1,16 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 Cloudbase Solutions SRL
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
|
@ -1,221 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 Cloudbase Solutions SRL
|
|
||||||
# Copyright 2013 Pedro Navarro Perez
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
Unit tests for Windows Hyper-V virtual switch neutron driver
|
|
||||||
"""
|
|
||||||
|
|
||||||
import mock
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from neutron.plugins.hyperv.agent import hyperv_neutron_agent
|
|
||||||
from neutron.plugins.hyperv.agent import utilsfactory
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
cfg.CONF.import_opt('enable_metrics_collection',
|
|
||||||
'neutron.plugins.hyperv.agent.hyperv_neutron_agent',
|
|
||||||
'AGENT')
|
|
||||||
|
|
||||||
|
|
||||||
class TestHyperVNeutronAgent(base.BaseTestCase):
|
|
||||||
|
|
||||||
_FAKE_PORT_ID = 'fake_port_id'
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestHyperVNeutronAgent, self).setUp()
|
|
||||||
# Avoid rpc initialization for unit tests
|
|
||||||
cfg.CONF.set_override('rpc_backend',
|
|
||||||
'neutron.openstack.common.rpc.impl_fake')
|
|
||||||
|
|
||||||
utilsfactory._get_windows_version = mock.MagicMock(
|
|
||||||
return_value='6.2.0')
|
|
||||||
|
|
||||||
class MockFixedIntervalLoopingCall(object):
|
|
||||||
def __init__(self, f):
|
|
||||||
self.f = f
|
|
||||||
|
|
||||||
def start(self, interval=0):
|
|
||||||
self.f()
|
|
||||||
|
|
||||||
mock.patch('neutron.openstack.common.loopingcall.'
|
|
||||||
'FixedIntervalLoopingCall',
|
|
||||||
new=MockFixedIntervalLoopingCall).start()
|
|
||||||
cfg.CONF.set_default('firewall_driver',
|
|
||||||
'neutron.agent.firewall.NoopFirewallDriver',
|
|
||||||
group='SECURITYGROUP')
|
|
||||||
self.agent = hyperv_neutron_agent.HyperVNeutronAgent()
|
|
||||||
self.agent.plugin_rpc = mock.Mock()
|
|
||||||
self.agent.sec_groups_agent = mock.MagicMock()
|
|
||||||
self.agent.context = mock.Mock()
|
|
||||||
self.agent.agent_id = mock.Mock()
|
|
||||||
|
|
||||||
fake_agent_state = {
|
|
||||||
'binary': 'neutron-hyperv-agent',
|
|
||||||
'host': 'fake_host_name',
|
|
||||||
'topic': 'N/A',
|
|
||||||
'configurations': {'vswitch_mappings': ['*:MyVirtualSwitch']},
|
|
||||||
'agent_type': 'HyperV agent',
|
|
||||||
'start_flag': True}
|
|
||||||
self.agent_state = fake_agent_state
|
|
||||||
|
|
||||||
def test_port_bound_enable_metrics(self):
|
|
||||||
cfg.CONF.set_override('enable_metrics_collection', True, 'AGENT')
|
|
||||||
self._test_port_bound(True)
|
|
||||||
|
|
||||||
def test_port_bound_no_metrics(self):
|
|
||||||
cfg.CONF.set_override('enable_metrics_collection', False, 'AGENT')
|
|
||||||
self._test_port_bound(False)
|
|
||||||
|
|
||||||
def _test_port_bound(self, enable_metrics):
|
|
||||||
port = mock.MagicMock()
|
|
||||||
mock_enable_metrics = mock.MagicMock()
|
|
||||||
net_uuid = 'my-net-uuid'
|
|
||||||
|
|
||||||
with mock.patch.multiple(
|
|
||||||
self.agent._utils,
|
|
||||||
connect_vnic_to_vswitch=mock.MagicMock(),
|
|
||||||
set_vswitch_port_vlan_id=mock.MagicMock(),
|
|
||||||
enable_port_metrics_collection=mock_enable_metrics):
|
|
||||||
|
|
||||||
self.agent._port_bound(port, net_uuid, 'vlan', None, None)
|
|
||||||
|
|
||||||
self.assertEqual(enable_metrics, mock_enable_metrics.called)
|
|
||||||
|
|
||||||
def test_port_unbound(self):
|
|
||||||
map = {
|
|
||||||
'network_type': 'vlan',
|
|
||||||
'vswitch_name': 'fake-vswitch',
|
|
||||||
'ports': [],
|
|
||||||
'vlan_id': 1}
|
|
||||||
net_uuid = 'my-net-uuid'
|
|
||||||
network_vswitch_map = (net_uuid, map)
|
|
||||||
with mock.patch.object(self.agent,
|
|
||||||
'_get_network_vswitch_map_by_port_id',
|
|
||||||
return_value=network_vswitch_map):
|
|
||||||
with mock.patch.object(
|
|
||||||
self.agent._utils,
|
|
||||||
'disconnect_switch_port'):
|
|
||||||
self.agent._port_unbound(net_uuid)
|
|
||||||
|
|
||||||
def test_port_enable_control_metrics_ok(self):
|
|
||||||
cfg.CONF.set_override('enable_metrics_collection', True, 'AGENT')
|
|
||||||
self.agent._port_metric_retries[self._FAKE_PORT_ID] = (
|
|
||||||
cfg.CONF.AGENT.metrics_max_retries)
|
|
||||||
|
|
||||||
with mock.patch.multiple(self.agent._utils,
|
|
||||||
can_enable_control_metrics=mock.MagicMock(),
|
|
||||||
enable_control_metrics=mock.MagicMock()):
|
|
||||||
|
|
||||||
self.agent._utils.can_enable_control_metrics.return_value = True
|
|
||||||
self.agent._port_enable_control_metrics()
|
|
||||||
self.agent._utils.enable_control_metrics.assert_called_with(
|
|
||||||
self._FAKE_PORT_ID)
|
|
||||||
|
|
||||||
self.assertNotIn(self._FAKE_PORT_ID, self.agent._port_metric_retries)
|
|
||||||
|
|
||||||
def test_port_enable_control_metrics_maxed(self):
|
|
||||||
cfg.CONF.set_override('enable_metrics_collection', True, 'AGENT')
|
|
||||||
cfg.CONF.set_override('metrics_max_retries', 3, 'AGENT')
|
|
||||||
self.agent._port_metric_retries[self._FAKE_PORT_ID] = (
|
|
||||||
cfg.CONF.AGENT.metrics_max_retries)
|
|
||||||
|
|
||||||
with mock.patch.multiple(self.agent._utils,
|
|
||||||
can_enable_control_metrics=mock.MagicMock(),
|
|
||||||
enable_control_metrics=mock.MagicMock()):
|
|
||||||
|
|
||||||
self.agent._utils.can_enable_control_metrics.return_value = False
|
|
||||||
for i in range(cfg.CONF.AGENT.metrics_max_retries + 1):
|
|
||||||
self.assertIn(self._FAKE_PORT_ID,
|
|
||||||
self.agent._port_metric_retries)
|
|
||||||
self.agent._port_enable_control_metrics()
|
|
||||||
|
|
||||||
self.assertNotIn(self._FAKE_PORT_ID, self.agent._port_metric_retries)
|
|
||||||
|
|
||||||
def test_treat_devices_added_returns_true_for_missing_device(self):
|
|
||||||
attrs = {'get_device_details.side_effect': Exception()}
|
|
||||||
self.agent.plugin_rpc.configure_mock(**attrs)
|
|
||||||
self.assertTrue(self.agent._treat_devices_added([{}]))
|
|
||||||
|
|
||||||
def mock_treat_devices_added(self, details, func_name):
|
|
||||||
"""Mock treat devices added.
|
|
||||||
|
|
||||||
:param details: the details to return for the device
|
|
||||||
:param func_name: the function that should be called
|
|
||||||
:returns: whether the named function was called
|
|
||||||
"""
|
|
||||||
attrs = {'get_device_details.return_value': details}
|
|
||||||
self.agent.plugin_rpc.configure_mock(**attrs)
|
|
||||||
with mock.patch.object(self.agent, func_name) as func:
|
|
||||||
self.assertFalse(self.agent._treat_devices_added([{}]))
|
|
||||||
return func.called
|
|
||||||
|
|
||||||
def test_treat_devices_added_updates_known_port(self):
|
|
||||||
details = mock.MagicMock()
|
|
||||||
details.__contains__.side_effect = lambda x: True
|
|
||||||
with mock.patch.object(self.agent.plugin_rpc,
|
|
||||||
"update_device_up") as func:
|
|
||||||
self.assertTrue(self.mock_treat_devices_added(details,
|
|
||||||
'_treat_vif_port'))
|
|
||||||
self.assertTrue(func.called)
|
|
||||||
|
|
||||||
def test_treat_devices_added_missing_port_id(self):
|
|
||||||
details = mock.MagicMock()
|
|
||||||
details.__contains__.side_effect = lambda x: False
|
|
||||||
with mock.patch.object(self.agent.plugin_rpc,
|
|
||||||
"update_device_up") as func:
|
|
||||||
self.assertFalse(self.mock_treat_devices_added(details,
|
|
||||||
'_treat_vif_port'))
|
|
||||||
self.assertFalse(func.called)
|
|
||||||
|
|
||||||
def test_treat_devices_removed_returns_true_for_missing_device(self):
|
|
||||||
attrs = {'update_device_down.side_effect': Exception()}
|
|
||||||
self.agent.plugin_rpc.configure_mock(**attrs)
|
|
||||||
self.assertTrue(self.agent._treat_devices_removed([{}]))
|
|
||||||
|
|
||||||
def mock_treat_devices_removed(self, port_exists):
|
|
||||||
details = dict(exists=port_exists)
|
|
||||||
attrs = {'update_device_down.return_value': details}
|
|
||||||
self.agent.plugin_rpc.configure_mock(**attrs)
|
|
||||||
with mock.patch.object(self.agent, '_port_unbound') as func:
|
|
||||||
self.assertFalse(self.agent._treat_devices_removed([{}]))
|
|
||||||
self.assertEqual(func.called, not port_exists)
|
|
||||||
|
|
||||||
def test_treat_devices_removed_unbinds_port(self):
|
|
||||||
self.mock_treat_devices_removed(False)
|
|
||||||
|
|
||||||
def test_treat_devices_removed_ignores_missing_port(self):
|
|
||||||
self.mock_treat_devices_removed(False)
|
|
||||||
|
|
||||||
def test_report_state(self):
|
|
||||||
with mock.patch.object(self.agent.state_rpc,
|
|
||||||
"report_state") as report_st:
|
|
||||||
self.agent._report_state()
|
|
||||||
report_st.assert_called_with(self.agent.context,
|
|
||||||
self.agent.agent_state)
|
|
||||||
self.assertNotIn("start_flag", self.agent.agent_state)
|
|
||||||
|
|
||||||
def test_main(self):
|
|
||||||
with mock.patch.object(hyperv_neutron_agent,
|
|
||||||
'HyperVNeutronAgent') as plugin:
|
|
||||||
with mock.patch.object(hyperv_neutron_agent,
|
|
||||||
'common_config') as common_config:
|
|
||||||
hyperv_neutron_agent.main()
|
|
||||||
|
|
||||||
self.assertTrue(common_config.init.called)
|
|
||||||
self.assertTrue(common_config.setup_logging.called)
|
|
||||||
plugin.assert_has_calls([mock.call().daemon_loop()])
|
|
|
@ -1,69 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 Cloudbase Solutions SRL
|
|
||||||
# Copyright 2013 Pedro Navarro Perez
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import contextlib
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from neutron import context
|
|
||||||
from neutron.extensions import portbindings
|
|
||||||
from neutron import manager
|
|
||||||
from neutron.tests.unit import test_db_plugin as test_plugin
|
|
||||||
|
|
||||||
|
|
||||||
class HyperVNeutronPluginTestCase(test_plugin.NeutronDbPluginV2TestCase):
|
|
||||||
|
|
||||||
_plugin_name = ('neutron.plugins.hyperv.'
|
|
||||||
'hyperv_neutron_plugin.HyperVNeutronPlugin')
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(HyperVNeutronPluginTestCase, self).setUp(self._plugin_name)
|
|
||||||
|
|
||||||
|
|
||||||
class TestHyperVVirtualSwitchBasicGet(
|
|
||||||
test_plugin.TestBasicGet, HyperVNeutronPluginTestCase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestHyperVVirtualSwitchV2HTTPResponse(
|
|
||||||
test_plugin.TestV2HTTPResponse, HyperVNeutronPluginTestCase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestHyperVVirtualSwitchPortsV2(
|
|
||||||
test_plugin.TestPortsV2, HyperVNeutronPluginTestCase):
|
|
||||||
def test_port_vif_details(self):
|
|
||||||
with self.port(name='name') as port:
|
|
||||||
self.assertEqual(port['port']['binding:vif_type'],
|
|
||||||
portbindings.VIF_TYPE_HYPERV)
|
|
||||||
|
|
||||||
def test_ports_vif_details(self):
|
|
||||||
cfg.CONF.set_default('allow_overlapping_ips', True)
|
|
||||||
plugin = manager.NeutronManager.get_plugin()
|
|
||||||
with contextlib.nested(self.port(), self.port()) as (port1, port2):
|
|
||||||
ctx = context.get_admin_context()
|
|
||||||
ports = plugin.get_ports(ctx)
|
|
||||||
self.assertEqual(len(ports), 2)
|
|
||||||
for port in ports:
|
|
||||||
self.assertEqual(port['binding:vif_type'],
|
|
||||||
portbindings.VIF_TYPE_HYPERV)
|
|
||||||
|
|
||||||
|
|
||||||
class TestHyperVVirtualSwitchNetworksV2(
|
|
||||||
test_plugin.TestNetworksV2, HyperVNeutronPluginTestCase):
|
|
||||||
pass
|
|
|
@ -1,125 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 Cloudbase Solutions SRL
|
|
||||||
# Copyright 2013 Pedro Navarro Perez
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
Unit Tests for hyperv neutron rpc
|
|
||||||
"""
|
|
||||||
|
|
||||||
import mock
|
|
||||||
|
|
||||||
from neutron.agent import rpc as agent_rpc
|
|
||||||
from neutron.common import rpc_compat
|
|
||||||
from neutron.common import topics
|
|
||||||
from neutron.openstack.common import context
|
|
||||||
from neutron.plugins.hyperv import agent_notifier_api as ana
|
|
||||||
from neutron.plugins.hyperv.common import constants
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
class rpcHyperVApiTestCase(base.BaseTestCase):
|
|
||||||
|
|
||||||
def _test_hyperv_neutron_api(
|
|
||||||
self, rpcapi, topic, method, rpc_method, **kwargs):
|
|
||||||
ctxt = context.RequestContext('fake_user', 'fake_project')
|
|
||||||
expected_retval = 'foo' if method == 'call' else None
|
|
||||||
expected_msg = rpcapi.make_msg(method, **kwargs)
|
|
||||||
if rpc_method == 'cast' and method == 'run_instance':
|
|
||||||
kwargs['call'] = False
|
|
||||||
|
|
||||||
proxy = rpc_compat.RpcProxy
|
|
||||||
with mock.patch.object(proxy, rpc_method) as rpc_method_mock:
|
|
||||||
rpc_method_mock.return_value = expected_retval
|
|
||||||
retval = getattr(rpcapi, method)(ctxt, **kwargs)
|
|
||||||
|
|
||||||
self.assertEqual(retval, expected_retval)
|
|
||||||
expected = [
|
|
||||||
mock.call(ctxt, expected_msg, topic=topic)
|
|
||||||
]
|
|
||||||
rpc_method_mock.assert_has_calls(expected)
|
|
||||||
|
|
||||||
def test_delete_network(self):
|
|
||||||
rpcapi = ana.AgentNotifierApi(topics.AGENT)
|
|
||||||
self._test_hyperv_neutron_api(
|
|
||||||
rpcapi,
|
|
||||||
topics.get_topic_name(
|
|
||||||
topics.AGENT,
|
|
||||||
topics.NETWORK,
|
|
||||||
topics.DELETE),
|
|
||||||
'network_delete', rpc_method='fanout_cast',
|
|
||||||
network_id='fake_request_spec')
|
|
||||||
|
|
||||||
def test_port_update(self):
|
|
||||||
rpcapi = ana.AgentNotifierApi(topics.AGENT)
|
|
||||||
self._test_hyperv_neutron_api(
|
|
||||||
rpcapi,
|
|
||||||
topics.get_topic_name(
|
|
||||||
topics.AGENT,
|
|
||||||
topics.PORT,
|
|
||||||
topics.UPDATE),
|
|
||||||
'port_update', rpc_method='fanout_cast',
|
|
||||||
port='fake_port',
|
|
||||||
network_type='fake_network_type',
|
|
||||||
segmentation_id='fake_segmentation_id',
|
|
||||||
physical_network='fake_physical_network')
|
|
||||||
|
|
||||||
def test_port_delete(self):
|
|
||||||
rpcapi = ana.AgentNotifierApi(topics.AGENT)
|
|
||||||
self._test_hyperv_neutron_api(
|
|
||||||
rpcapi,
|
|
||||||
topics.get_topic_name(
|
|
||||||
topics.AGENT,
|
|
||||||
topics.PORT,
|
|
||||||
topics.DELETE),
|
|
||||||
'port_delete', rpc_method='fanout_cast',
|
|
||||||
port_id='port_id')
|
|
||||||
|
|
||||||
def test_tunnel_update(self):
|
|
||||||
rpcapi = ana.AgentNotifierApi(topics.AGENT)
|
|
||||||
self._test_hyperv_neutron_api(
|
|
||||||
rpcapi,
|
|
||||||
topics.get_topic_name(
|
|
||||||
topics.AGENT,
|
|
||||||
constants.TUNNEL,
|
|
||||||
topics.UPDATE),
|
|
||||||
'tunnel_update', rpc_method='fanout_cast',
|
|
||||||
tunnel_ip='fake_ip', tunnel_id='fake_id')
|
|
||||||
|
|
||||||
def test_device_details(self):
|
|
||||||
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
|
|
||||||
self._test_hyperv_neutron_api(
|
|
||||||
rpcapi, topics.PLUGIN,
|
|
||||||
'get_device_details', rpc_method='call',
|
|
||||||
device='fake_device',
|
|
||||||
agent_id='fake_agent_id')
|
|
||||||
|
|
||||||
def test_update_device_down(self):
|
|
||||||
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
|
|
||||||
self._test_hyperv_neutron_api(
|
|
||||||
rpcapi, topics.PLUGIN,
|
|
||||||
'update_device_down', rpc_method='call',
|
|
||||||
device='fake_device',
|
|
||||||
agent_id='fake_agent_id',
|
|
||||||
host='fake_host')
|
|
||||||
|
|
||||||
def test_tunnel_sync(self):
|
|
||||||
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
|
|
||||||
self._test_hyperv_neutron_api(
|
|
||||||
rpcapi, topics.PLUGIN,
|
|
||||||
'tunnel_sync', rpc_method='call',
|
|
||||||
tunnel_ip='fake_tunnel_ip',
|
|
||||||
tunnel_type=None)
|
|
|
@ -1,189 +0,0 @@
|
||||||
# Copyright 2014 Cloudbase Solutions SRL
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
# @author: Claudiu Belu, Cloudbase Solutions Srl
|
|
||||||
|
|
||||||
"""
|
|
||||||
Unit tests for the Hyper-V Security Groups Driver.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import mock
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from neutron.plugins.hyperv.agent import security_groups_driver as sg_driver
|
|
||||||
from neutron.plugins.hyperv.agent import utilsfactory
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
|
|
||||||
|
|
||||||
class TestHyperVSecurityGroupsDriver(base.BaseTestCase):
|
|
||||||
|
|
||||||
_FAKE_DEVICE = 'fake_device'
|
|
||||||
_FAKE_ID = 'fake_id'
|
|
||||||
_FAKE_DIRECTION = 'ingress'
|
|
||||||
_FAKE_ETHERTYPE = 'IPv4'
|
|
||||||
_FAKE_ETHERTYPE_IPV6 = 'IPv6'
|
|
||||||
_FAKE_DEST_IP_PREFIX = 'fake_dest_ip_prefix'
|
|
||||||
_FAKE_SOURCE_IP_PREFIX = 'fake_source_ip_prefix'
|
|
||||||
_FAKE_PARAM_NAME = 'fake_param_name'
|
|
||||||
_FAKE_PARAM_VALUE = 'fake_param_value'
|
|
||||||
|
|
||||||
_FAKE_PORT_MIN = 9001
|
|
||||||
_FAKE_PORT_MAX = 9011
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestHyperVSecurityGroupsDriver, self).setUp()
|
|
||||||
self._mock_windows_version = mock.patch.object(utilsfactory,
|
|
||||||
'get_hypervutils')
|
|
||||||
self._mock_windows_version.start()
|
|
||||||
self._driver = sg_driver.HyperVSecurityGroupsDriver()
|
|
||||||
self._driver._utils = mock.MagicMock()
|
|
||||||
|
|
||||||
@mock.patch('neutron.plugins.hyperv.agent.security_groups_driver'
|
|
||||||
'.HyperVSecurityGroupsDriver._create_port_rules')
|
|
||||||
def test_prepare_port_filter(self, mock_create_rules):
|
|
||||||
mock_port = self._get_port()
|
|
||||||
mock_utils_method = self._driver._utils.create_default_reject_all_rules
|
|
||||||
self._driver.prepare_port_filter(mock_port)
|
|
||||||
|
|
||||||
self.assertEqual(mock_port,
|
|
||||||
self._driver._security_ports[self._FAKE_DEVICE])
|
|
||||||
mock_utils_method.assert_called_once_with(self._FAKE_ID)
|
|
||||||
self._driver._create_port_rules.assert_called_once_with(
|
|
||||||
self._FAKE_ID, mock_port['security_group_rules'])
|
|
||||||
|
|
||||||
def test_update_port_filter(self):
|
|
||||||
mock_port = self._get_port()
|
|
||||||
new_mock_port = self._get_port()
|
|
||||||
new_mock_port['id'] += '2'
|
|
||||||
new_mock_port['security_group_rules'][0]['ethertype'] += "2"
|
|
||||||
|
|
||||||
self._driver._security_ports[mock_port['device']] = mock_port
|
|
||||||
self._driver._create_port_rules = mock.MagicMock()
|
|
||||||
self._driver._remove_port_rules = mock.MagicMock()
|
|
||||||
self._driver.update_port_filter(new_mock_port)
|
|
||||||
|
|
||||||
self._driver._remove_port_rules.assert_called_once_with(
|
|
||||||
mock_port['id'], mock_port['security_group_rules'])
|
|
||||||
self._driver._create_port_rules.assert_called_once_with(
|
|
||||||
new_mock_port['id'], new_mock_port['security_group_rules'])
|
|
||||||
self.assertEqual(new_mock_port,
|
|
||||||
self._driver._security_ports[new_mock_port['device']])
|
|
||||||
|
|
||||||
@mock.patch('neutron.plugins.hyperv.agent.security_groups_driver'
|
|
||||||
'.HyperVSecurityGroupsDriver.prepare_port_filter')
|
|
||||||
def test_update_port_filter_new_port(self, mock_method):
|
|
||||||
mock_port = self._get_port()
|
|
||||||
self._driver.prepare_port_filter = mock.MagicMock()
|
|
||||||
self._driver.update_port_filter(mock_port)
|
|
||||||
|
|
||||||
self._driver.prepare_port_filter.assert_called_once_with(mock_port)
|
|
||||||
|
|
||||||
def test_remove_port_filter(self):
|
|
||||||
mock_port = self._get_port()
|
|
||||||
self._driver._security_ports[mock_port['device']] = mock_port
|
|
||||||
self._driver.remove_port_filter(mock_port)
|
|
||||||
self.assertFalse(mock_port['device'] in self._driver._security_ports)
|
|
||||||
|
|
||||||
def test_create_port_rules_exception(self):
|
|
||||||
fake_rule = self._create_security_rule()
|
|
||||||
self._driver._utils.create_security_rule.side_effect = Exception(
|
|
||||||
'Generated Exception for testing.')
|
|
||||||
self._driver._create_port_rules(self._FAKE_ID, [fake_rule])
|
|
||||||
|
|
||||||
def test_create_param_map(self):
|
|
||||||
fake_rule = self._create_security_rule()
|
|
||||||
self._driver._get_rule_remote_address = mock.MagicMock(
|
|
||||||
return_value=self._FAKE_SOURCE_IP_PREFIX)
|
|
||||||
actual = self._driver._create_param_map(fake_rule)
|
|
||||||
expected = {
|
|
||||||
'direction': self._driver._ACL_PROP_MAP[
|
|
||||||
'direction'][self._FAKE_DIRECTION],
|
|
||||||
'acl_type': self._driver._ACL_PROP_MAP[
|
|
||||||
'ethertype'][self._FAKE_ETHERTYPE],
|
|
||||||
'local_port': '%s-%s' % (self._FAKE_PORT_MIN, self._FAKE_PORT_MAX),
|
|
||||||
'protocol': self._driver._ACL_PROP_MAP['default'],
|
|
||||||
'remote_address': self._FAKE_SOURCE_IP_PREFIX
|
|
||||||
}
|
|
||||||
|
|
||||||
self.assertEqual(expected, actual)
|
|
||||||
|
|
||||||
@mock.patch('neutron.plugins.hyperv.agent.security_groups_driver'
|
|
||||||
'.HyperVSecurityGroupsDriver._create_param_map')
|
|
||||||
def test_create_port_rules(self, mock_method):
|
|
||||||
fake_rule = self._create_security_rule()
|
|
||||||
mock_method.return_value = {
|
|
||||||
self._FAKE_PARAM_NAME: self._FAKE_PARAM_VALUE}
|
|
||||||
self._driver._create_port_rules(self._FAKE_ID, [fake_rule])
|
|
||||||
|
|
||||||
self._driver._utils.create_security_rule.assert_called_once_with(
|
|
||||||
self._FAKE_ID, fake_param_name=self._FAKE_PARAM_VALUE)
|
|
||||||
|
|
||||||
def test_convert_any_address_to_same_ingress(self):
|
|
||||||
rule = self._create_security_rule()
|
|
||||||
actual = self._driver._get_rule_remote_address(rule)
|
|
||||||
self.assertEqual(self._FAKE_SOURCE_IP_PREFIX, actual)
|
|
||||||
|
|
||||||
def test_convert_any_address_to_same_egress(self):
|
|
||||||
rule = self._create_security_rule()
|
|
||||||
rule['direction'] += '2'
|
|
||||||
actual = self._driver._get_rule_remote_address(rule)
|
|
||||||
self.assertEqual(self._FAKE_DEST_IP_PREFIX, actual)
|
|
||||||
|
|
||||||
def test_convert_any_address_to_ipv4(self):
|
|
||||||
rule = self._create_security_rule()
|
|
||||||
del rule['source_ip_prefix']
|
|
||||||
actual = self._driver._get_rule_remote_address(rule)
|
|
||||||
self.assertEqual(self._driver._ACL_PROP_MAP['address_default']['IPv4'],
|
|
||||||
actual)
|
|
||||||
|
|
||||||
def test_convert_any_address_to_ipv6(self):
|
|
||||||
rule = self._create_security_rule()
|
|
||||||
del rule['source_ip_prefix']
|
|
||||||
rule['ethertype'] = self._FAKE_ETHERTYPE_IPV6
|
|
||||||
actual = self._driver._get_rule_remote_address(rule)
|
|
||||||
self.assertEqual(self._driver._ACL_PROP_MAP['address_default']['IPv6'],
|
|
||||||
actual)
|
|
||||||
|
|
||||||
def test_get_rule_protocol_icmp(self):
|
|
||||||
self._test_get_rule_protocol(
|
|
||||||
'icmp', self._driver._ACL_PROP_MAP['protocol']['icmp'])
|
|
||||||
|
|
||||||
def test_get_rule_protocol_no_icmp(self):
|
|
||||||
self._test_get_rule_protocol('tcp', 'tcp')
|
|
||||||
|
|
||||||
def _test_get_rule_protocol(self, protocol, expected):
|
|
||||||
rule = self._create_security_rule()
|
|
||||||
rule['protocol'] = protocol
|
|
||||||
actual = self._driver._get_rule_protocol(rule)
|
|
||||||
|
|
||||||
self.assertEqual(expected, actual)
|
|
||||||
|
|
||||||
def _get_port(self):
|
|
||||||
return {
|
|
||||||
'device': self._FAKE_DEVICE,
|
|
||||||
'id': self._FAKE_ID,
|
|
||||||
'security_group_rules': [self._create_security_rule()]
|
|
||||||
}
|
|
||||||
|
|
||||||
def _create_security_rule(self):
|
|
||||||
return {
|
|
||||||
'direction': self._FAKE_DIRECTION,
|
|
||||||
'ethertype': self._FAKE_ETHERTYPE,
|
|
||||||
'dest_ip_prefix': self._FAKE_DEST_IP_PREFIX,
|
|
||||||
'source_ip_prefix': self._FAKE_SOURCE_IP_PREFIX,
|
|
||||||
'port_range_min': self._FAKE_PORT_MIN,
|
|
||||||
'port_range_max': self._FAKE_PORT_MAX
|
|
||||||
}
|
|
|
@ -1,54 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 Cloudbase Solutions SRL
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
# @author: Claudiu Belu, Cloudbase Solutions Srl
|
|
||||||
|
|
||||||
"""
|
|
||||||
Unit tests for the Hyper-V utils factory.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import mock
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from neutron.plugins.hyperv.agent import utils
|
|
||||||
from neutron.plugins.hyperv.agent import utilsfactory
|
|
||||||
from neutron.plugins.hyperv.agent import utilsv2
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
CONF = cfg.CONF
|
|
||||||
|
|
||||||
|
|
||||||
class TestHyperVUtilsFactory(base.BaseTestCase):
|
|
||||||
|
|
||||||
def test_get_hypervutils_v2_r2(self):
|
|
||||||
self._test_returned_class(utilsv2.HyperVUtilsV2R2, True, '6.3.0')
|
|
||||||
|
|
||||||
def test_get_hypervutils_v2(self):
|
|
||||||
self._test_returned_class(utilsv2.HyperVUtilsV2, False, '6.2.0')
|
|
||||||
|
|
||||||
def test_get_hypervutils_v1_old_version(self):
|
|
||||||
self._test_returned_class(utils.HyperVUtils, False, '6.1.0')
|
|
||||||
|
|
||||||
def test_get_hypervutils_v1_forced(self):
|
|
||||||
self._test_returned_class(utils.HyperVUtils, True, '6.2.0')
|
|
||||||
|
|
||||||
def _test_returned_class(self, expected_class, force_v1, os_version):
|
|
||||||
CONF.hyperv.force_hyperv_utils_v1 = force_v1
|
|
||||||
utilsfactory._get_windows_version = mock.MagicMock(
|
|
||||||
return_value=os_version)
|
|
||||||
actual_class = type(utilsfactory.get_hypervutils())
|
|
||||||
self.assertEqual(actual_class, expected_class)
|
|
|
@ -1,519 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2013 Cloudbase Solutions SRL
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
# @author: Alessandro Pilotti, Cloudbase Solutions Srl
|
|
||||||
|
|
||||||
"""
|
|
||||||
Unit tests for the Hyper-V utils V2.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import mock
|
|
||||||
|
|
||||||
from neutron.plugins.hyperv.agent import utils
|
|
||||||
from neutron.plugins.hyperv.agent import utilsv2
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
class TestHyperVUtilsV2(base.BaseTestCase):
|
|
||||||
|
|
||||||
_FAKE_VSWITCH_NAME = "fake_vswitch_name"
|
|
||||||
_FAKE_PORT_NAME = "fake_port_name"
|
|
||||||
_FAKE_JOB_PATH = 'fake_job_path'
|
|
||||||
_FAKE_RET_VAL = 0
|
|
||||||
_FAKE_VM_PATH = "fake_vm_path"
|
|
||||||
_FAKE_RES_DATA = "fake_res_data"
|
|
||||||
_FAKE_RES_PATH = "fake_res_path"
|
|
||||||
_FAKE_VSWITCH = "fake_vswitch"
|
|
||||||
_FAKE_VLAN_ID = "fake_vlan_id"
|
|
||||||
_FAKE_CLASS_NAME = "fake_class_name"
|
|
||||||
_FAKE_ELEMENT_NAME = "fake_element_name"
|
|
||||||
_FAKE_HYPERV_VM_STATE = 'fake_hyperv_state'
|
|
||||||
|
|
||||||
_FAKE_ACL_ACT = 'fake_acl_action'
|
|
||||||
_FAKE_ACL_DIR = 'fake_acl_dir'
|
|
||||||
_FAKE_ACL_TYPE = 'fake_acl_type'
|
|
||||||
_FAKE_LOCAL_PORT = 'fake_local_port'
|
|
||||||
_FAKE_PROTOCOL = 'fake_port_protocol'
|
|
||||||
_FAKE_REMOTE_ADDR = '0.0.0.0/0'
|
|
||||||
_FAKE_WEIGHT = 'fake_weight'
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestHyperVUtilsV2, self).setUp()
|
|
||||||
self._utils = utilsv2.HyperVUtilsV2()
|
|
||||||
self._utils._wmi_conn = mock.MagicMock()
|
|
||||||
|
|
||||||
def test_connect_vnic_to_vswitch_found(self):
|
|
||||||
self._test_connect_vnic_to_vswitch(True)
|
|
||||||
|
|
||||||
def test_connect_vnic_to_vswitch_not_found(self):
|
|
||||||
self._test_connect_vnic_to_vswitch(False)
|
|
||||||
|
|
||||||
def _test_connect_vnic_to_vswitch(self, found):
|
|
||||||
self._utils._get_vnic_settings = mock.MagicMock()
|
|
||||||
|
|
||||||
if not found:
|
|
||||||
mock_vm = mock.MagicMock()
|
|
||||||
self._utils._get_vm_from_res_setting_data = mock.MagicMock(
|
|
||||||
return_value=mock_vm)
|
|
||||||
self._utils._add_virt_resource = mock.MagicMock()
|
|
||||||
else:
|
|
||||||
self._utils._modify_virt_resource = mock.MagicMock()
|
|
||||||
|
|
||||||
self._utils._get_vswitch = mock.MagicMock()
|
|
||||||
self._utils._get_switch_port_allocation = mock.MagicMock()
|
|
||||||
|
|
||||||
mock_port = mock.MagicMock()
|
|
||||||
self._utils._get_switch_port_allocation.return_value = (mock_port,
|
|
||||||
found)
|
|
||||||
|
|
||||||
self._utils.connect_vnic_to_vswitch(self._FAKE_VSWITCH_NAME,
|
|
||||||
self._FAKE_PORT_NAME)
|
|
||||||
|
|
||||||
if not found:
|
|
||||||
self._utils._add_virt_resource.assert_called_with(mock_vm,
|
|
||||||
mock_port)
|
|
||||||
else:
|
|
||||||
self._utils._modify_virt_resource.assert_called_with(mock_port)
|
|
||||||
|
|
||||||
def test_add_virt_resource(self):
|
|
||||||
self._test_virt_method('AddResourceSettings', 3, '_add_virt_resource',
|
|
||||||
True, self._FAKE_VM_PATH, [self._FAKE_RES_DATA])
|
|
||||||
|
|
||||||
def test_add_virt_feature(self):
|
|
||||||
self._test_virt_method('AddFeatureSettings', 3, '_add_virt_feature',
|
|
||||||
True, self._FAKE_VM_PATH, [self._FAKE_RES_DATA])
|
|
||||||
|
|
||||||
def test_modify_virt_resource(self):
|
|
||||||
self._test_virt_method('ModifyResourceSettings', 3,
|
|
||||||
'_modify_virt_resource', False,
|
|
||||||
ResourceSettings=[self._FAKE_RES_DATA])
|
|
||||||
|
|
||||||
def test_remove_virt_resource(self):
|
|
||||||
self._test_virt_method('RemoveResourceSettings', 2,
|
|
||||||
'_remove_virt_resource', False,
|
|
||||||
ResourceSettings=[self._FAKE_RES_PATH])
|
|
||||||
|
|
||||||
def test_remove_virt_feature(self):
|
|
||||||
self._test_virt_method('RemoveFeatureSettings', 2,
|
|
||||||
'_remove_virt_feature', False,
|
|
||||||
FeatureSettings=[self._FAKE_RES_PATH])
|
|
||||||
|
|
||||||
def _test_virt_method(self, vsms_method_name, return_count,
|
|
||||||
utils_method_name, with_mock_vm, *args, **kwargs):
|
|
||||||
mock_svc = self._utils._conn.Msvm_VirtualSystemManagementService()[0]
|
|
||||||
vsms_method = getattr(mock_svc, vsms_method_name)
|
|
||||||
mock_rsd = self._mock_vsms_method(vsms_method, return_count)
|
|
||||||
if with_mock_vm:
|
|
||||||
mock_vm = mock.MagicMock()
|
|
||||||
mock_vm.path_.return_value = self._FAKE_VM_PATH
|
|
||||||
getattr(self._utils, utils_method_name)(mock_vm, mock_rsd)
|
|
||||||
else:
|
|
||||||
getattr(self._utils, utils_method_name)(mock_rsd)
|
|
||||||
|
|
||||||
if args:
|
|
||||||
vsms_method.assert_called_once_with(*args)
|
|
||||||
else:
|
|
||||||
vsms_method.assert_called_once_with(**kwargs)
|
|
||||||
|
|
||||||
def _mock_vsms_method(self, vsms_method, return_count):
|
|
||||||
args = None
|
|
||||||
if return_count == 3:
|
|
||||||
args = (self._FAKE_JOB_PATH, mock.MagicMock(), self._FAKE_RET_VAL)
|
|
||||||
else:
|
|
||||||
args = (self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
|
|
||||||
|
|
||||||
vsms_method.return_value = args
|
|
||||||
mock_res_setting_data = mock.MagicMock()
|
|
||||||
mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
|
|
||||||
mock_res_setting_data.path_.return_value = self._FAKE_RES_PATH
|
|
||||||
|
|
||||||
self._utils._check_job_status = mock.MagicMock()
|
|
||||||
|
|
||||||
return mock_res_setting_data
|
|
||||||
|
|
||||||
def test_disconnect_switch_port_delete_port(self):
|
|
||||||
self._test_disconnect_switch_port(True)
|
|
||||||
|
|
||||||
def test_disconnect_switch_port_modify_port(self):
|
|
||||||
self._test_disconnect_switch_port(False)
|
|
||||||
|
|
||||||
def _test_disconnect_switch_port(self, delete_port):
|
|
||||||
self._utils._get_switch_port_allocation = mock.MagicMock()
|
|
||||||
|
|
||||||
mock_sw_port = mock.MagicMock()
|
|
||||||
self._utils._get_switch_port_allocation.return_value = (mock_sw_port,
|
|
||||||
True)
|
|
||||||
|
|
||||||
if delete_port:
|
|
||||||
self._utils._remove_virt_resource = mock.MagicMock()
|
|
||||||
else:
|
|
||||||
self._utils._modify_virt_resource = mock.MagicMock()
|
|
||||||
|
|
||||||
self._utils.disconnect_switch_port(self._FAKE_VSWITCH_NAME,
|
|
||||||
self._FAKE_PORT_NAME,
|
|
||||||
delete_port)
|
|
||||||
|
|
||||||
if delete_port:
|
|
||||||
self._utils._remove_virt_resource.assert_called_with(mock_sw_port)
|
|
||||||
else:
|
|
||||||
self._utils._modify_virt_resource.assert_called_with(mock_sw_port)
|
|
||||||
|
|
||||||
def test_get_vswitch(self):
|
|
||||||
self._utils._conn.Msvm_VirtualEthernetSwitch.return_value = [
|
|
||||||
self._FAKE_VSWITCH]
|
|
||||||
vswitch = self._utils._get_vswitch(self._FAKE_VSWITCH_NAME)
|
|
||||||
|
|
||||||
self.assertEqual(self._FAKE_VSWITCH, vswitch)
|
|
||||||
|
|
||||||
def test_get_vswitch_not_found(self):
|
|
||||||
self._utils._conn.Msvm_VirtualEthernetSwitch.return_value = []
|
|
||||||
self.assertRaises(utils.HyperVException, self._utils._get_vswitch,
|
|
||||||
self._FAKE_VSWITCH_NAME)
|
|
||||||
|
|
||||||
def test_get_vswitch_external_port(self):
|
|
||||||
mock_vswitch = mock.MagicMock()
|
|
||||||
mock_sw_port = mock.MagicMock()
|
|
||||||
mock_vswitch.associators.return_value = [mock_sw_port]
|
|
||||||
mock_le = mock_sw_port.associators.return_value
|
|
||||||
mock_le.__len__.return_value = 1
|
|
||||||
mock_le1 = mock_le[0].associators.return_value
|
|
||||||
mock_le1.__len__.return_value = 1
|
|
||||||
|
|
||||||
vswitch_port = self._utils._get_vswitch_external_port(mock_vswitch)
|
|
||||||
|
|
||||||
self.assertEqual(mock_sw_port, vswitch_port)
|
|
||||||
|
|
||||||
def test_set_vswitch_port_vlan_id(self):
|
|
||||||
mock_port_alloc = mock.MagicMock()
|
|
||||||
self._utils._get_switch_port_allocation = mock.MagicMock(return_value=(
|
|
||||||
mock_port_alloc, True))
|
|
||||||
self._utils._get_vlan_setting_data_from_port_alloc = mock.MagicMock()
|
|
||||||
|
|
||||||
mock_svc = self._utils._conn.Msvm_VirtualSystemManagementService()[0]
|
|
||||||
mock_svc.RemoveFeatureSettings.return_value = (self._FAKE_JOB_PATH,
|
|
||||||
self._FAKE_RET_VAL)
|
|
||||||
mock_vlan_settings = mock.MagicMock()
|
|
||||||
self._utils._get_vlan_setting_data = mock.MagicMock(return_value=(
|
|
||||||
mock_vlan_settings, True))
|
|
||||||
|
|
||||||
mock_svc.AddFeatureSettings.return_value = (self._FAKE_JOB_PATH,
|
|
||||||
None,
|
|
||||||
self._FAKE_RET_VAL)
|
|
||||||
|
|
||||||
self._utils.set_vswitch_port_vlan_id(self._FAKE_VLAN_ID,
|
|
||||||
self._FAKE_PORT_NAME)
|
|
||||||
|
|
||||||
self.assertTrue(mock_svc.RemoveFeatureSettings.called)
|
|
||||||
self.assertTrue(mock_svc.AddFeatureSettings.called)
|
|
||||||
|
|
||||||
def test_get_setting_data(self):
|
|
||||||
self._utils._get_first_item = mock.MagicMock(return_value=None)
|
|
||||||
|
|
||||||
mock_data = mock.MagicMock()
|
|
||||||
self._utils._get_default_setting_data = mock.MagicMock(
|
|
||||||
return_value=mock_data)
|
|
||||||
|
|
||||||
ret_val = self._utils._get_setting_data(self._FAKE_CLASS_NAME,
|
|
||||||
self._FAKE_ELEMENT_NAME,
|
|
||||||
True)
|
|
||||||
|
|
||||||
self.assertEqual(ret_val, (mock_data, False))
|
|
||||||
|
|
||||||
def test_enable_port_metrics_collection(self):
|
|
||||||
mock_port = mock.MagicMock()
|
|
||||||
self._utils._get_switch_port_allocation = mock.MagicMock(return_value=(
|
|
||||||
mock_port, True))
|
|
||||||
|
|
||||||
mock_acl = mock.MagicMock()
|
|
||||||
|
|
||||||
with mock.patch.multiple(
|
|
||||||
self._utils,
|
|
||||||
_get_default_setting_data=mock.MagicMock(return_value=mock_acl),
|
|
||||||
_add_virt_feature=mock.MagicMock()):
|
|
||||||
|
|
||||||
self._utils.enable_port_metrics_collection(self._FAKE_PORT_NAME)
|
|
||||||
|
|
||||||
self.assertEqual(4, len(self._utils._add_virt_feature.mock_calls))
|
|
||||||
self._utils._add_virt_feature.assert_called_with(
|
|
||||||
mock_port, mock_acl)
|
|
||||||
|
|
||||||
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
|
|
||||||
'._get_switch_port_allocation')
|
|
||||||
def test_enable_control_metrics_ok(self, mock_get_port_allocation):
|
|
||||||
mock_metrics_svc = self._utils._conn.Msvm_MetricService()[0]
|
|
||||||
mock_metrics_def_source = self._utils._conn.CIM_BaseMetricDefinition
|
|
||||||
mock_metric_def = mock.MagicMock()
|
|
||||||
mock_port = mock.MagicMock()
|
|
||||||
mock_get_port_allocation.return_value = (mock_port, True)
|
|
||||||
|
|
||||||
mock_metrics_def_source.return_value = [mock_metric_def]
|
|
||||||
m_call = mock.call(Subject=mock_port.path_.return_value,
|
|
||||||
Definition=mock_metric_def.path_.return_value,
|
|
||||||
MetricCollectionEnabled=self._utils._METRIC_ENABLED)
|
|
||||||
|
|
||||||
self._utils.enable_control_metrics(self._FAKE_PORT_NAME)
|
|
||||||
|
|
||||||
mock_metrics_svc.ControlMetrics.assert_has_calls([m_call, m_call])
|
|
||||||
|
|
||||||
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
|
|
||||||
'._get_switch_port_allocation')
|
|
||||||
def test_enable_control_metrics_no_port(self, mock_get_port_allocation):
|
|
||||||
mock_metrics_svc = self._utils._conn.Msvm_MetricService()[0]
|
|
||||||
mock_get_port_allocation.return_value = (None, False)
|
|
||||||
|
|
||||||
self._utils.enable_control_metrics(self._FAKE_PORT_NAME)
|
|
||||||
self.assertEqual(0, mock_metrics_svc.ControlMetrics.call_count)
|
|
||||||
|
|
||||||
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
|
|
||||||
'._get_switch_port_allocation')
|
|
||||||
def test_enable_control_metrics_no_def(self, mock_get_port_allocation):
|
|
||||||
mock_metrics_svc = self._utils._conn.Msvm_MetricService()[0]
|
|
||||||
mock_metrics_def_source = self._utils._conn.CIM_BaseMetricDefinition
|
|
||||||
mock_port = mock.MagicMock()
|
|
||||||
|
|
||||||
mock_get_port_allocation.return_value = (mock_port, True)
|
|
||||||
mock_metrics_def_source.return_value = None
|
|
||||||
|
|
||||||
self._utils.enable_control_metrics(self._FAKE_PORT_NAME)
|
|
||||||
self.assertEqual(0, mock_metrics_svc.ControlMetrics.call_count)
|
|
||||||
|
|
||||||
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
|
|
||||||
'._is_port_vm_started')
|
|
||||||
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
|
|
||||||
'._get_switch_port_allocation')
|
|
||||||
def test_can_enable_control_metrics_true(self, mock_get, mock_is_started):
|
|
||||||
mock_acl = mock.MagicMock()
|
|
||||||
mock_acl.Action = self._utils._ACL_ACTION_METER
|
|
||||||
self._test_can_enable_control_metrics(mock_get, mock_is_started,
|
|
||||||
[mock_acl, mock_acl], True)
|
|
||||||
|
|
||||||
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
|
|
||||||
'._is_port_vm_started')
|
|
||||||
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
|
|
||||||
'._get_switch_port_allocation')
|
|
||||||
def test_can_enable_control_metrics_false(self, mock_get, mock_is_started):
|
|
||||||
self._test_can_enable_control_metrics(mock_get, mock_is_started, [],
|
|
||||||
False)
|
|
||||||
|
|
||||||
def _test_can_enable_control_metrics(self, mock_get_port, mock_vm_started,
|
|
||||||
acls, expected_result):
|
|
||||||
mock_port = mock.MagicMock()
|
|
||||||
mock_acl = mock.MagicMock()
|
|
||||||
mock_acl.Action = self._utils._ACL_ACTION_METER
|
|
||||||
|
|
||||||
mock_port.associators.return_value = acls
|
|
||||||
mock_get_port.return_value = (mock_port, True)
|
|
||||||
mock_vm_started.return_value = True
|
|
||||||
|
|
||||||
result = self._utils.can_enable_control_metrics(self._FAKE_PORT_NAME)
|
|
||||||
self.assertEqual(expected_result, result)
|
|
||||||
|
|
||||||
def test_is_port_vm_started_true(self):
|
|
||||||
self._test_is_port_vm_started(self._utils._HYPERV_VM_STATE_ENABLED,
|
|
||||||
True)
|
|
||||||
|
|
||||||
def test_is_port_vm_started_false(self):
|
|
||||||
self._test_is_port_vm_started(self._FAKE_HYPERV_VM_STATE, False)
|
|
||||||
|
|
||||||
def _test_is_port_vm_started(self, vm_state, expected_result):
|
|
||||||
mock_svc = self._utils._conn.Msvm_VirtualSystemManagementService()[0]
|
|
||||||
mock_port = mock.MagicMock()
|
|
||||||
mock_vmsettings = mock.MagicMock()
|
|
||||||
mock_summary = mock.MagicMock()
|
|
||||||
mock_summary.EnabledState = vm_state
|
|
||||||
mock_vmsettings.path_.return_value = self._FAKE_RES_PATH
|
|
||||||
|
|
||||||
mock_port.associators.return_value = [mock_vmsettings]
|
|
||||||
mock_svc.GetSummaryInformation.return_value = (self._FAKE_RET_VAL,
|
|
||||||
[mock_summary])
|
|
||||||
|
|
||||||
result = self._utils._is_port_vm_started(mock_port)
|
|
||||||
self.assertEqual(expected_result, result)
|
|
||||||
mock_svc.GetSummaryInformation.assert_called_once_with(
|
|
||||||
[self._utils._VM_SUMMARY_ENABLED_STATE],
|
|
||||||
[self._FAKE_RES_PATH])
|
|
||||||
|
|
||||||
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
|
|
||||||
'._remove_virt_feature')
|
|
||||||
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
|
|
||||||
'._bind_security_rule')
|
|
||||||
def test_create_default_reject_all_rules(self, mock_bind, mock_remove):
|
|
||||||
(m_port, m_acl) = self._setup_security_rule_test()
|
|
||||||
m_acl.Action = self._utils._ACL_ACTION_DENY
|
|
||||||
self._utils.create_default_reject_all_rules(self._FAKE_PORT_NAME)
|
|
||||||
|
|
||||||
calls = []
|
|
||||||
ipv4_pair = (self._utils._ACL_TYPE_IPV4, self._utils._IPV4_ANY)
|
|
||||||
ipv6_pair = (self._utils._ACL_TYPE_IPV6, self._utils._IPV6_ANY)
|
|
||||||
for direction in [self._utils._ACL_DIR_IN, self._utils._ACL_DIR_OUT]:
|
|
||||||
for acl_type, address in [ipv4_pair, ipv6_pair]:
|
|
||||||
for protocol in [self._utils._TCP_PROTOCOL,
|
|
||||||
self._utils._UDP_PROTOCOL,
|
|
||||||
self._utils._ICMP_PROTOCOL]:
|
|
||||||
calls.append(mock.call(m_port, direction, acl_type,
|
|
||||||
self._utils._ACL_ACTION_DENY,
|
|
||||||
self._utils._ACL_DEFAULT,
|
|
||||||
protocol, address, mock.ANY))
|
|
||||||
|
|
||||||
self._utils._remove_virt_feature.assert_called_once_with(m_acl)
|
|
||||||
self._utils._bind_security_rule.assert_has_calls(calls)
|
|
||||||
|
|
||||||
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
|
|
||||||
'._remove_virt_feature')
|
|
||||||
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
|
|
||||||
'._bind_security_rule')
|
|
||||||
def test_create_default_reject_all_rules_already_added(self, mock_bind,
|
|
||||||
mock_remove):
|
|
||||||
(m_port, m_acl) = self._setup_security_rule_test()
|
|
||||||
m_acl.Action = self._utils._ACL_ACTION_DENY
|
|
||||||
m_port.associators.return_value = [
|
|
||||||
m_acl] * self._utils._REJECT_ACLS_COUNT
|
|
||||||
self._utils.create_default_reject_all_rules(self._FAKE_PORT_NAME)
|
|
||||||
|
|
||||||
self.assertFalse(self._utils._remove_virt_feature.called)
|
|
||||||
self.assertFalse(self._utils._bind_security_rule.called)
|
|
||||||
|
|
||||||
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
|
|
||||||
'._remove_virt_feature')
|
|
||||||
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
|
|
||||||
'._add_virt_feature')
|
|
||||||
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
|
|
||||||
'._create_security_acl')
|
|
||||||
def test_bind_security_rule(self, mock_create_acl, mock_add, mock_remove):
|
|
||||||
(m_port, m_acl) = self._setup_security_rule_test()
|
|
||||||
mock_create_acl.return_value = m_acl
|
|
||||||
|
|
||||||
self._utils._bind_security_rule(
|
|
||||||
m_port, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE,
|
|
||||||
self._FAKE_ACL_ACT, self._FAKE_LOCAL_PORT, self._FAKE_PROTOCOL,
|
|
||||||
self._FAKE_REMOTE_ADDR, self._FAKE_WEIGHT)
|
|
||||||
|
|
||||||
self._utils._add_virt_feature.assert_called_once_with(m_port, m_acl)
|
|
||||||
|
|
||||||
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
|
|
||||||
'._remove_virt_feature')
|
|
||||||
def test_remove_security_rule(self, mock_remove_feature):
|
|
||||||
mock_acl = self._setup_security_rule_test()[1]
|
|
||||||
self._utils.remove_security_rule(
|
|
||||||
self._FAKE_PORT_NAME, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE,
|
|
||||||
self._FAKE_LOCAL_PORT, self._FAKE_PROTOCOL, self._FAKE_REMOTE_ADDR)
|
|
||||||
self._utils._remove_virt_feature.assert_called_once_with(mock_acl)
|
|
||||||
|
|
||||||
@mock.patch('neutron.plugins.hyperv.agent.utilsv2.HyperVUtilsV2'
|
|
||||||
'._remove_multiple_virt_features')
|
|
||||||
def test_remove_all_security_rules(self, mock_remove_feature):
|
|
||||||
mock_acl = self._setup_security_rule_test()[1]
|
|
||||||
self._utils.remove_all_security_rules(self._FAKE_PORT_NAME)
|
|
||||||
self._utils._remove_multiple_virt_features.assert_called_once_with(
|
|
||||||
[mock_acl])
|
|
||||||
|
|
||||||
def _setup_security_rule_test(self):
|
|
||||||
mock_port = mock.MagicMock()
|
|
||||||
mock_acl = mock.MagicMock()
|
|
||||||
mock_port.associators.return_value = [mock_acl]
|
|
||||||
|
|
||||||
self._utils._get_switch_port_allocation = mock.MagicMock(return_value=(
|
|
||||||
mock_port, True))
|
|
||||||
self._utils._filter_security_acls = mock.MagicMock(
|
|
||||||
return_value=[mock_acl])
|
|
||||||
|
|
||||||
return (mock_port, mock_acl)
|
|
||||||
|
|
||||||
def test_filter_acls(self):
|
|
||||||
mock_acl = mock.MagicMock()
|
|
||||||
mock_acl.Action = self._FAKE_ACL_ACT
|
|
||||||
mock_acl.Applicability = self._utils._ACL_APPLICABILITY_LOCAL
|
|
||||||
mock_acl.Direction = self._FAKE_ACL_DIR
|
|
||||||
mock_acl.AclType = self._FAKE_ACL_TYPE
|
|
||||||
mock_acl.RemoteAddress = self._FAKE_REMOTE_ADDR
|
|
||||||
|
|
||||||
acls = [mock_acl, mock_acl]
|
|
||||||
good_acls = self._utils._filter_acls(
|
|
||||||
acls, self._FAKE_ACL_ACT, self._FAKE_ACL_DIR,
|
|
||||||
self._FAKE_ACL_TYPE, self._FAKE_REMOTE_ADDR)
|
|
||||||
bad_acls = self._utils._filter_acls(
|
|
||||||
acls, self._FAKE_ACL_ACT, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE)
|
|
||||||
|
|
||||||
self.assertEqual(acls, good_acls)
|
|
||||||
self.assertEqual([], bad_acls)
|
|
||||||
|
|
||||||
|
|
||||||
class TestHyperVUtilsV2R2(base.BaseTestCase):
|
|
||||||
_FAKE_ACL_ACT = 'fake_acl_action'
|
|
||||||
_FAKE_ACL_DIR = 'fake_direction'
|
|
||||||
_FAKE_ACL_TYPE = 'fake_acl_type'
|
|
||||||
_FAKE_LOCAL_PORT = 'fake_local_port'
|
|
||||||
_FAKE_PROTOCOL = 'fake_port_protocol'
|
|
||||||
_FAKE_REMOTE_ADDR = '10.0.0.0/0'
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestHyperVUtilsV2R2, self).setUp()
|
|
||||||
self._utils = utilsv2.HyperVUtilsV2R2()
|
|
||||||
|
|
||||||
def test_filter_security_acls(self):
|
|
||||||
self._test_filter_security_acls(
|
|
||||||
self._FAKE_LOCAL_PORT, self._FAKE_PROTOCOL, self._FAKE_REMOTE_ADDR)
|
|
||||||
|
|
||||||
def test_filter_security_acls_default(self):
|
|
||||||
default = self._utils._ACL_DEFAULT
|
|
||||||
self._test_filter_security_acls(
|
|
||||||
default, default, self._FAKE_REMOTE_ADDR)
|
|
||||||
|
|
||||||
def _test_filter_security_acls(self, local_port, protocol, remote_addr):
|
|
||||||
acls = []
|
|
||||||
default = self._utils._ACL_DEFAULT
|
|
||||||
for port, proto in [(default, default), (local_port, protocol)]:
|
|
||||||
mock_acl = mock.MagicMock()
|
|
||||||
mock_acl.Action = self._utils._ACL_ACTION_ALLOW
|
|
||||||
mock_acl.Direction = self._FAKE_ACL_DIR
|
|
||||||
mock_acl.LocalPort = port
|
|
||||||
mock_acl.Protocol = proto
|
|
||||||
mock_acl.RemoteIPAddress = remote_addr
|
|
||||||
acls.append(mock_acl)
|
|
||||||
|
|
||||||
right_acls = [a for a in acls if a.LocalPort == local_port]
|
|
||||||
|
|
||||||
good_acls = self._utils._filter_security_acls(
|
|
||||||
acls, mock_acl.Action, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE,
|
|
||||||
local_port, protocol, remote_addr)
|
|
||||||
bad_acls = self._utils._filter_security_acls(
|
|
||||||
acls, self._FAKE_ACL_ACT, self._FAKE_ACL_DIR, self._FAKE_ACL_TYPE,
|
|
||||||
local_port, protocol, remote_addr)
|
|
||||||
|
|
||||||
self.assertEqual(right_acls, good_acls)
|
|
||||||
self.assertEqual([], bad_acls)
|
|
||||||
|
|
||||||
def test_get_new_weight(self):
|
|
||||||
mockacl1 = mock.MagicMock()
|
|
||||||
mockacl1.Weight = self._utils._MAX_WEIGHT - 1
|
|
||||||
mockacl2 = mock.MagicMock()
|
|
||||||
mockacl2.Weight = self._utils._MAX_WEIGHT - 3
|
|
||||||
self.assertEqual(self._utils._MAX_WEIGHT - 2,
|
|
||||||
self._utils._get_new_weight([mockacl1, mockacl2]))
|
|
||||||
|
|
||||||
def test_get_new_weight_no_acls(self):
|
|
||||||
self.assertEqual(self._utils._MAX_WEIGHT - 1,
|
|
||||||
self._utils._get_new_weight([]))
|
|
||||||
|
|
||||||
def test_get_new_weight_default_acls(self):
|
|
||||||
mockacl1 = mock.MagicMock()
|
|
||||||
mockacl1.Weight = self._utils._MAX_WEIGHT - 1
|
|
||||||
mockacl2 = mock.MagicMock()
|
|
||||||
mockacl2.Weight = self._utils._MAX_WEIGHT - 2
|
|
||||||
mockacl2.Action = self._utils._ACL_ACTION_DENY
|
|
||||||
|
|
||||||
self.assertEqual(self._utils._MAX_WEIGHT - 2,
|
|
||||||
self._utils._get_new_weight([mockacl1, mockacl2]))
|
|
|
@ -1,118 +0,0 @@
|
||||||
# Copyright 2014 IBM Corp.
|
|
||||||
#
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Mohammad Banikazemi, IBM Corp
|
|
||||||
|
|
||||||
|
|
||||||
import contextlib
|
|
||||||
|
|
||||||
import mock
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from neutron.agent.linux import ip_lib
|
|
||||||
from neutron.plugins.ibm.agent import sdnve_neutron_agent
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
NOTIFIER = ('neutron.plugins.ibm.'
|
|
||||||
'sdnve_neutron_plugin.AgentNotifierApi')
|
|
||||||
|
|
||||||
|
|
||||||
class CreateAgentConfigMap(base.BaseTestCase):
|
|
||||||
|
|
||||||
def test_create_agent_config_map_succeeds(self):
|
|
||||||
self.assertTrue(sdnve_neutron_agent.create_agent_config_map(cfg.CONF))
|
|
||||||
|
|
||||||
def test_create_agent_config_using_controller_ips(self):
|
|
||||||
cfg.CONF.set_override('controller_ips',
|
|
||||||
['10.10.10.1', '10.10.10.2'], group='SDNVE')
|
|
||||||
cfgmap = sdnve_neutron_agent.create_agent_config_map(cfg.CONF)
|
|
||||||
self.assertEqual(cfgmap['controller_ip'], '10.10.10.1')
|
|
||||||
|
|
||||||
def test_create_agent_config_using_interface_mappings(self):
|
|
||||||
cfg.CONF.set_override('interface_mappings',
|
|
||||||
['interface1 : eth1', 'interface2 : eth2'],
|
|
||||||
group='SDNVE')
|
|
||||||
cfgmap = sdnve_neutron_agent.create_agent_config_map(cfg.CONF)
|
|
||||||
self.assertEqual(cfgmap['interface_mappings'],
|
|
||||||
{'interface1': 'eth1', 'interface2': 'eth2'})
|
|
||||||
|
|
||||||
|
|
||||||
class TestSdnveNeutronAgent(base.BaseTestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestSdnveNeutronAgent, self).setUp()
|
|
||||||
notifier_p = mock.patch(NOTIFIER)
|
|
||||||
notifier_cls = notifier_p.start()
|
|
||||||
self.notifier = mock.Mock()
|
|
||||||
notifier_cls.return_value = self.notifier
|
|
||||||
# Avoid rpc initialization for unit tests
|
|
||||||
cfg.CONF.set_override('rpc_backend',
|
|
||||||
'neutron.openstack.common.rpc.impl_fake')
|
|
||||||
cfg.CONF.set_override('integration_bridge',
|
|
||||||
'br_int', group='SDNVE')
|
|
||||||
kwargs = sdnve_neutron_agent.create_agent_config_map(cfg.CONF)
|
|
||||||
|
|
||||||
class MockFixedIntervalLoopingCall(object):
|
|
||||||
def __init__(self, f):
|
|
||||||
self.f = f
|
|
||||||
|
|
||||||
def start(self, interval=0):
|
|
||||||
self.f()
|
|
||||||
|
|
||||||
with contextlib.nested(
|
|
||||||
mock.patch('neutron.plugins.ibm.agent.sdnve_neutron_agent.'
|
|
||||||
'SdnveNeutronAgent.setup_integration_br',
|
|
||||||
return_value=mock.Mock()),
|
|
||||||
mock.patch('neutron.openstack.common.loopingcall.'
|
|
||||||
'FixedIntervalLoopingCall',
|
|
||||||
new=MockFixedIntervalLoopingCall)):
|
|
||||||
self.agent = sdnve_neutron_agent.SdnveNeutronAgent(**kwargs)
|
|
||||||
|
|
||||||
def test_setup_physical_interfaces(self):
|
|
||||||
with mock.patch.object(self.agent.int_br,
|
|
||||||
'add_port') as add_port_func:
|
|
||||||
with mock.patch.object(ip_lib,
|
|
||||||
'device_exists',
|
|
||||||
return_valxue=True):
|
|
||||||
self.agent.setup_physical_interfaces({"interface1": "eth1"})
|
|
||||||
add_port_func.assert_called_once_with('eth1')
|
|
||||||
|
|
||||||
def test_setup_physical_interfaces_none(self):
|
|
||||||
with mock.patch.object(self.agent.int_br,
|
|
||||||
'add_port') as add_port_func:
|
|
||||||
with mock.patch.object(ip_lib,
|
|
||||||
'device_exists',
|
|
||||||
return_valxue=True):
|
|
||||||
self.agent.setup_physical_interfaces({})
|
|
||||||
self.assertFalse(add_port_func.called)
|
|
||||||
|
|
||||||
def test_get_info_set_controller(self):
|
|
||||||
with mock.patch.object(self.agent.int_br,
|
|
||||||
'run_vsctl') as run_vsctl_func:
|
|
||||||
kwargs = {}
|
|
||||||
kwargs['info'] = {'new_controller': '10.10.10.1'}
|
|
||||||
self.agent.info_update('dummy', **kwargs)
|
|
||||||
run_vsctl_func.assert_called_once_with(['set-controller',
|
|
||||||
'br_int',
|
|
||||||
'tcp:10.10.10.1'])
|
|
||||||
|
|
||||||
def test_get_info(self):
|
|
||||||
with mock.patch.object(self.agent.int_br,
|
|
||||||
'run_vsctl') as run_vsctl_func:
|
|
||||||
kwargs = {}
|
|
||||||
self.agent.info_update('dummy', **kwargs)
|
|
||||||
self.assertFalse(run_vsctl_func.called)
|
|
|
@ -1,145 +0,0 @@
|
||||||
# Copyright 2014 IBM Corp.
|
|
||||||
#
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Mohammad Banikazemi, IBM Corp
|
|
||||||
|
|
||||||
|
|
||||||
import mock
|
|
||||||
|
|
||||||
from neutron.openstack.common import uuidutils
|
|
||||||
from neutron.plugins.ibm.common import constants
|
|
||||||
from neutron.plugins.ibm import sdnve_api
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
RESOURCE_PATH = {
|
|
||||||
'network': "ln/networks/",
|
|
||||||
}
|
|
||||||
RESOURCE = 'network'
|
|
||||||
HTTP_OK = 200
|
|
||||||
TENANT_ID = uuidutils.generate_uuid()
|
|
||||||
|
|
||||||
|
|
||||||
class TestSdnveApi(base.BaseTestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestSdnveApi, self).setUp()
|
|
||||||
|
|
||||||
class MockKeystoneClient(object):
|
|
||||||
def __init__(self, **kwargs):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def get_tenant_name(self, id):
|
|
||||||
return 'test tenant name'
|
|
||||||
|
|
||||||
with mock.patch('neutron.plugins.ibm.sdnve_api.'
|
|
||||||
'KeystoneClient',
|
|
||||||
new=MockKeystoneClient):
|
|
||||||
self.api = sdnve_api.Client()
|
|
||||||
|
|
||||||
def mock_do_request(self, method, url, body=None, headers=None,
|
|
||||||
params=None, connection_type=None):
|
|
||||||
return (HTTP_OK, url)
|
|
||||||
|
|
||||||
def mock_do_request_tenant(self, method, url, body=None, headers=None,
|
|
||||||
params=None, connection_type=None):
|
|
||||||
return (HTTP_OK, {'id': TENANT_ID,
|
|
||||||
'network_type': constants.TENANT_TYPE_OF})
|
|
||||||
|
|
||||||
def mock_do_request_no_tenant(self, method, url, body=None, headers=None,
|
|
||||||
params=None, connection_type=None):
|
|
||||||
return (None, None)
|
|
||||||
|
|
||||||
def mock_process_request(self, body):
|
|
||||||
return body
|
|
||||||
|
|
||||||
def test_sdnve_api_list(self):
|
|
||||||
with mock.patch('neutron.plugins.ibm.sdnve_api.'
|
|
||||||
'Client.do_request',
|
|
||||||
new=self.mock_do_request):
|
|
||||||
result = self.api.sdnve_list(RESOURCE)
|
|
||||||
self.assertEqual(result, (HTTP_OK, RESOURCE_PATH[RESOURCE]))
|
|
||||||
|
|
||||||
def test_sdnve_api_show(self):
|
|
||||||
with mock.patch('neutron.plugins.ibm.sdnve_api.'
|
|
||||||
'Client.do_request',
|
|
||||||
new=self.mock_do_request):
|
|
||||||
result = self.api.sdnve_show(RESOURCE, TENANT_ID)
|
|
||||||
self.assertEqual(result,
|
|
||||||
(HTTP_OK, RESOURCE_PATH[RESOURCE] + TENANT_ID))
|
|
||||||
|
|
||||||
def test_sdnve_api_create(self):
|
|
||||||
with mock.patch('neutron.plugins.ibm.sdnve_api.'
|
|
||||||
'Client.do_request',
|
|
||||||
new=self.mock_do_request):
|
|
||||||
with mock.patch('neutron.plugins.ibm.sdnve_api.'
|
|
||||||
'Client.process_request',
|
|
||||||
new=self.mock_process_request):
|
|
||||||
result = self.api.sdnve_create(RESOURCE, '')
|
|
||||||
self.assertEqual(result, (HTTP_OK, RESOURCE_PATH[RESOURCE]))
|
|
||||||
|
|
||||||
def test_sdnve_api_update(self):
|
|
||||||
with mock.patch('neutron.plugins.ibm.sdnve_api.'
|
|
||||||
'Client.do_request',
|
|
||||||
new=self.mock_do_request):
|
|
||||||
with mock.patch('neutron.plugins.ibm.sdnve_api.'
|
|
||||||
'Client.process_request',
|
|
||||||
new=self.mock_process_request):
|
|
||||||
result = self.api.sdnve_update(RESOURCE, TENANT_ID, '')
|
|
||||||
self.assertEqual(result,
|
|
||||||
(HTTP_OK,
|
|
||||||
RESOURCE_PATH[RESOURCE] + TENANT_ID))
|
|
||||||
|
|
||||||
def test_sdnve_api_delete(self):
|
|
||||||
with mock.patch('neutron.plugins.ibm.sdnve_api.'
|
|
||||||
'Client.do_request',
|
|
||||||
new=self.mock_do_request):
|
|
||||||
result = self.api.sdnve_delete(RESOURCE, TENANT_ID)
|
|
||||||
self.assertEqual(result,
|
|
||||||
(HTTP_OK, RESOURCE_PATH[RESOURCE] + TENANT_ID))
|
|
||||||
|
|
||||||
def test_sdnve_get_tenant_by_id(self):
|
|
||||||
with mock.patch('neutron.plugins.ibm.sdnve_api.'
|
|
||||||
'Client.do_request',
|
|
||||||
new=self.mock_do_request_tenant):
|
|
||||||
id = TENANT_ID
|
|
||||||
result = self.api.sdnve_get_tenant_byid(id)
|
|
||||||
self.assertEqual(result,
|
|
||||||
(TENANT_ID, constants.TENANT_TYPE_OF))
|
|
||||||
|
|
||||||
def test_sdnve_check_and_create_tenant(self):
|
|
||||||
with mock.patch('neutron.plugins.ibm.sdnve_api.'
|
|
||||||
'Client.do_request',
|
|
||||||
new=self.mock_do_request_tenant):
|
|
||||||
id = TENANT_ID
|
|
||||||
result = self.api.sdnve_check_and_create_tenant(id)
|
|
||||||
self.assertEqual(result, TENANT_ID)
|
|
||||||
|
|
||||||
def test_sdnve_check_and_create_tenant_fail(self):
|
|
||||||
with mock.patch('neutron.plugins.ibm.sdnve_api.'
|
|
||||||
'Client.do_request',
|
|
||||||
new=self.mock_do_request_no_tenant):
|
|
||||||
id = TENANT_ID
|
|
||||||
result = self.api.sdnve_check_and_create_tenant(
|
|
||||||
id, constants.TENANT_TYPE_OF)
|
|
||||||
self.assertIsNone(result)
|
|
||||||
|
|
||||||
def test_process_request(self):
|
|
||||||
my_request = {'key_1': 'value_1', 'router:external': 'True',
|
|
||||||
'key_2': 'value_2'}
|
|
||||||
expected = {'key_1': 'value_1', 'router_external': 'True',
|
|
||||||
'key_2': 'value_2'}
|
|
||||||
result = self.api.process_request(my_request)
|
|
||||||
self.assertEqual(expected, result)
|
|
|
@ -1,126 +0,0 @@
|
||||||
# Copyright 2014 IBM Corp.
|
|
||||||
#
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Mohammad Banikazemi, IBM Corp
|
|
||||||
|
|
||||||
|
|
||||||
import contextlib
|
|
||||||
import mock
|
|
||||||
|
|
||||||
from neutron.extensions import portbindings
|
|
||||||
from neutron.tests.unit import _test_extension_portbindings as test_bindings
|
|
||||||
from neutron.tests.unit import test_db_plugin as test_plugin
|
|
||||||
from neutron.tests.unit import test_l3_plugin as test_l3_plugin
|
|
||||||
|
|
||||||
from neutron.plugins.ibm.common import constants
|
|
||||||
|
|
||||||
|
|
||||||
_plugin_name = ('neutron.plugins.ibm.'
|
|
||||||
'sdnve_neutron_plugin.SdnvePluginV2')
|
|
||||||
HTTP_OK = 200
|
|
||||||
|
|
||||||
|
|
||||||
class MockClient(object):
|
|
||||||
def sdnve_list(self, resource, **params):
|
|
||||||
return (HTTP_OK, 'body')
|
|
||||||
|
|
||||||
def sdnve_show(self, resource, specific, **params):
|
|
||||||
return (HTTP_OK, 'body')
|
|
||||||
|
|
||||||
def sdnve_create(self, resource, body):
|
|
||||||
return (HTTP_OK, 'body')
|
|
||||||
|
|
||||||
def sdnve_update(self, resource, specific, body=None):
|
|
||||||
return (HTTP_OK, 'body')
|
|
||||||
|
|
||||||
def sdnve_delete(self, resource, specific):
|
|
||||||
return (HTTP_OK, 'body')
|
|
||||||
|
|
||||||
def sdnve_get_tenant_byid(self, os_tenant_id):
|
|
||||||
return (os_tenant_id, constants.TENANT_TYPE_OF)
|
|
||||||
|
|
||||||
def sdnve_check_and_create_tenant(
|
|
||||||
self, os_tenant_id, network_type=None):
|
|
||||||
return os_tenant_id
|
|
||||||
|
|
||||||
def sdnve_get_controller(self):
|
|
||||||
return
|
|
||||||
|
|
||||||
|
|
||||||
class MockKeystoneClient(object):
|
|
||||||
def __init__(self, **kwargs):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def get_tenant_type(self, id):
|
|
||||||
return constants.TENANT_TYPE_OF
|
|
||||||
|
|
||||||
def get_tenant_name(self, id):
|
|
||||||
return "tenant name"
|
|
||||||
|
|
||||||
|
|
||||||
class IBMPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
|
|
||||||
def setUp(self):
|
|
||||||
with contextlib.nested(
|
|
||||||
mock.patch('neutron.plugins.ibm.sdnve_api.'
|
|
||||||
'KeystoneClient',
|
|
||||||
new=MockKeystoneClient),
|
|
||||||
mock.patch('neutron.plugins.ibm.sdnve_api.'
|
|
||||||
'Client',
|
|
||||||
new=MockClient)):
|
|
||||||
super(IBMPluginV2TestCase, self).setUp(plugin=_plugin_name)
|
|
||||||
|
|
||||||
|
|
||||||
class TestIBMBasicGet(test_plugin.TestBasicGet,
|
|
||||||
IBMPluginV2TestCase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestIBMV2HTTPResponse(test_plugin.TestV2HTTPResponse,
|
|
||||||
IBMPluginV2TestCase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestIBMNetworksV2(test_plugin.TestNetworksV2,
|
|
||||||
IBMPluginV2TestCase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestIBMPortsV2(test_plugin.TestPortsV2,
|
|
||||||
IBMPluginV2TestCase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestIBMSubnetsV2(test_plugin.TestSubnetsV2,
|
|
||||||
IBMPluginV2TestCase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestIBMPortBinding(IBMPluginV2TestCase,
|
|
||||||
test_bindings.PortBindingsTestCase):
|
|
||||||
VIF_TYPE = portbindings.VIF_TYPE_OVS
|
|
||||||
|
|
||||||
|
|
||||||
class IBMPluginRouterTestCase(test_l3_plugin.L3NatDBIntTestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
with contextlib.nested(
|
|
||||||
mock.patch('neutron.plugins.ibm.sdnve_api.'
|
|
||||||
'KeystoneClient',
|
|
||||||
new=MockKeystoneClient),
|
|
||||||
mock.patch('neutron.plugins.ibm.sdnve_api.'
|
|
||||||
'Client',
|
|
||||||
new=MockClient)):
|
|
||||||
super(IBMPluginRouterTestCase, self).setUp(plugin=_plugin_name)
|
|
|
@ -1,16 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2012 OpenStack Foundation.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
|
@ -1,34 +0,0 @@
|
||||||
# Copyright (c) 2013 OpenStack Foundation.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from neutron.tests.unit.linuxbridge import test_linuxbridge_plugin
|
|
||||||
from neutron.tests.unit.openvswitch import test_agent_scheduler
|
|
||||||
|
|
||||||
|
|
||||||
class LbAgentSchedulerTestCase(
|
|
||||||
test_agent_scheduler.OvsAgentSchedulerTestCase):
|
|
||||||
plugin_str = test_linuxbridge_plugin.PLUGIN_NAME
|
|
||||||
l3_plugin = None
|
|
||||||
|
|
||||||
|
|
||||||
class LbL3AgentNotifierTestCase(
|
|
||||||
test_agent_scheduler.OvsL3AgentNotifierTestCase):
|
|
||||||
plugin_str = test_linuxbridge_plugin.PLUGIN_NAME
|
|
||||||
l3_plugin = None
|
|
||||||
|
|
||||||
|
|
||||||
class LbDhcpAgentNotifierTestCase(
|
|
||||||
test_agent_scheduler.OvsDhcpAgentNotifierTestCase):
|
|
||||||
plugin_str = test_linuxbridge_plugin.PLUGIN_NAME
|
|
|
@ -1,42 +0,0 @@
|
||||||
# Copyright (c) 2012 OpenStack Foundation.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from neutron.plugins.linuxbridge.common import config # noqa
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
class ConfigurationTest(base.BaseTestCase):
|
|
||||||
|
|
||||||
def test_defaults(self):
|
|
||||||
self.assertEqual(2,
|
|
||||||
cfg.CONF.AGENT.polling_interval)
|
|
||||||
self.assertEqual(False,
|
|
||||||
cfg.CONF.AGENT.rpc_support_old_agents)
|
|
||||||
self.assertEqual('sudo',
|
|
||||||
cfg.CONF.AGENT.root_helper)
|
|
||||||
self.assertEqual('local',
|
|
||||||
cfg.CONF.VLANS.tenant_network_type)
|
|
||||||
self.assertEqual(0,
|
|
||||||
len(cfg.CONF.VLANS.network_vlan_ranges))
|
|
||||||
self.assertEqual(0,
|
|
||||||
len(cfg.CONF.LINUX_BRIDGE.
|
|
||||||
physical_interface_mappings))
|
|
||||||
self.assertEqual(False, cfg.CONF.VXLAN.enable_vxlan)
|
|
||||||
self.assertEqual(config.DEFAULT_VXLAN_GROUP,
|
|
||||||
cfg.CONF.VXLAN.vxlan_group)
|
|
||||||
self.assertEqual(0, len(cfg.CONF.VXLAN.local_ip))
|
|
||||||
self.assertEqual(False, cfg.CONF.VXLAN.l2_population)
|
|
|
@ -1,172 +0,0 @@
|
||||||
# Copyright (c) 2012 OpenStack Foundation.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
from six import moves
|
|
||||||
import testtools
|
|
||||||
from testtools import matchers
|
|
||||||
|
|
||||||
from neutron.common import exceptions as n_exc
|
|
||||||
from neutron.db import api as db
|
|
||||||
from neutron.plugins.linuxbridge.db import l2network_db_v2 as lb_db
|
|
||||||
from neutron.tests import base
|
|
||||||
from neutron.tests.unit import test_db_plugin as test_plugin
|
|
||||||
|
|
||||||
PHYS_NET = 'physnet1'
|
|
||||||
PHYS_NET_2 = 'physnet2'
|
|
||||||
VLAN_MIN = 10
|
|
||||||
VLAN_MAX = 19
|
|
||||||
VLAN_RANGES = {PHYS_NET: [(VLAN_MIN, VLAN_MAX)]}
|
|
||||||
UPDATED_VLAN_RANGES = {PHYS_NET: [(VLAN_MIN + 5, VLAN_MAX + 5)],
|
|
||||||
PHYS_NET_2: [(VLAN_MIN + 20, VLAN_MAX + 20)]}
|
|
||||||
|
|
||||||
PLUGIN_NAME = ('neutron.plugins.linuxbridge.'
|
|
||||||
'lb_neutron_plugin.LinuxBridgePluginV2')
|
|
||||||
|
|
||||||
|
|
||||||
class NetworkStatesTest(base.BaseTestCase):
|
|
||||||
def setUp(self):
|
|
||||||
super(NetworkStatesTest, self).setUp()
|
|
||||||
db.configure_db()
|
|
||||||
lb_db.sync_network_states(VLAN_RANGES)
|
|
||||||
self.session = db.get_session()
|
|
||||||
self.addCleanup(db.clear_db)
|
|
||||||
|
|
||||||
def test_sync_network_states(self):
|
|
||||||
self.assertIsNone(lb_db.get_network_state(PHYS_NET,
|
|
||||||
VLAN_MIN - 1))
|
|
||||||
self.assertFalse(lb_db.get_network_state(PHYS_NET,
|
|
||||||
VLAN_MIN).allocated)
|
|
||||||
self.assertFalse(lb_db.get_network_state(PHYS_NET,
|
|
||||||
VLAN_MIN + 1).allocated)
|
|
||||||
self.assertFalse(lb_db.get_network_state(PHYS_NET,
|
|
||||||
VLAN_MAX - 1).allocated)
|
|
||||||
self.assertFalse(lb_db.get_network_state(PHYS_NET,
|
|
||||||
VLAN_MAX).allocated)
|
|
||||||
self.assertIsNone(lb_db.get_network_state(PHYS_NET,
|
|
||||||
VLAN_MAX + 1))
|
|
||||||
|
|
||||||
lb_db.sync_network_states(UPDATED_VLAN_RANGES)
|
|
||||||
|
|
||||||
self.assertIsNone(lb_db.get_network_state(PHYS_NET,
|
|
||||||
VLAN_MIN + 5 - 1))
|
|
||||||
self.assertFalse(lb_db.get_network_state(PHYS_NET,
|
|
||||||
VLAN_MIN + 5).allocated)
|
|
||||||
self.assertFalse(lb_db.get_network_state(PHYS_NET,
|
|
||||||
VLAN_MIN + 5 + 1).allocated)
|
|
||||||
self.assertFalse(lb_db.get_network_state(PHYS_NET,
|
|
||||||
VLAN_MAX + 5 - 1).allocated)
|
|
||||||
self.assertFalse(lb_db.get_network_state(PHYS_NET,
|
|
||||||
VLAN_MAX + 5).allocated)
|
|
||||||
self.assertIsNone(lb_db.get_network_state(PHYS_NET,
|
|
||||||
VLAN_MAX + 5 + 1))
|
|
||||||
|
|
||||||
self.assertIsNone(lb_db.get_network_state(PHYS_NET_2,
|
|
||||||
VLAN_MIN + 20 - 1))
|
|
||||||
self.assertFalse(lb_db.get_network_state(PHYS_NET_2,
|
|
||||||
VLAN_MIN + 20).allocated)
|
|
||||||
self.assertFalse(lb_db.get_network_state(PHYS_NET_2,
|
|
||||||
VLAN_MIN + 20 + 1).allocated)
|
|
||||||
self.assertFalse(lb_db.get_network_state(PHYS_NET_2,
|
|
||||||
VLAN_MAX + 20 - 1).allocated)
|
|
||||||
self.assertFalse(lb_db.get_network_state(PHYS_NET_2,
|
|
||||||
VLAN_MAX + 20).allocated)
|
|
||||||
self.assertIsNone(lb_db.get_network_state(PHYS_NET_2,
|
|
||||||
VLAN_MAX + 20 + 1))
|
|
||||||
|
|
||||||
lb_db.sync_network_states(VLAN_RANGES)
|
|
||||||
|
|
||||||
self.assertIsNone(lb_db.get_network_state(PHYS_NET,
|
|
||||||
VLAN_MIN - 1))
|
|
||||||
self.assertFalse(lb_db.get_network_state(PHYS_NET,
|
|
||||||
VLAN_MIN).allocated)
|
|
||||||
self.assertFalse(lb_db.get_network_state(PHYS_NET,
|
|
||||||
VLAN_MIN + 1).allocated)
|
|
||||||
self.assertFalse(lb_db.get_network_state(PHYS_NET,
|
|
||||||
VLAN_MAX - 1).allocated)
|
|
||||||
self.assertFalse(lb_db.get_network_state(PHYS_NET,
|
|
||||||
VLAN_MAX).allocated)
|
|
||||||
self.assertIsNone(lb_db.get_network_state(PHYS_NET,
|
|
||||||
VLAN_MAX + 1))
|
|
||||||
|
|
||||||
self.assertIsNone(lb_db.get_network_state(PHYS_NET_2,
|
|
||||||
VLAN_MIN + 20))
|
|
||||||
self.assertIsNone(lb_db.get_network_state(PHYS_NET_2,
|
|
||||||
VLAN_MAX + 20))
|
|
||||||
|
|
||||||
def test_network_pool(self):
|
|
||||||
vlan_ids = set()
|
|
||||||
for x in moves.xrange(VLAN_MIN, VLAN_MAX + 1):
|
|
||||||
physical_network, vlan_id = lb_db.reserve_network(self.session)
|
|
||||||
self.assertEqual(physical_network, PHYS_NET)
|
|
||||||
self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1))
|
|
||||||
self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1))
|
|
||||||
vlan_ids.add(vlan_id)
|
|
||||||
|
|
||||||
with testtools.ExpectedException(n_exc.NoNetworkAvailable):
|
|
||||||
physical_network, vlan_id = lb_db.reserve_network(self.session)
|
|
||||||
|
|
||||||
for vlan_id in vlan_ids:
|
|
||||||
lb_db.release_network(self.session, PHYS_NET, vlan_id, VLAN_RANGES)
|
|
||||||
|
|
||||||
def test_specific_network_inside_pool(self):
|
|
||||||
vlan_id = VLAN_MIN + 5
|
|
||||||
self.assertFalse(lb_db.get_network_state(PHYS_NET,
|
|
||||||
vlan_id).allocated)
|
|
||||||
lb_db.reserve_specific_network(self.session, PHYS_NET, vlan_id)
|
|
||||||
self.assertTrue(lb_db.get_network_state(PHYS_NET,
|
|
||||||
vlan_id).allocated)
|
|
||||||
|
|
||||||
with testtools.ExpectedException(n_exc.VlanIdInUse):
|
|
||||||
lb_db.reserve_specific_network(self.session, PHYS_NET, vlan_id)
|
|
||||||
|
|
||||||
lb_db.release_network(self.session, PHYS_NET, vlan_id, VLAN_RANGES)
|
|
||||||
self.assertFalse(lb_db.get_network_state(PHYS_NET,
|
|
||||||
vlan_id).allocated)
|
|
||||||
|
|
||||||
def test_specific_network_outside_pool(self):
|
|
||||||
vlan_id = VLAN_MAX + 5
|
|
||||||
self.assertIsNone(lb_db.get_network_state(PHYS_NET, vlan_id))
|
|
||||||
lb_db.reserve_specific_network(self.session, PHYS_NET, vlan_id)
|
|
||||||
self.assertTrue(lb_db.get_network_state(PHYS_NET,
|
|
||||||
vlan_id).allocated)
|
|
||||||
|
|
||||||
with testtools.ExpectedException(n_exc.VlanIdInUse):
|
|
||||||
lb_db.reserve_specific_network(self.session, PHYS_NET, vlan_id)
|
|
||||||
|
|
||||||
lb_db.release_network(self.session, PHYS_NET, vlan_id, VLAN_RANGES)
|
|
||||||
self.assertIsNone(lb_db.get_network_state(PHYS_NET, vlan_id))
|
|
||||||
|
|
||||||
|
|
||||||
class NetworkBindingsTest(test_plugin.NeutronDbPluginV2TestCase):
|
|
||||||
def setUp(self):
|
|
||||||
cfg.CONF.set_override('network_vlan_ranges', ['physnet1:1000:2999'],
|
|
||||||
group='VLANS')
|
|
||||||
super(NetworkBindingsTest, self).setUp(plugin=PLUGIN_NAME)
|
|
||||||
db.configure_db()
|
|
||||||
self.session = db.get_session()
|
|
||||||
|
|
||||||
def test_add_network_binding(self):
|
|
||||||
params = {'provider:network_type': 'vlan',
|
|
||||||
'provider:physical_network': PHYS_NET,
|
|
||||||
'provider:segmentation_id': 1234}
|
|
||||||
params['arg_list'] = tuple(params.keys())
|
|
||||||
with self.network(**params) as network:
|
|
||||||
TEST_NETWORK_ID = network['network']['id']
|
|
||||||
binding = lb_db.get_network_binding(self.session, TEST_NETWORK_ID)
|
|
||||||
self.assertIsNotNone(binding)
|
|
||||||
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
|
|
||||||
self.assertEqual(binding.physical_network, PHYS_NET)
|
|
||||||
self.assertEqual(binding.vlan_id, 1234)
|
|
File diff suppressed because it is too large
Load Diff
|
@ -1,99 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
#
|
|
||||||
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import mock
|
|
||||||
|
|
||||||
from neutron.api.v2 import attributes
|
|
||||||
from neutron.extensions import securitygroup as ext_sg
|
|
||||||
from neutron.plugins.linuxbridge.db import l2network_db_v2 as lb_db
|
|
||||||
from neutron.tests.unit import test_extension_security_group as test_sg
|
|
||||||
from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc
|
|
||||||
|
|
||||||
|
|
||||||
PLUGIN_NAME = ('neutron.plugins.linuxbridge.'
|
|
||||||
'lb_neutron_plugin.LinuxBridgePluginV2')
|
|
||||||
NOTIFIER = ('neutron.plugins.linuxbridge.'
|
|
||||||
'lb_neutron_plugin.AgentNotifierApi')
|
|
||||||
|
|
||||||
|
|
||||||
class LinuxBridgeSecurityGroupsTestCase(test_sg.SecurityGroupDBTestCase):
|
|
||||||
_plugin_name = PLUGIN_NAME
|
|
||||||
|
|
||||||
def setUp(self, plugin=None):
|
|
||||||
test_sg_rpc.set_firewall_driver(test_sg_rpc.FIREWALL_IPTABLES_DRIVER)
|
|
||||||
notifier_p = mock.patch(NOTIFIER)
|
|
||||||
notifier_cls = notifier_p.start()
|
|
||||||
self.notifier = mock.Mock()
|
|
||||||
notifier_cls.return_value = self.notifier
|
|
||||||
self._attribute_map_bk_ = {}
|
|
||||||
for item in attributes.RESOURCE_ATTRIBUTE_MAP:
|
|
||||||
self._attribute_map_bk_[item] = (attributes.
|
|
||||||
RESOURCE_ATTRIBUTE_MAP[item].
|
|
||||||
copy())
|
|
||||||
super(LinuxBridgeSecurityGroupsTestCase, self).setUp(PLUGIN_NAME)
|
|
||||||
|
|
||||||
def tearDown(self):
|
|
||||||
attributes.RESOURCE_ATTRIBUTE_MAP = self._attribute_map_bk_
|
|
||||||
super(LinuxBridgeSecurityGroupsTestCase, self).tearDown()
|
|
||||||
|
|
||||||
|
|
||||||
class TestLinuxBridgeSecurityGroups(LinuxBridgeSecurityGroupsTestCase,
|
|
||||||
test_sg.TestSecurityGroups,
|
|
||||||
test_sg_rpc.SGNotificationTestMixin):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestLinuxBridgeSecurityGroupsXML(TestLinuxBridgeSecurityGroups):
|
|
||||||
fmt = 'xml'
|
|
||||||
|
|
||||||
|
|
||||||
class TestLinuxBridgeSecurityGroupsDB(LinuxBridgeSecurityGroupsTestCase):
|
|
||||||
def test_security_group_get_port_from_device(self):
|
|
||||||
with self.network() as n:
|
|
||||||
with self.subnet(n):
|
|
||||||
with self.security_group() as sg:
|
|
||||||
security_group_id = sg['security_group']['id']
|
|
||||||
res = self._create_port(self.fmt, n['network']['id'])
|
|
||||||
port = self.deserialize(self.fmt, res)
|
|
||||||
fixed_ips = port['port']['fixed_ips']
|
|
||||||
data = {'port': {'fixed_ips': fixed_ips,
|
|
||||||
'name': port['port']['name'],
|
|
||||||
ext_sg.SECURITYGROUPS:
|
|
||||||
[security_group_id]}}
|
|
||||||
|
|
||||||
req = self.new_update_request('ports', data,
|
|
||||||
port['port']['id'])
|
|
||||||
res = self.deserialize(self.fmt,
|
|
||||||
req.get_response(self.api))
|
|
||||||
port_id = res['port']['id']
|
|
||||||
device_id = port_id[:8]
|
|
||||||
port_dict = lb_db.get_port_from_device(device_id)
|
|
||||||
self.assertEqual(port_id, port_dict['id'])
|
|
||||||
self.assertEqual([security_group_id],
|
|
||||||
port_dict[ext_sg.SECURITYGROUPS])
|
|
||||||
self.assertEqual([], port_dict['security_group_rules'])
|
|
||||||
self.assertEqual([fixed_ips[0]['ip_address']],
|
|
||||||
port_dict['fixed_ips'])
|
|
||||||
self._delete('ports', port['port']['id'])
|
|
||||||
|
|
||||||
def test_security_group_get_port_from_device_with_no_port(self):
|
|
||||||
port_dict = lb_db.get_port_from_device('bad_device_id')
|
|
||||||
self.assertIsNone(port_dict)
|
|
||||||
|
|
||||||
|
|
||||||
class TestLinuxBridgeSecurityGroupsDBXML(TestLinuxBridgeSecurityGroupsDB):
|
|
||||||
fmt = 'xml'
|
|
|
@ -1,132 +0,0 @@
|
||||||
# Copyright (c) 2012 OpenStack Foundation.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import contextlib
|
|
||||||
|
|
||||||
import mock
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from neutron.common import constants as q_const
|
|
||||||
from neutron.extensions import portbindings
|
|
||||||
from neutron import manager
|
|
||||||
from neutron.plugins.linuxbridge import lb_neutron_plugin
|
|
||||||
from neutron.tests.unit import _test_extension_portbindings as test_bindings
|
|
||||||
from neutron.tests.unit import test_db_plugin as test_plugin
|
|
||||||
from neutron.tests.unit import test_security_groups_rpc as test_sg_rpc
|
|
||||||
|
|
||||||
PLUGIN_NAME = ('neutron.plugins.linuxbridge.'
|
|
||||||
'lb_neutron_plugin.LinuxBridgePluginV2')
|
|
||||||
|
|
||||||
|
|
||||||
class LinuxBridgePluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
|
|
||||||
_plugin_name = PLUGIN_NAME
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(LinuxBridgePluginV2TestCase, self).setUp(PLUGIN_NAME)
|
|
||||||
self.port_create_status = 'DOWN'
|
|
||||||
|
|
||||||
|
|
||||||
class TestLinuxBridgeBasicGet(test_plugin.TestBasicGet,
|
|
||||||
LinuxBridgePluginV2TestCase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestLinuxBridgeV2HTTPResponse(test_plugin.TestV2HTTPResponse,
|
|
||||||
LinuxBridgePluginV2TestCase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestLinuxBridgeNetworksV2(test_plugin.TestNetworksV2,
|
|
||||||
LinuxBridgePluginV2TestCase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestLinuxBridgePortsV2(test_plugin.TestPortsV2,
|
|
||||||
LinuxBridgePluginV2TestCase):
|
|
||||||
|
|
||||||
def test_update_port_status_build(self):
|
|
||||||
with self.port() as port:
|
|
||||||
self.assertEqual(port['port']['status'], 'DOWN')
|
|
||||||
self.assertEqual(self.port_create_status, 'DOWN')
|
|
||||||
|
|
||||||
|
|
||||||
class TestLinuxBridgePortBinding(LinuxBridgePluginV2TestCase,
|
|
||||||
test_bindings.PortBindingsTestCase):
|
|
||||||
VIF_TYPE = portbindings.VIF_TYPE_BRIDGE
|
|
||||||
HAS_PORT_FILTER = True
|
|
||||||
ENABLE_SG = True
|
|
||||||
FIREWALL_DRIVER = test_sg_rpc.FIREWALL_IPTABLES_DRIVER
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
test_sg_rpc.set_firewall_driver(self.FIREWALL_DRIVER)
|
|
||||||
cfg.CONF.set_override(
|
|
||||||
'enable_security_group', self.ENABLE_SG,
|
|
||||||
group='SECURITYGROUP')
|
|
||||||
super(TestLinuxBridgePortBinding, self).setUp()
|
|
||||||
|
|
||||||
|
|
||||||
class TestLinuxBridgePortBindingNoSG(TestLinuxBridgePortBinding):
|
|
||||||
HAS_PORT_FILTER = False
|
|
||||||
ENABLE_SG = False
|
|
||||||
FIREWALL_DRIVER = test_sg_rpc.FIREWALL_NOOP_DRIVER
|
|
||||||
|
|
||||||
|
|
||||||
class TestLinuxBridgePortBindingHost(
|
|
||||||
LinuxBridgePluginV2TestCase,
|
|
||||||
test_bindings.PortBindingsHostTestCaseMixin):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestLinuxBridgePluginRpcCallbacks(test_plugin.NeutronDbPluginV2TestCase):
|
|
||||||
def setUp(self):
|
|
||||||
super(TestLinuxBridgePluginRpcCallbacks, self).setUp(PLUGIN_NAME)
|
|
||||||
self.callbacks = lb_neutron_plugin.LinuxBridgeRpcCallbacks()
|
|
||||||
|
|
||||||
def test_update_device_down(self):
|
|
||||||
with contextlib.nested(
|
|
||||||
mock.patch.object(self.callbacks, "get_port_from_device",
|
|
||||||
return_value=None),
|
|
||||||
mock.patch.object(manager.NeutronManager, "get_plugin")
|
|
||||||
) as (gpfd, gp):
|
|
||||||
self.assertEqual(
|
|
||||||
self.callbacks.update_device_down("fake_context",
|
|
||||||
agent_id="123",
|
|
||||||
device="device",
|
|
||||||
host="host"),
|
|
||||||
{'device': 'device', 'exists': False}
|
|
||||||
)
|
|
||||||
gpfd.return_value = {'id': 'fakeid',
|
|
||||||
'status': q_const.PORT_STATUS_ACTIVE}
|
|
||||||
self.assertEqual(
|
|
||||||
self.callbacks.update_device_down("fake_context",
|
|
||||||
agent_id="123",
|
|
||||||
device="device",
|
|
||||||
host="host"),
|
|
||||||
{'device': 'device', 'exists': True}
|
|
||||||
)
|
|
||||||
|
|
||||||
def test_update_device_up(self):
|
|
||||||
with contextlib.nested(
|
|
||||||
mock.patch.object(self.callbacks, "get_port_from_device",
|
|
||||||
return_value=None),
|
|
||||||
mock.patch.object(manager.NeutronManager, "get_plugin")
|
|
||||||
) as (gpfd, gp):
|
|
||||||
gpfd.return_value = {'id': 'fakeid',
|
|
||||||
'status': q_const.PORT_STATUS_ACTIVE}
|
|
||||||
self.callbacks.update_device_up("fake_context",
|
|
||||||
agent_id="123",
|
|
||||||
device="device",
|
|
||||||
host="host")
|
|
||||||
gpfd.assert_called_once_with('device')
|
|
|
@ -1,132 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2012, Red Hat, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
Unit Tests for linuxbridge rpc
|
|
||||||
"""
|
|
||||||
|
|
||||||
import fixtures
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from neutron.agent import rpc as agent_rpc
|
|
||||||
from neutron.common import topics
|
|
||||||
from neutron.openstack.common import context
|
|
||||||
from neutron.plugins.linuxbridge import lb_neutron_plugin as plb
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
class rpcApiTestCase(base.BaseTestCase):
|
|
||||||
def _test_lb_api(self, rpcapi, topic, method, rpc_method,
|
|
||||||
expected_msg=None, **kwargs):
|
|
||||||
ctxt = context.RequestContext('fake_user', 'fake_project')
|
|
||||||
expected_retval = 'foo' if method == 'call' else None
|
|
||||||
if not expected_msg:
|
|
||||||
expected_msg = rpcapi.make_msg(method, **kwargs)
|
|
||||||
if rpc_method == 'cast' and method == 'run_instance':
|
|
||||||
kwargs['call'] = False
|
|
||||||
|
|
||||||
self.fake_args = None
|
|
||||||
self.fake_kwargs = None
|
|
||||||
|
|
||||||
def _fake_rpc_method(*args, **kwargs):
|
|
||||||
self.fake_args = args
|
|
||||||
self.fake_kwargs = kwargs
|
|
||||||
if expected_retval:
|
|
||||||
return expected_retval
|
|
||||||
|
|
||||||
self.useFixture(fixtures.MonkeyPatch(
|
|
||||||
'neutron.common.rpc_compat.RpcProxy.' + rpc_method,
|
|
||||||
_fake_rpc_method))
|
|
||||||
|
|
||||||
retval = getattr(rpcapi, method)(ctxt, **kwargs)
|
|
||||||
|
|
||||||
self.assertEqual(expected_retval, retval)
|
|
||||||
expected_args = [ctxt, expected_msg]
|
|
||||||
expected_kwargs = {'topic': topic}
|
|
||||||
|
|
||||||
# skip the first argument which is 'self'
|
|
||||||
for arg, expected_arg in zip(self.fake_args[1:], expected_args):
|
|
||||||
self.assertEqual(expected_arg, arg)
|
|
||||||
self.assertEqual(expected_kwargs, self.fake_kwargs)
|
|
||||||
|
|
||||||
def test_delete_network(self):
|
|
||||||
rpcapi = plb.AgentNotifierApi(topics.AGENT)
|
|
||||||
self._test_lb_api(rpcapi,
|
|
||||||
topics.get_topic_name(topics.AGENT,
|
|
||||||
topics.NETWORK,
|
|
||||||
topics.DELETE),
|
|
||||||
'network_delete', rpc_method='fanout_cast',
|
|
||||||
network_id='fake_request_spec')
|
|
||||||
|
|
||||||
def test_port_update(self):
|
|
||||||
cfg.CONF.set_override('rpc_support_old_agents', False, 'AGENT')
|
|
||||||
rpcapi = plb.AgentNotifierApi(topics.AGENT)
|
|
||||||
expected_msg = rpcapi.make_msg('port_update',
|
|
||||||
port='fake_port',
|
|
||||||
network_type='vlan',
|
|
||||||
physical_network='fake_net',
|
|
||||||
segmentation_id='fake_vlan_id')
|
|
||||||
self._test_lb_api(rpcapi,
|
|
||||||
topics.get_topic_name(topics.AGENT,
|
|
||||||
topics.PORT,
|
|
||||||
topics.UPDATE),
|
|
||||||
'port_update', rpc_method='fanout_cast',
|
|
||||||
expected_msg=expected_msg,
|
|
||||||
port='fake_port',
|
|
||||||
physical_network='fake_net',
|
|
||||||
vlan_id='fake_vlan_id')
|
|
||||||
|
|
||||||
def test_port_update_old_agent(self):
|
|
||||||
cfg.CONF.set_override('rpc_support_old_agents', True, 'AGENT')
|
|
||||||
rpcapi = plb.AgentNotifierApi(topics.AGENT)
|
|
||||||
expected_msg = rpcapi.make_msg('port_update',
|
|
||||||
port='fake_port',
|
|
||||||
network_type='vlan',
|
|
||||||
physical_network='fake_net',
|
|
||||||
segmentation_id='fake_vlan_id',
|
|
||||||
vlan_id='fake_vlan_id')
|
|
||||||
self._test_lb_api(rpcapi,
|
|
||||||
topics.get_topic_name(topics.AGENT,
|
|
||||||
topics.PORT,
|
|
||||||
topics.UPDATE),
|
|
||||||
'port_update', rpc_method='fanout_cast',
|
|
||||||
expected_msg=expected_msg,
|
|
||||||
port='fake_port',
|
|
||||||
physical_network='fake_net',
|
|
||||||
vlan_id='fake_vlan_id')
|
|
||||||
|
|
||||||
def test_device_details(self):
|
|
||||||
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
|
|
||||||
self._test_lb_api(rpcapi, topics.PLUGIN,
|
|
||||||
'get_device_details', rpc_method='call',
|
|
||||||
device='fake_device',
|
|
||||||
agent_id='fake_agent_id')
|
|
||||||
|
|
||||||
def test_update_device_down(self):
|
|
||||||
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
|
|
||||||
self._test_lb_api(rpcapi, topics.PLUGIN,
|
|
||||||
'update_device_down', rpc_method='call',
|
|
||||||
device='fake_device',
|
|
||||||
agent_id='fake_agent_id',
|
|
||||||
host='fake_host')
|
|
||||||
|
|
||||||
def test_update_device_up(self):
|
|
||||||
rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
|
|
||||||
self._test_lb_api(rpcapi, topics.PLUGIN,
|
|
||||||
'update_device_up', rpc_method='call',
|
|
||||||
device='fake_device',
|
|
||||||
agent_id='fake_agent_id',
|
|
||||||
host='fake_host')
|
|
|
@ -1,16 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
#
|
|
||||||
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
|
@ -1,79 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
#
|
|
||||||
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
from neutron.db import db_base_plugin_v2
|
|
||||||
from neutron.db import external_net_db
|
|
||||||
from neutron.db import l3_gwmode_db
|
|
||||||
|
|
||||||
|
|
||||||
class Fake1(db_base_plugin_v2.NeutronDbPluginV2,
|
|
||||||
external_net_db.External_net_db_mixin,
|
|
||||||
l3_gwmode_db.L3_NAT_db_mixin):
|
|
||||||
supported_extension_aliases = ['external-net', 'router']
|
|
||||||
|
|
||||||
def fake_func(self):
|
|
||||||
return 'fake1'
|
|
||||||
|
|
||||||
def create_network(self, context, network):
|
|
||||||
session = context.session
|
|
||||||
with session.begin(subtransactions=True):
|
|
||||||
net = super(Fake1, self).create_network(context, network)
|
|
||||||
self._process_l3_create(context, net, network['network'])
|
|
||||||
return net
|
|
||||||
|
|
||||||
def update_network(self, context, id, network):
|
|
||||||
session = context.session
|
|
||||||
with session.begin(subtransactions=True):
|
|
||||||
net = super(Fake1, self).update_network(context, id,
|
|
||||||
network)
|
|
||||||
self._process_l3_update(context, net, network['network'])
|
|
||||||
return net
|
|
||||||
|
|
||||||
def delete_network(self, context, id):
|
|
||||||
session = context.session
|
|
||||||
with session.begin(subtransactions=True):
|
|
||||||
self._process_l3_delete(context, id)
|
|
||||||
return super(Fake1, self).delete_network(context, id)
|
|
||||||
|
|
||||||
def create_port(self, context, port):
|
|
||||||
port = super(Fake1, self).create_port(context, port)
|
|
||||||
return port
|
|
||||||
|
|
||||||
def create_subnet(self, context, subnet):
|
|
||||||
subnet = super(Fake1, self).create_subnet(context, subnet)
|
|
||||||
return subnet
|
|
||||||
|
|
||||||
def update_port(self, context, id, port):
|
|
||||||
port = super(Fake1, self).update_port(context, id, port)
|
|
||||||
return port
|
|
||||||
|
|
||||||
def delete_port(self, context, id, l3_port_check=True):
|
|
||||||
if l3_port_check:
|
|
||||||
self.prevent_l3_port_deletion(context, id)
|
|
||||||
self.disassociate_floatingips(context, id)
|
|
||||||
return super(Fake1, self).delete_port(context, id)
|
|
||||||
|
|
||||||
|
|
||||||
class Fake2(Fake1):
|
|
||||||
def fake_func(self):
|
|
||||||
return 'fake2'
|
|
||||||
|
|
||||||
def fake_func2(self):
|
|
||||||
return 'fake2'
|
|
||||||
|
|
||||||
def start_rpc_listeners(self):
|
|
||||||
# return value is only used to confirm this method was called.
|
|
||||||
return 'OK'
|
|
|
@ -1,78 +0,0 @@
|
||||||
# Copyright (c) 2012 OpenStack Foundation.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from neutron.tests.unit.metaplugin import test_metaplugin
|
|
||||||
from neutron.tests.unit import test_db_plugin as test_plugin
|
|
||||||
from neutron.tests.unit import test_l3_plugin
|
|
||||||
|
|
||||||
|
|
||||||
class MetaPluginV2DBTestCase(test_plugin.NeutronDbPluginV2TestCase):
|
|
||||||
|
|
||||||
_plugin_name = ('neutron.plugins.metaplugin.'
|
|
||||||
'meta_neutron_plugin.MetaPluginV2')
|
|
||||||
|
|
||||||
def setUp(self, plugin=None, ext_mgr=None,
|
|
||||||
service_plugins=None):
|
|
||||||
# NOTE(salv-orlando): The plugin keyword argument is ignored,
|
|
||||||
# as this class will always invoke super with self._plugin_name.
|
|
||||||
# These keyword parameters ensure setUp methods always have the
|
|
||||||
# same signature.
|
|
||||||
test_metaplugin.setup_metaplugin_conf()
|
|
||||||
ext_mgr = ext_mgr or test_l3_plugin.L3TestExtensionManager()
|
|
||||||
self.addCleanup(test_metaplugin.unregister_meta_hooks)
|
|
||||||
super(MetaPluginV2DBTestCase, self).setUp(
|
|
||||||
plugin=self._plugin_name, ext_mgr=ext_mgr,
|
|
||||||
service_plugins=service_plugins)
|
|
||||||
|
|
||||||
|
|
||||||
class TestMetaBasicGet(test_plugin.TestBasicGet,
|
|
||||||
MetaPluginV2DBTestCase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestMetaV2HTTPResponse(test_plugin.TestV2HTTPResponse,
|
|
||||||
MetaPluginV2DBTestCase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestMetaPortsV2(test_plugin.TestPortsV2,
|
|
||||||
MetaPluginV2DBTestCase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestMetaNetworksV2(test_plugin.TestNetworksV2,
|
|
||||||
MetaPluginV2DBTestCase):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestMetaSubnetsV2(test_plugin.TestSubnetsV2,
|
|
||||||
MetaPluginV2DBTestCase):
|
|
||||||
#TODO(nati) This test fails if we run all test, but It success just one
|
|
||||||
def test_update_subnet_route(self):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def test_update_subnet_dns_to_None(self):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def test_update_subnet_route_to_None(self):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def test_update_subnet_dns(self):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestMetaL3NatDBTestCase(test_l3_plugin.L3NatDBIntTestCase,
|
|
||||||
MetaPluginV2DBTestCase):
|
|
||||||
pass
|
|
|
@ -1,404 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import mock
|
|
||||||
from oslo.config import cfg
|
|
||||||
import testtools
|
|
||||||
|
|
||||||
from neutron.common import exceptions as exc
|
|
||||||
from neutron.common import topics
|
|
||||||
from neutron import context
|
|
||||||
from neutron.db import api as db
|
|
||||||
from neutron.db import db_base_plugin_v2
|
|
||||||
from neutron.db import models_v2
|
|
||||||
from neutron.extensions import flavor as ext_flavor
|
|
||||||
from neutron.openstack.common import uuidutils
|
|
||||||
from neutron.plugins.metaplugin import meta_neutron_plugin
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
CONF_FILE = ""
|
|
||||||
META_PATH = "neutron.plugins.metaplugin"
|
|
||||||
FAKE_PATH = "neutron.tests.unit.metaplugin"
|
|
||||||
PROXY_PATH = "%s.proxy_neutron_plugin.ProxyPluginV2" % META_PATH
|
|
||||||
PLUGIN_LIST = """
|
|
||||||
fake1:%s.fake_plugin.Fake1,fake2:%s.fake_plugin.Fake2,proxy:%s
|
|
||||||
""".strip() % (FAKE_PATH, FAKE_PATH, PROXY_PATH)
|
|
||||||
L3_PLUGIN_LIST = """
|
|
||||||
fake1:%s.fake_plugin.Fake1,fake2:%s.fake_plugin.Fake2
|
|
||||||
""".strip() % (FAKE_PATH, FAKE_PATH)
|
|
||||||
|
|
||||||
|
|
||||||
def setup_metaplugin_conf(has_l3=True):
|
|
||||||
cfg.CONF.set_override('auth_url', 'http://localhost:35357/v2.0',
|
|
||||||
'PROXY')
|
|
||||||
cfg.CONF.set_override('auth_region', 'RegionOne', 'PROXY')
|
|
||||||
cfg.CONF.set_override('admin_user', 'neutron', 'PROXY')
|
|
||||||
cfg.CONF.set_override('admin_password', 'password', 'PROXY')
|
|
||||||
cfg.CONF.set_override('admin_tenant_name', 'service', 'PROXY')
|
|
||||||
cfg.CONF.set_override('plugin_list', PLUGIN_LIST, 'META')
|
|
||||||
if has_l3:
|
|
||||||
cfg.CONF.set_override('l3_plugin_list', L3_PLUGIN_LIST, 'META')
|
|
||||||
else:
|
|
||||||
cfg.CONF.set_override('l3_plugin_list', "", 'META')
|
|
||||||
cfg.CONF.set_override('default_flavor', 'fake2', 'META')
|
|
||||||
cfg.CONF.set_override('default_l3_flavor', 'fake1', 'META')
|
|
||||||
cfg.CONF.set_override('base_mac', "12:34:56:78:90:ab")
|
|
||||||
#TODO(nati) remove this after subnet quota change is merged
|
|
||||||
cfg.CONF.set_override('max_dns_nameservers', 10)
|
|
||||||
cfg.CONF.set_override('rpc_backend',
|
|
||||||
'neutron.openstack.common.rpc.impl_fake')
|
|
||||||
|
|
||||||
|
|
||||||
# Hooks registered by metaplugin must not exist for other plugins UT.
|
|
||||||
# So hooks must be unregistered (overwrite to None in fact).
|
|
||||||
def unregister_meta_hooks():
|
|
||||||
db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
|
|
||||||
models_v2.Network, 'metaplugin_net', None, None, None)
|
|
||||||
db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
|
|
||||||
models_v2.Port, 'metaplugin_port', None, None, None)
|
|
||||||
|
|
||||||
|
|
||||||
class MetaNeutronPluginV2Test(base.BaseTestCase):
|
|
||||||
"""Class conisting of MetaNeutronPluginV2 unit tests."""
|
|
||||||
|
|
||||||
has_l3 = True
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(MetaNeutronPluginV2Test, self).setUp()
|
|
||||||
db._ENGINE = None
|
|
||||||
db._MAKER = None
|
|
||||||
self.fake_tenant_id = uuidutils.generate_uuid()
|
|
||||||
self.context = context.get_admin_context()
|
|
||||||
|
|
||||||
db.configure_db()
|
|
||||||
self.addCleanup(db.clear_db)
|
|
||||||
self.addCleanup(unregister_meta_hooks)
|
|
||||||
|
|
||||||
setup_metaplugin_conf(self.has_l3)
|
|
||||||
|
|
||||||
self.client_cls_p = mock.patch('neutronclient.v2_0.client.Client')
|
|
||||||
client_cls = self.client_cls_p.start()
|
|
||||||
self.client_inst = mock.Mock()
|
|
||||||
client_cls.return_value = self.client_inst
|
|
||||||
self.client_inst.create_network.return_value = \
|
|
||||||
{'id': 'fake_id'}
|
|
||||||
self.client_inst.create_port.return_value = \
|
|
||||||
{'id': 'fake_id'}
|
|
||||||
self.client_inst.create_subnet.return_value = \
|
|
||||||
{'id': 'fake_id'}
|
|
||||||
self.client_inst.update_network.return_value = \
|
|
||||||
{'id': 'fake_id'}
|
|
||||||
self.client_inst.update_port.return_value = \
|
|
||||||
{'id': 'fake_id'}
|
|
||||||
self.client_inst.update_subnet.return_value = \
|
|
||||||
{'id': 'fake_id'}
|
|
||||||
self.client_inst.delete_network.return_value = True
|
|
||||||
self.client_inst.delete_port.return_value = True
|
|
||||||
self.client_inst.delete_subnet.return_value = True
|
|
||||||
plugin = (meta_neutron_plugin.MetaPluginV2.__module__ + '.'
|
|
||||||
+ meta_neutron_plugin.MetaPluginV2.__name__)
|
|
||||||
self.setup_coreplugin(plugin)
|
|
||||||
self.plugin = meta_neutron_plugin.MetaPluginV2(configfile=None)
|
|
||||||
|
|
||||||
def _fake_network(self, flavor):
|
|
||||||
data = {'network': {'name': flavor,
|
|
||||||
'admin_state_up': True,
|
|
||||||
'shared': False,
|
|
||||||
'router:external': [],
|
|
||||||
'tenant_id': self.fake_tenant_id,
|
|
||||||
ext_flavor.FLAVOR_NETWORK: flavor}}
|
|
||||||
return data
|
|
||||||
|
|
||||||
def _fake_port(self, net_id):
|
|
||||||
return {'port': {'name': net_id,
|
|
||||||
'network_id': net_id,
|
|
||||||
'admin_state_up': True,
|
|
||||||
'device_id': 'bad_device_id',
|
|
||||||
'device_owner': 'bad_device_owner',
|
|
||||||
'admin_state_up': True,
|
|
||||||
'host_routes': [],
|
|
||||||
'fixed_ips': [],
|
|
||||||
'mac_address':
|
|
||||||
self.plugin._generate_mac(self.context, net_id),
|
|
||||||
'tenant_id': self.fake_tenant_id}}
|
|
||||||
|
|
||||||
def _fake_subnet(self, net_id):
|
|
||||||
allocation_pools = [{'start': '10.0.0.2',
|
|
||||||
'end': '10.0.0.254'}]
|
|
||||||
return {'subnet': {'name': net_id,
|
|
||||||
'network_id': net_id,
|
|
||||||
'gateway_ip': '10.0.0.1',
|
|
||||||
'dns_nameservers': ['10.0.0.2'],
|
|
||||||
'host_routes': [],
|
|
||||||
'cidr': '10.0.0.0/24',
|
|
||||||
'allocation_pools': allocation_pools,
|
|
||||||
'enable_dhcp': True,
|
|
||||||
'ip_version': 4}}
|
|
||||||
|
|
||||||
def _fake_router(self, flavor):
|
|
||||||
data = {'router': {'name': flavor, 'admin_state_up': True,
|
|
||||||
'tenant_id': self.fake_tenant_id,
|
|
||||||
ext_flavor.FLAVOR_ROUTER: flavor,
|
|
||||||
'external_gateway_info': None}}
|
|
||||||
return data
|
|
||||||
|
|
||||||
def test_create_delete_network(self):
|
|
||||||
network1 = self._fake_network('fake1')
|
|
||||||
ret1 = self.plugin.create_network(self.context, network1)
|
|
||||||
self.assertEqual('fake1', ret1[ext_flavor.FLAVOR_NETWORK])
|
|
||||||
|
|
||||||
network2 = self._fake_network('fake2')
|
|
||||||
ret2 = self.plugin.create_network(self.context, network2)
|
|
||||||
self.assertEqual('fake2', ret2[ext_flavor.FLAVOR_NETWORK])
|
|
||||||
|
|
||||||
network3 = self._fake_network('proxy')
|
|
||||||
ret3 = self.plugin.create_network(self.context, network3)
|
|
||||||
self.assertEqual('proxy', ret3[ext_flavor.FLAVOR_NETWORK])
|
|
||||||
|
|
||||||
db_ret1 = self.plugin.get_network(self.context, ret1['id'])
|
|
||||||
self.assertEqual('fake1', db_ret1['name'])
|
|
||||||
|
|
||||||
db_ret2 = self.plugin.get_network(self.context, ret2['id'])
|
|
||||||
self.assertEqual('fake2', db_ret2['name'])
|
|
||||||
|
|
||||||
db_ret3 = self.plugin.get_network(self.context, ret3['id'])
|
|
||||||
self.assertEqual('proxy', db_ret3['name'])
|
|
||||||
|
|
||||||
db_ret4 = self.plugin.get_networks(self.context)
|
|
||||||
self.assertEqual(3, len(db_ret4))
|
|
||||||
|
|
||||||
db_ret5 = self.plugin.get_networks(
|
|
||||||
self.context,
|
|
||||||
{ext_flavor.FLAVOR_NETWORK: ['fake1']})
|
|
||||||
self.assertEqual(1, len(db_ret5))
|
|
||||||
self.assertEqual('fake1', db_ret5[0]['name'])
|
|
||||||
self.plugin.delete_network(self.context, ret1['id'])
|
|
||||||
self.plugin.delete_network(self.context, ret2['id'])
|
|
||||||
self.plugin.delete_network(self.context, ret3['id'])
|
|
||||||
|
|
||||||
def test_create_delete_port(self):
|
|
||||||
network1 = self._fake_network('fake1')
|
|
||||||
network_ret1 = self.plugin.create_network(self.context, network1)
|
|
||||||
network2 = self._fake_network('fake2')
|
|
||||||
network_ret2 = self.plugin.create_network(self.context, network2)
|
|
||||||
network3 = self._fake_network('proxy')
|
|
||||||
network_ret3 = self.plugin.create_network(self.context, network3)
|
|
||||||
|
|
||||||
port1 = self._fake_port(network_ret1['id'])
|
|
||||||
port2 = self._fake_port(network_ret2['id'])
|
|
||||||
port3 = self._fake_port(network_ret3['id'])
|
|
||||||
|
|
||||||
port1_ret = self.plugin.create_port(self.context, port1)
|
|
||||||
port2_ret = self.plugin.create_port(self.context, port2)
|
|
||||||
port3_ret = self.plugin.create_port(self.context, port3)
|
|
||||||
ports_all = self.plugin.get_ports(self.context)
|
|
||||||
|
|
||||||
self.assertEqual(network_ret1['id'], port1_ret['network_id'])
|
|
||||||
self.assertEqual(network_ret2['id'], port2_ret['network_id'])
|
|
||||||
self.assertEqual(network_ret3['id'], port3_ret['network_id'])
|
|
||||||
self.assertEqual(3, len(ports_all))
|
|
||||||
|
|
||||||
port1_dict = self.plugin._make_port_dict(port1_ret)
|
|
||||||
port2_dict = self.plugin._make_port_dict(port2_ret)
|
|
||||||
port3_dict = self.plugin._make_port_dict(port3_ret)
|
|
||||||
|
|
||||||
self.assertEqual(port1_dict, port1_ret)
|
|
||||||
self.assertEqual(port2_dict, port2_ret)
|
|
||||||
self.assertEqual(port3_dict, port3_ret)
|
|
||||||
|
|
||||||
port1['port']['admin_state_up'] = False
|
|
||||||
port2['port']['admin_state_up'] = False
|
|
||||||
port3['port']['admin_state_up'] = False
|
|
||||||
self.plugin.update_port(self.context, port1_ret['id'], port1)
|
|
||||||
self.plugin.update_port(self.context, port2_ret['id'], port2)
|
|
||||||
self.plugin.update_port(self.context, port3_ret['id'], port3)
|
|
||||||
port_in_db1 = self.plugin.get_port(self.context, port1_ret['id'])
|
|
||||||
port_in_db2 = self.plugin.get_port(self.context, port2_ret['id'])
|
|
||||||
port_in_db3 = self.plugin.get_port(self.context, port3_ret['id'])
|
|
||||||
self.assertEqual(False, port_in_db1['admin_state_up'])
|
|
||||||
self.assertEqual(False, port_in_db2['admin_state_up'])
|
|
||||||
self.assertEqual(False, port_in_db3['admin_state_up'])
|
|
||||||
|
|
||||||
self.plugin.delete_port(self.context, port1_ret['id'])
|
|
||||||
self.plugin.delete_port(self.context, port2_ret['id'])
|
|
||||||
self.plugin.delete_port(self.context, port3_ret['id'])
|
|
||||||
|
|
||||||
self.plugin.delete_network(self.context, network_ret1['id'])
|
|
||||||
self.plugin.delete_network(self.context, network_ret2['id'])
|
|
||||||
self.plugin.delete_network(self.context, network_ret3['id'])
|
|
||||||
|
|
||||||
def test_create_delete_subnet(self):
|
|
||||||
# for this test we need to enable overlapping ips
|
|
||||||
cfg.CONF.set_default('allow_overlapping_ips', True)
|
|
||||||
network1 = self._fake_network('fake1')
|
|
||||||
network_ret1 = self.plugin.create_network(self.context, network1)
|
|
||||||
network2 = self._fake_network('fake2')
|
|
||||||
network_ret2 = self.plugin.create_network(self.context, network2)
|
|
||||||
network3 = self._fake_network('proxy')
|
|
||||||
network_ret3 = self.plugin.create_network(self.context, network3)
|
|
||||||
|
|
||||||
subnet1 = self._fake_subnet(network_ret1['id'])
|
|
||||||
subnet2 = self._fake_subnet(network_ret2['id'])
|
|
||||||
subnet3 = self._fake_subnet(network_ret3['id'])
|
|
||||||
|
|
||||||
subnet1_ret = self.plugin.create_subnet(self.context, subnet1)
|
|
||||||
subnet2_ret = self.plugin.create_subnet(self.context, subnet2)
|
|
||||||
subnet3_ret = self.plugin.create_subnet(self.context, subnet3)
|
|
||||||
self.assertEqual(network_ret1['id'], subnet1_ret['network_id'])
|
|
||||||
self.assertEqual(network_ret2['id'], subnet2_ret['network_id'])
|
|
||||||
self.assertEqual(network_ret3['id'], subnet3_ret['network_id'])
|
|
||||||
|
|
||||||
subnet_in_db1 = self.plugin.get_subnet(self.context, subnet1_ret['id'])
|
|
||||||
subnet_in_db2 = self.plugin.get_subnet(self.context, subnet2_ret['id'])
|
|
||||||
subnet_in_db3 = self.plugin.get_subnet(self.context, subnet3_ret['id'])
|
|
||||||
|
|
||||||
subnet1['subnet']['allocation_pools'].pop()
|
|
||||||
subnet2['subnet']['allocation_pools'].pop()
|
|
||||||
subnet3['subnet']['allocation_pools'].pop()
|
|
||||||
|
|
||||||
self.plugin.update_subnet(self.context,
|
|
||||||
subnet1_ret['id'], subnet1)
|
|
||||||
self.plugin.update_subnet(self.context,
|
|
||||||
subnet2_ret['id'], subnet2)
|
|
||||||
self.plugin.update_subnet(self.context,
|
|
||||||
subnet3_ret['id'], subnet3)
|
|
||||||
subnet_in_db1 = self.plugin.get_subnet(self.context, subnet1_ret['id'])
|
|
||||||
subnet_in_db2 = self.plugin.get_subnet(self.context, subnet2_ret['id'])
|
|
||||||
subnet_in_db3 = self.plugin.get_subnet(self.context, subnet3_ret['id'])
|
|
||||||
|
|
||||||
self.assertEqual(4, subnet_in_db1['ip_version'])
|
|
||||||
self.assertEqual(4, subnet_in_db2['ip_version'])
|
|
||||||
self.assertEqual(4, subnet_in_db3['ip_version'])
|
|
||||||
|
|
||||||
self.plugin.delete_subnet(self.context, subnet1_ret['id'])
|
|
||||||
self.plugin.delete_subnet(self.context, subnet2_ret['id'])
|
|
||||||
self.plugin.delete_subnet(self.context, subnet3_ret['id'])
|
|
||||||
|
|
||||||
self.plugin.delete_network(self.context, network_ret1['id'])
|
|
||||||
self.plugin.delete_network(self.context, network_ret2['id'])
|
|
||||||
self.plugin.delete_network(self.context, network_ret3['id'])
|
|
||||||
|
|
||||||
def test_create_delete_router(self):
|
|
||||||
router1 = self._fake_router('fake1')
|
|
||||||
router_ret1 = self.plugin.create_router(self.context, router1)
|
|
||||||
router2 = self._fake_router('fake2')
|
|
||||||
router_ret2 = self.plugin.create_router(self.context, router2)
|
|
||||||
|
|
||||||
self.assertEqual('fake1', router_ret1[ext_flavor.FLAVOR_ROUTER])
|
|
||||||
self.assertEqual('fake2', router_ret2[ext_flavor.FLAVOR_ROUTER])
|
|
||||||
|
|
||||||
router_in_db1 = self.plugin.get_router(self.context, router_ret1['id'])
|
|
||||||
router_in_db2 = self.plugin.get_router(self.context, router_ret2['id'])
|
|
||||||
|
|
||||||
self.assertEqual('fake1', router_in_db1[ext_flavor.FLAVOR_ROUTER])
|
|
||||||
self.assertEqual('fake2', router_in_db2[ext_flavor.FLAVOR_ROUTER])
|
|
||||||
|
|
||||||
self.plugin.delete_router(self.context, router_ret1['id'])
|
|
||||||
self.plugin.delete_router(self.context, router_ret2['id'])
|
|
||||||
with testtools.ExpectedException(meta_neutron_plugin.FlavorNotFound):
|
|
||||||
self.plugin.get_router(self.context, router_ret1['id'])
|
|
||||||
|
|
||||||
def test_extension_method(self):
|
|
||||||
self.assertEqual('fake1', self.plugin.fake_func())
|
|
||||||
self.assertEqual('fake2', self.plugin.fake_func2())
|
|
||||||
|
|
||||||
def test_extension_not_implemented_method(self):
|
|
||||||
try:
|
|
||||||
self.plugin.not_implemented()
|
|
||||||
except AttributeError:
|
|
||||||
return
|
|
||||||
except Exception:
|
|
||||||
self.fail("AttributeError Error is not raised")
|
|
||||||
|
|
||||||
self.fail("No Error is not raised")
|
|
||||||
|
|
||||||
def test_create_network_flavor_fail(self):
|
|
||||||
with mock.patch('neutron.plugins.metaplugin.meta_db_v2.'
|
|
||||||
'add_network_flavor_binding',
|
|
||||||
side_effect=Exception):
|
|
||||||
network = self._fake_network('fake1')
|
|
||||||
self.assertRaises(meta_neutron_plugin.FaildToAddFlavorBinding,
|
|
||||||
self.plugin.create_network,
|
|
||||||
self.context,
|
|
||||||
network)
|
|
||||||
count = self.plugin.get_networks_count(self.context)
|
|
||||||
self.assertEqual(count, 0)
|
|
||||||
|
|
||||||
def test_create_router_flavor_fail(self):
|
|
||||||
with mock.patch('neutron.plugins.metaplugin.meta_db_v2.'
|
|
||||||
'add_router_flavor_binding',
|
|
||||||
side_effect=Exception):
|
|
||||||
router = self._fake_router('fake1')
|
|
||||||
self.assertRaises(meta_neutron_plugin.FaildToAddFlavorBinding,
|
|
||||||
self.plugin.create_router,
|
|
||||||
self.context,
|
|
||||||
router)
|
|
||||||
count = self.plugin.get_routers_count(self.context)
|
|
||||||
self.assertEqual(count, 0)
|
|
||||||
|
|
||||||
|
|
||||||
class MetaNeutronPluginV2TestWithoutL3(MetaNeutronPluginV2Test):
|
|
||||||
"""Tests without l3_plugin_list configration."""
|
|
||||||
|
|
||||||
has_l3 = False
|
|
||||||
|
|
||||||
def test_supported_extension_aliases(self):
|
|
||||||
self.assertEqual(self.plugin.supported_extension_aliases,
|
|
||||||
['flavor', 'external-net'])
|
|
||||||
|
|
||||||
def test_create_delete_router(self):
|
|
||||||
self.skipTest("Test case without router")
|
|
||||||
|
|
||||||
def test_create_router_flavor_fail(self):
|
|
||||||
self.skipTest("Test case without router")
|
|
||||||
|
|
||||||
|
|
||||||
class MetaNeutronPluginV2TestRpcFlavor(base.BaseTestCase):
|
|
||||||
"""Tests for rpc_flavor."""
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(MetaNeutronPluginV2TestRpcFlavor, self).setUp()
|
|
||||||
db._ENGINE = None
|
|
||||||
db._MAKER = None
|
|
||||||
db.configure_db()
|
|
||||||
self.addCleanup(db.clear_db)
|
|
||||||
self.addCleanup(unregister_meta_hooks)
|
|
||||||
|
|
||||||
def test_rpc_flavor(self):
|
|
||||||
setup_metaplugin_conf()
|
|
||||||
cfg.CONF.set_override('rpc_flavor', 'fake1', 'META')
|
|
||||||
self.plugin = meta_neutron_plugin.MetaPluginV2()
|
|
||||||
self.assertEqual(topics.PLUGIN, 'q-plugin')
|
|
||||||
ret = self.plugin.rpc_workers_supported()
|
|
||||||
self.assertFalse(ret)
|
|
||||||
|
|
||||||
def test_invalid_rpc_flavor(self):
|
|
||||||
setup_metaplugin_conf()
|
|
||||||
cfg.CONF.set_override('rpc_flavor', 'fake-fake', 'META')
|
|
||||||
self.assertRaises(exc.Invalid,
|
|
||||||
meta_neutron_plugin.MetaPluginV2)
|
|
||||||
self.assertEqual(topics.PLUGIN, 'q-plugin')
|
|
||||||
|
|
||||||
def test_rpc_flavor_multiple_rpc_workers(self):
|
|
||||||
setup_metaplugin_conf()
|
|
||||||
cfg.CONF.set_override('rpc_flavor', 'fake2', 'META')
|
|
||||||
self.plugin = meta_neutron_plugin.MetaPluginV2()
|
|
||||||
self.assertEqual(topics.PLUGIN, 'q-plugin')
|
|
||||||
ret = self.plugin.rpc_workers_supported()
|
|
||||||
self.assertTrue(ret)
|
|
||||||
ret = self.plugin.start_rpc_listeners()
|
|
||||||
self.assertEqual('OK', ret)
|
|
|
@ -1,17 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright (C) 2012 Midokura Japan K.K.
|
|
||||||
# Copyright (C) 2013 Midokura PTE LTD
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
|
@ -1,16 +0,0 @@
|
||||||
[midonet]
|
|
||||||
|
|
||||||
# MidoNet API server URI
|
|
||||||
midonet_uri = http://localhost:8080/midonet-api
|
|
||||||
|
|
||||||
# MidoNet admin username
|
|
||||||
username = admin
|
|
||||||
|
|
||||||
# MidoNet admin password
|
|
||||||
password = passw0rd
|
|
||||||
|
|
||||||
# Virtual provider router ID
|
|
||||||
provider_router_id = 00112233-0011-0011-0011-001122334455
|
|
||||||
|
|
||||||
# Virtual metadata router ID
|
|
||||||
metadata_router_id = ffeeddcc-ffee-ffee-ffee-ffeeddccbbaa
|
|
|
@ -1,265 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright (C) 2013 Midokura PTE LTD
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Ryu Ishimoto, Midokura Japan KK
|
|
||||||
|
|
||||||
import mock
|
|
||||||
import uuid
|
|
||||||
|
|
||||||
|
|
||||||
def get_bridge_mock(id=None, **kwargs):
|
|
||||||
if id is None:
|
|
||||||
id = str(uuid.uuid4())
|
|
||||||
|
|
||||||
bridge = mock.Mock()
|
|
||||||
bridge.get_id.return_value = id
|
|
||||||
bridge.get_tenant_id.return_value = kwargs.get("tenant_id", "test-tenant")
|
|
||||||
bridge.get_name.return_value = kwargs.get("name", "net")
|
|
||||||
bridge.get_ports.return_value = []
|
|
||||||
bridge.get_peer_ports.return_value = []
|
|
||||||
bridge.get_admin_state_up.return_value = kwargs.get("admin_state_up", True)
|
|
||||||
return bridge
|
|
||||||
|
|
||||||
|
|
||||||
def get_bridge_port_mock(id=None, bridge_id=None, **kwargs):
|
|
||||||
if id is None:
|
|
||||||
id = str(uuid.uuid4())
|
|
||||||
if bridge_id is None:
|
|
||||||
bridge_id = str(uuid.uuid4())
|
|
||||||
|
|
||||||
port = mock.Mock()
|
|
||||||
port.get_id.return_value = id
|
|
||||||
port.get_bridge_id.return_value = bridge_id
|
|
||||||
port.get_admin_state_up.return_value = kwargs.get("admin_state_up", True)
|
|
||||||
port.get_type.return_value = "Bridge"
|
|
||||||
port.create.return_value = port
|
|
||||||
return port
|
|
||||||
|
|
||||||
|
|
||||||
def get_chain_mock(id=None, tenant_id='test-tenant', name='chain',
|
|
||||||
rules=None):
|
|
||||||
if id is None:
|
|
||||||
id = str(uuid.uuid4())
|
|
||||||
|
|
||||||
if rules is None:
|
|
||||||
rules = []
|
|
||||||
|
|
||||||
chain = mock.Mock()
|
|
||||||
chain.get_id.return_value = id
|
|
||||||
chain.get_tenant_id.return_value = tenant_id
|
|
||||||
chain.get_name.return_value = name
|
|
||||||
chain.get_rules.return_value = rules
|
|
||||||
return chain
|
|
||||||
|
|
||||||
|
|
||||||
def get_port_group_mock(id=None, tenant_id='test-tenant', name='pg'):
|
|
||||||
if id is None:
|
|
||||||
id = str(uuid.uuid4())
|
|
||||||
|
|
||||||
port_group = mock.Mock()
|
|
||||||
port_group.get_id.return_value = id
|
|
||||||
port_group.get_tenant_id.return_value = tenant_id
|
|
||||||
port_group.get_name.return_value = name
|
|
||||||
return port_group
|
|
||||||
|
|
||||||
|
|
||||||
def get_router_mock(id=None, **kwargs):
|
|
||||||
if id is None:
|
|
||||||
id = str(uuid.uuid4())
|
|
||||||
|
|
||||||
router = mock.Mock()
|
|
||||||
router.get_id.return_value = id
|
|
||||||
router.get_tenant_id.return_value = kwargs.get("tenant_id", "test-tenant")
|
|
||||||
router.get_name.return_value = kwargs.get("name", "router")
|
|
||||||
router.get_ports.return_value = []
|
|
||||||
router.get_peer_ports.return_value = []
|
|
||||||
router.get_routes.return_value = []
|
|
||||||
router.get_admin_state_up.return_value = kwargs.get("admin_state_up", True)
|
|
||||||
return router
|
|
||||||
|
|
||||||
|
|
||||||
def get_rule_mock(id=None, chain_id=None, properties=None):
|
|
||||||
if id is None:
|
|
||||||
id = str(uuid.uuid4())
|
|
||||||
|
|
||||||
if chain_id is None:
|
|
||||||
chain_id = str(uuid.uuid4())
|
|
||||||
|
|
||||||
if properties is None:
|
|
||||||
properties = {}
|
|
||||||
|
|
||||||
rule = mock.Mock()
|
|
||||||
rule.get_id.return_value = id
|
|
||||||
rule.get_chain_id.return_value = chain_id
|
|
||||||
rule.get_properties.return_value = properties
|
|
||||||
return rule
|
|
||||||
|
|
||||||
|
|
||||||
def get_subnet_mock(bridge_id=None, gateway_ip='10.0.0.1',
|
|
||||||
subnet_prefix='10.0.0.0', subnet_len=int(24)):
|
|
||||||
if bridge_id is None:
|
|
||||||
bridge_id = str(uuid.uuid4())
|
|
||||||
|
|
||||||
subnet = mock.Mock()
|
|
||||||
subnet.get_id.return_value = subnet_prefix + '/' + str(subnet_len)
|
|
||||||
subnet.get_bridge_id.return_value = bridge_id
|
|
||||||
subnet.get_default_gateway.return_value = gateway_ip
|
|
||||||
subnet.get_subnet_prefix.return_value = subnet_prefix
|
|
||||||
subnet.get_subnet_length.return_value = subnet_len
|
|
||||||
return subnet
|
|
||||||
|
|
||||||
|
|
||||||
class MidonetLibMockConfig():
|
|
||||||
|
|
||||||
def __init__(self, inst):
|
|
||||||
self.inst = inst
|
|
||||||
|
|
||||||
def _create_bridge(self, **kwargs):
|
|
||||||
return get_bridge_mock(**kwargs)
|
|
||||||
|
|
||||||
def _create_router(self, **kwargs):
|
|
||||||
return get_router_mock(**kwargs)
|
|
||||||
|
|
||||||
def _create_subnet(self, bridge, gateway_ip, subnet_prefix, subnet_len):
|
|
||||||
return get_subnet_mock(bridge.get_id(), gateway_ip=gateway_ip,
|
|
||||||
subnet_prefix=subnet_prefix,
|
|
||||||
subnet_len=subnet_len)
|
|
||||||
|
|
||||||
def _add_bridge_port(self, bridge, **kwargs):
|
|
||||||
return get_bridge_port_mock(bridge_id=bridge.get_id(), **kwargs)
|
|
||||||
|
|
||||||
def _get_bridge(self, id):
|
|
||||||
return get_bridge_mock(id=id)
|
|
||||||
|
|
||||||
def _get_port(self, id):
|
|
||||||
return get_bridge_port_mock(id=id)
|
|
||||||
|
|
||||||
def _get_router(self, id):
|
|
||||||
return get_router_mock(id=id)
|
|
||||||
|
|
||||||
def _update_bridge(self, id, **kwargs):
|
|
||||||
return get_bridge_mock(id=id, **kwargs)
|
|
||||||
|
|
||||||
def setup(self):
|
|
||||||
# Bridge methods side effects
|
|
||||||
self.inst.create_bridge.side_effect = self._create_bridge
|
|
||||||
self.inst.get_bridge.side_effect = self._get_bridge
|
|
||||||
self.inst.update_bridge.side_effect = self._update_bridge
|
|
||||||
|
|
||||||
# Subnet methods side effects
|
|
||||||
self.inst.create_subnet.side_effect = self._create_subnet
|
|
||||||
|
|
||||||
# Port methods side effects
|
|
||||||
ex_bp = self.inst.add_bridge_port
|
|
||||||
ex_bp.side_effect = self._add_bridge_port
|
|
||||||
self.inst.get_port.side_effect = self._get_port
|
|
||||||
|
|
||||||
# Router methods side effects
|
|
||||||
self.inst.create_router.side_effect = self._create_router
|
|
||||||
self.inst.get_router.side_effect = self._get_router
|
|
||||||
|
|
||||||
|
|
||||||
class MidoClientMockConfig():
|
|
||||||
|
|
||||||
def __init__(self, inst):
|
|
||||||
self.inst = inst
|
|
||||||
self.chains_in = None
|
|
||||||
self.port_groups_in = None
|
|
||||||
self.chains_out = None
|
|
||||||
self.rules_out = None
|
|
||||||
self.port_groups_out = None
|
|
||||||
|
|
||||||
def _get_query_tenant_id(self, query):
|
|
||||||
if query is not None and query['tenant_id']:
|
|
||||||
tenant_id = query['tenant_id']
|
|
||||||
else:
|
|
||||||
tenant_id = 'test-tenant'
|
|
||||||
return tenant_id
|
|
||||||
|
|
||||||
def _get_bridge(self, id):
|
|
||||||
return get_bridge_mock(id=id)
|
|
||||||
|
|
||||||
def _get_chain(self, id, query=None):
|
|
||||||
if not self.chains_in:
|
|
||||||
return []
|
|
||||||
|
|
||||||
tenant_id = self._get_query_tenant_id(query)
|
|
||||||
for chain in self.chains_in:
|
|
||||||
chain_id = chain['id']
|
|
||||||
if chain_id is id:
|
|
||||||
rule_mocks = []
|
|
||||||
if 'rules' in chain:
|
|
||||||
for rule in chain['rules']:
|
|
||||||
rule_mocks.append(
|
|
||||||
get_rule_mock(id=rule['id'],
|
|
||||||
chain_id=id,
|
|
||||||
properties=rule['properties']))
|
|
||||||
|
|
||||||
return get_chain_mock(id=chain_id, name=chain['name'],
|
|
||||||
tenant_id=tenant_id, rules=rule_mocks)
|
|
||||||
return None
|
|
||||||
|
|
||||||
def _get_chains(self, query=None):
|
|
||||||
if not self.chains_in:
|
|
||||||
return []
|
|
||||||
|
|
||||||
tenant_id = self._get_query_tenant_id(query)
|
|
||||||
self.chains_out = []
|
|
||||||
self.rules_out = []
|
|
||||||
for chain in self.chains_in:
|
|
||||||
chain_id = chain['id']
|
|
||||||
|
|
||||||
rule_mocks = []
|
|
||||||
if 'rules' in chain:
|
|
||||||
for rule in chain['rules']:
|
|
||||||
rule_mocks.append(
|
|
||||||
get_rule_mock(id=rule['id'],
|
|
||||||
chain_id=id,
|
|
||||||
properties=rule['properties']))
|
|
||||||
self.rules_out += rule_mocks
|
|
||||||
|
|
||||||
self.chains_out.append(get_chain_mock(id=chain_id,
|
|
||||||
name=chain['name'],
|
|
||||||
tenant_id=tenant_id,
|
|
||||||
rules=rule_mocks))
|
|
||||||
return self.chains_out
|
|
||||||
|
|
||||||
def _get_port_groups(self, query=None):
|
|
||||||
if not self.port_groups_in:
|
|
||||||
return []
|
|
||||||
|
|
||||||
tenant_id = self._get_query_tenant_id(query)
|
|
||||||
self.port_groups_out = []
|
|
||||||
for port_group in self.port_groups_in:
|
|
||||||
self.port_groups_out.append(get_port_group_mock(
|
|
||||||
id=port_group['id'], name=port_group['name'],
|
|
||||||
tenant_id=tenant_id))
|
|
||||||
return self.port_groups_out
|
|
||||||
|
|
||||||
def _get_router(self, id):
|
|
||||||
return get_router_mock(id=id)
|
|
||||||
|
|
||||||
def _add_bridge_port(self, bridge):
|
|
||||||
return get_bridge_port_mock(bridge_id=bridge.get_id())
|
|
||||||
|
|
||||||
def setup(self):
|
|
||||||
self.inst.get_bridge.side_effect = self._get_bridge
|
|
||||||
self.inst.get_chains.side_effect = self._get_chains
|
|
||||||
self.inst.get_chain.side_effect = self._get_chain
|
|
||||||
self.inst.get_port_groups.side_effect = self._get_port_groups
|
|
||||||
self.inst.get_router.side_effect = self._get_router
|
|
||||||
self.inst.add_bridge_port.side_effect = self._add_bridge_port
|
|
|
@ -1,55 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright (C) 2012 Midokura Japan K.K.
|
|
||||||
# Copyright (C) 2013 Midokura PTE LTD
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Rossella Sblendido, Midokura Japan KK
|
|
||||||
|
|
||||||
import mock
|
|
||||||
|
|
||||||
from neutron.agent.common import config
|
|
||||||
from neutron.agent.linux import dhcp
|
|
||||||
from neutron.common import config as base_config
|
|
||||||
import neutron.plugins.midonet.agent.midonet_driver as driver
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
class FakeNetwork:
|
|
||||||
id = 'aaaabbbb-cccc-dddd-eeee-ffff00001111'
|
|
||||||
namespace = 'qdhcp-ns'
|
|
||||||
|
|
||||||
|
|
||||||
class TestDhcpNoOpDriver(base.BaseTestCase):
|
|
||||||
def setUp(self):
|
|
||||||
super(TestDhcpNoOpDriver, self).setUp()
|
|
||||||
self.conf = config.setup_conf()
|
|
||||||
config.register_interface_driver_opts_helper(self.conf)
|
|
||||||
self.conf.register_opts(base_config.core_opts)
|
|
||||||
self.conf.register_opts(dhcp.OPTS)
|
|
||||||
self.conf.enable_isolated_metadata = True
|
|
||||||
self.conf.use_namespaces = True
|
|
||||||
instance = mock.patch("neutron.agent.linux.dhcp.DeviceManager")
|
|
||||||
self.mock_mgr = instance.start()
|
|
||||||
|
|
||||||
def test_disable_no_retain_port(self):
|
|
||||||
dhcp_driver = driver.DhcpNoOpDriver(self.conf, FakeNetwork())
|
|
||||||
dhcp_driver.disable(retain_port=False)
|
|
||||||
self.assertTrue(self.mock_mgr.return_value.destroy.called)
|
|
||||||
|
|
||||||
def test_disable_retain_port(self):
|
|
||||||
dhcp_driver = driver.DhcpNoOpDriver(self.conf, FakeNetwork())
|
|
||||||
dhcp_driver.disable(retain_port=True)
|
|
||||||
self.assertFalse(self.mock_mgr.return_value.destroy.called)
|
|
|
@ -1,189 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright (C) 2012 Midokura Japan K.K.
|
|
||||||
# Copyright (C) 2013 Midokura PTE LTD
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Ryu Ishimoto, Midokura Japan KK
|
|
||||||
# @author: Tomoe Sugihara, Midokura Japan KK
|
|
||||||
import sys
|
|
||||||
|
|
||||||
import mock
|
|
||||||
import testtools
|
|
||||||
import webob.exc as w_exc
|
|
||||||
|
|
||||||
from neutron.openstack.common import uuidutils
|
|
||||||
with mock.patch.dict(sys.modules, {'midonetclient': mock.Mock()}):
|
|
||||||
from neutron.plugins.midonet import midonet_lib
|
|
||||||
import neutron.tests.unit.midonet.mock_lib as mock_lib
|
|
||||||
|
|
||||||
|
|
||||||
def _create_test_chain(id, name, tenant_id):
|
|
||||||
return {'id': id, 'name': name, 'tenant_id': tenant_id}
|
|
||||||
|
|
||||||
|
|
||||||
def _create_test_port_group(id, name, tenant_id):
|
|
||||||
return {"id": id, "name": name, "tenant_id": tenant_id}
|
|
||||||
|
|
||||||
|
|
||||||
class MidoClientTestCase(testtools.TestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(MidoClientTestCase, self).setUp()
|
|
||||||
self._tenant_id = 'test-tenant'
|
|
||||||
self.mock_api = mock.Mock()
|
|
||||||
self.mock_api_cfg = mock_lib.MidoClientMockConfig(self.mock_api)
|
|
||||||
self.mock_api_cfg.setup()
|
|
||||||
self.client = midonet_lib.MidoClient(self.mock_api)
|
|
||||||
|
|
||||||
def test_delete_chains_by_names(self):
|
|
||||||
|
|
||||||
tenant_id = uuidutils.generate_uuid()
|
|
||||||
chain1_id = uuidutils.generate_uuid()
|
|
||||||
chain1 = _create_test_chain(chain1_id, "chain1", tenant_id)
|
|
||||||
|
|
||||||
chain2_id = uuidutils.generate_uuid()
|
|
||||||
chain2 = _create_test_chain(chain2_id, "chain2", tenant_id)
|
|
||||||
|
|
||||||
calls = [mock.call.delete_chain(chain1_id),
|
|
||||||
mock.call.delete_chain(chain2_id)]
|
|
||||||
self.mock_api_cfg.chains_in = [chain2, chain1]
|
|
||||||
self.client.delete_chains_by_names(tenant_id, ["chain1", "chain2"])
|
|
||||||
|
|
||||||
self.mock_api.assert_has_calls(calls, any_order=True)
|
|
||||||
|
|
||||||
def test_delete_port_group_by_name(self):
|
|
||||||
|
|
||||||
tenant_id = uuidutils.generate_uuid()
|
|
||||||
pg1_id = uuidutils.generate_uuid()
|
|
||||||
pg1 = _create_test_port_group(pg1_id, "pg1", tenant_id)
|
|
||||||
pg2_id = uuidutils.generate_uuid()
|
|
||||||
pg2 = _create_test_port_group(pg2_id, "pg2", tenant_id)
|
|
||||||
|
|
||||||
self.mock_api_cfg.port_groups_in = [pg1, pg2]
|
|
||||||
self.client.delete_port_group_by_name(tenant_id, "pg1")
|
|
||||||
self.mock_api.delete_port_group.assert_called_once_with(pg1_id)
|
|
||||||
|
|
||||||
def test_create_dhcp(self):
|
|
||||||
|
|
||||||
bridge = mock.Mock()
|
|
||||||
|
|
||||||
gateway_ip = "192.168.1.1"
|
|
||||||
cidr = "192.168.1.0/24"
|
|
||||||
host_rts = [{'destination': '10.0.0.0/24', 'nexthop': '10.0.0.1'},
|
|
||||||
{'destination': '10.0.1.0/24', 'nexthop': '10.0.1.1'}]
|
|
||||||
dns_servers = ["8.8.8.8", "8.8.4.4"]
|
|
||||||
|
|
||||||
dhcp_call = mock.call.add_bridge_dhcp(bridge, gateway_ip, cidr,
|
|
||||||
host_rts=host_rts,
|
|
||||||
dns_nservers=dns_servers)
|
|
||||||
|
|
||||||
self.client.create_dhcp(bridge, gateway_ip, cidr, host_rts=host_rts,
|
|
||||||
dns_servers=dns_servers)
|
|
||||||
self.mock_api.assert_has_calls([dhcp_call])
|
|
||||||
|
|
||||||
def test_delete_dhcp(self):
|
|
||||||
|
|
||||||
bridge = mock.Mock()
|
|
||||||
subnet = mock.Mock()
|
|
||||||
subnet.get_subnet_prefix.return_value = "10.0.0.0"
|
|
||||||
subnets = mock.MagicMock(return_value=[subnet])
|
|
||||||
bridge.get_dhcp_subnets.side_effect = subnets
|
|
||||||
self.client.delete_dhcp(bridge, "10.0.0.0/24")
|
|
||||||
bridge.assert_has_calls(mock.call.get_dhcp_subnets)
|
|
||||||
subnet.assert_has_calls([mock.call.get_subnet_prefix(),
|
|
||||||
mock.call.delete()])
|
|
||||||
|
|
||||||
def test_add_dhcp_host(self):
|
|
||||||
|
|
||||||
bridge = mock.Mock()
|
|
||||||
dhcp_subnet_call = mock.call.get_dhcp_subnet("10.0.0.0_24")
|
|
||||||
ip_addr_call = dhcp_subnet_call.add_dhcp_host().ip_addr("10.0.0.10")
|
|
||||||
mac_addr_call = ip_addr_call.mac_addr("2A:DB:6B:8C:19:99")
|
|
||||||
calls = [dhcp_subnet_call, ip_addr_call, mac_addr_call,
|
|
||||||
mac_addr_call.create()]
|
|
||||||
|
|
||||||
self.client.add_dhcp_host(bridge, "10.0.0.0/24", "10.0.0.10",
|
|
||||||
"2A:DB:6B:8C:19:99")
|
|
||||||
bridge.assert_has_calls(calls, any_order=True)
|
|
||||||
|
|
||||||
def test_add_dhcp_route_option(self):
|
|
||||||
|
|
||||||
bridge = mock.Mock()
|
|
||||||
subnet = bridge.get_dhcp_subnet.return_value
|
|
||||||
subnet.get_opt121_routes.return_value = None
|
|
||||||
dhcp_subnet_call = mock.call.get_dhcp_subnet("10.0.0.0_24")
|
|
||||||
dst_ip = "10.0.0.3/24"
|
|
||||||
gw_ip = "10.0.0.1"
|
|
||||||
prefix, length = dst_ip.split("/")
|
|
||||||
routes = [{'destinationPrefix': prefix, 'destinationLength': length,
|
|
||||||
'gatewayAddr': gw_ip}]
|
|
||||||
opt121_routes_call = dhcp_subnet_call.opt121_routes(routes)
|
|
||||||
calls = [dhcp_subnet_call, opt121_routes_call,
|
|
||||||
opt121_routes_call.update()]
|
|
||||||
|
|
||||||
self.client.add_dhcp_route_option(bridge, "10.0.0.0/24",
|
|
||||||
gw_ip, dst_ip)
|
|
||||||
bridge.assert_has_calls(calls, any_order=True)
|
|
||||||
|
|
||||||
def test_get_router_error(self):
|
|
||||||
self.mock_api.get_router.side_effect = w_exc.HTTPInternalServerError()
|
|
||||||
self.assertRaises(midonet_lib.MidonetApiException,
|
|
||||||
self.client.get_router, uuidutils.generate_uuid())
|
|
||||||
|
|
||||||
def test_get_router_not_found(self):
|
|
||||||
self.mock_api.get_router.side_effect = w_exc.HTTPNotFound()
|
|
||||||
self.assertRaises(midonet_lib.MidonetResourceNotFound,
|
|
||||||
self.client.get_router, uuidutils.generate_uuid())
|
|
||||||
|
|
||||||
def test_get_bridge_error(self):
|
|
||||||
self.mock_api.get_bridge.side_effect = w_exc.HTTPInternalServerError()
|
|
||||||
self.assertRaises(midonet_lib.MidonetApiException,
|
|
||||||
self.client.get_bridge, uuidutils.generate_uuid())
|
|
||||||
|
|
||||||
def test_get_bridge_not_found(self):
|
|
||||||
self.mock_api.get_bridge.side_effect = w_exc.HTTPNotFound()
|
|
||||||
self.assertRaises(midonet_lib.MidonetResourceNotFound,
|
|
||||||
self.client.get_bridge, uuidutils.generate_uuid())
|
|
||||||
|
|
||||||
def test_get_bridge(self):
|
|
||||||
bridge_id = uuidutils.generate_uuid()
|
|
||||||
|
|
||||||
bridge = self.client.get_bridge(bridge_id)
|
|
||||||
|
|
||||||
self.assertIsNotNone(bridge)
|
|
||||||
self.assertEqual(bridge.get_id(), bridge_id)
|
|
||||||
self.assertTrue(bridge.get_admin_state_up())
|
|
||||||
|
|
||||||
def test_add_bridge_port(self):
|
|
||||||
bridge_id = uuidutils.generate_uuid()
|
|
||||||
|
|
||||||
bridge = self.client.get_bridge(bridge_id)
|
|
||||||
|
|
||||||
self.assertIsNotNone(bridge)
|
|
||||||
|
|
||||||
port = self.client.add_bridge_port(bridge)
|
|
||||||
|
|
||||||
self.assertEqual(bridge.get_id(), port.get_bridge_id())
|
|
||||||
self.assertTrue(port.get_admin_state_up())
|
|
||||||
|
|
||||||
def test_get_router(self):
|
|
||||||
router_id = uuidutils.generate_uuid()
|
|
||||||
|
|
||||||
router = self.client.get_router(router_id)
|
|
||||||
|
|
||||||
self.assertIsNotNone(router)
|
|
||||||
self.assertEqual(router.get_id(), router_id)
|
|
||||||
self.assertTrue(router.get_admin_state_up())
|
|
|
@ -1,218 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
|
|
||||||
# Copyright (C) 2012 Midokura Japan K.K.
|
|
||||||
# Copyright (C) 2013 Midokura PTE LTD
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Rossella Sblendido, Midokura Europe SARL
|
|
||||||
# @author: Ryu Ishimoto, Midokura Japan KK
|
|
||||||
# @author: Tomoe Sugihara, Midokura Japan KK
|
|
||||||
|
|
||||||
import mock
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
|
|
||||||
import neutron.common.test_lib as test_lib
|
|
||||||
from neutron.extensions import portbindings
|
|
||||||
from neutron.tests.unit import _test_extension_portbindings as test_bindings
|
|
||||||
import neutron.tests.unit.midonet.mock_lib as mock_lib
|
|
||||||
import neutron.tests.unit.test_db_plugin as test_plugin
|
|
||||||
import neutron.tests.unit.test_extension_security_group as sg
|
|
||||||
import neutron.tests.unit.test_l3_plugin as test_l3_plugin
|
|
||||||
|
|
||||||
MIDOKURA_PKG_PATH = "neutron.plugins.midonet.plugin"
|
|
||||||
MIDONET_PLUGIN_NAME = ('%s.MidonetPluginV2' % MIDOKURA_PKG_PATH)
|
|
||||||
|
|
||||||
|
|
||||||
class MidonetPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
|
|
||||||
|
|
||||||
def setUp(self,
|
|
||||||
plugin=MIDONET_PLUGIN_NAME,
|
|
||||||
ext_mgr=None,
|
|
||||||
service_plugins=None):
|
|
||||||
self.mock_api = mock.patch(
|
|
||||||
'neutron.plugins.midonet.midonet_lib.MidoClient')
|
|
||||||
etc_path = os.path.join(os.path.dirname(__file__), 'etc')
|
|
||||||
test_lib.test_config['config_files'] = [os.path.join(
|
|
||||||
etc_path, 'midonet.ini.test')]
|
|
||||||
|
|
||||||
p = mock.patch.dict(sys.modules, {'midonetclient': mock.Mock()})
|
|
||||||
p.start()
|
|
||||||
# dict patches must be explicitly stopped
|
|
||||||
self.addCleanup(p.stop)
|
|
||||||
self.instance = self.mock_api.start()
|
|
||||||
mock_cfg = mock_lib.MidonetLibMockConfig(self.instance.return_value)
|
|
||||||
mock_cfg.setup()
|
|
||||||
super(MidonetPluginV2TestCase, self).setUp(plugin=plugin,
|
|
||||||
ext_mgr=ext_mgr)
|
|
||||||
|
|
||||||
def tearDown(self):
|
|
||||||
super(MidonetPluginV2TestCase, self).tearDown()
|
|
||||||
self.mock_api.stop()
|
|
||||||
|
|
||||||
|
|
||||||
class TestMidonetNetworksV2(test_plugin.TestNetworksV2,
|
|
||||||
MidonetPluginV2TestCase):
|
|
||||||
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestMidonetL3NatTestCase(MidonetPluginV2TestCase,
|
|
||||||
test_l3_plugin.L3NatDBIntTestCase):
|
|
||||||
def setUp(self,
|
|
||||||
plugin=MIDONET_PLUGIN_NAME,
|
|
||||||
ext_mgr=None,
|
|
||||||
service_plugins=None):
|
|
||||||
super(TestMidonetL3NatTestCase, self).setUp(plugin=plugin,
|
|
||||||
ext_mgr=None,
|
|
||||||
service_plugins=None)
|
|
||||||
|
|
||||||
def test_floatingip_with_invalid_create_port(self):
|
|
||||||
self._test_floatingip_with_invalid_create_port(MIDONET_PLUGIN_NAME)
|
|
||||||
|
|
||||||
def test_floatingip_assoc_no_port(self):
|
|
||||||
with self.subnet(cidr='200.0.0.0/24') as public_sub:
|
|
||||||
self._set_net_external(public_sub['subnet']['network_id'])
|
|
||||||
res = super(TestMidonetL3NatTestCase, self)._create_floatingip(
|
|
||||||
self.fmt, public_sub['subnet']['network_id'])
|
|
||||||
# Cleanup
|
|
||||||
floatingip = self.deserialize(self.fmt, res)
|
|
||||||
self._delete('floatingips', floatingip['floatingip']['id'])
|
|
||||||
self.assertFalse(self.instance.return_value.add_static_nat.called)
|
|
||||||
|
|
||||||
def test_floatingip_assoc_with_port(self):
|
|
||||||
with self.subnet(cidr='200.0.0.0/24') as public_sub:
|
|
||||||
self._set_net_external(public_sub['subnet']['network_id'])
|
|
||||||
with self.port() as private_port:
|
|
||||||
with self.router() as r:
|
|
||||||
# We need to hook up the private subnet to the external
|
|
||||||
# network in order to associate the fip.
|
|
||||||
sid = private_port['port']['fixed_ips'][0]['subnet_id']
|
|
||||||
private_sub = {'subnet': {'id': sid}}
|
|
||||||
self._add_external_gateway_to_router(
|
|
||||||
r['router']['id'],
|
|
||||||
public_sub['subnet']['network_id'])
|
|
||||||
|
|
||||||
# Check that get_link_port was called - if not, Source NAT
|
|
||||||
# will not be set up correctly on the MidoNet side
|
|
||||||
self.assertTrue(
|
|
||||||
self.instance.return_value.get_link_port.called)
|
|
||||||
|
|
||||||
self._router_interface_action('add', r['router']['id'],
|
|
||||||
private_sub['subnet']['id'],
|
|
||||||
None)
|
|
||||||
|
|
||||||
# Create the fip.
|
|
||||||
res = super(TestMidonetL3NatTestCase,
|
|
||||||
self)._create_floatingip(
|
|
||||||
self.fmt,
|
|
||||||
public_sub['subnet']['network_id'],
|
|
||||||
port_id=private_port['port']['id'])
|
|
||||||
|
|
||||||
# Cleanup the resources used for the test
|
|
||||||
floatingip = self.deserialize(self.fmt, res)
|
|
||||||
self._delete('floatingips', floatingip['floatingip']['id'])
|
|
||||||
self._remove_external_gateway_from_router(
|
|
||||||
r['router']['id'],
|
|
||||||
public_sub['subnet']['network_id'])
|
|
||||||
self._router_interface_action('remove',
|
|
||||||
r['router']['id'],
|
|
||||||
private_sub['subnet']['id'],
|
|
||||||
None)
|
|
||||||
self.assertTrue(self.instance.return_value.add_static_nat.called)
|
|
||||||
|
|
||||||
|
|
||||||
class TestMidonetSecurityGroupsTestCase(sg.SecurityGroupDBTestCase):
|
|
||||||
|
|
||||||
_plugin_name = ('%s.MidonetPluginV2' % MIDOKURA_PKG_PATH)
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
self.mock_api = mock.patch(
|
|
||||||
'neutron.plugins.midonet.midonet_lib.MidoClient')
|
|
||||||
etc_path = os.path.join(os.path.dirname(__file__), 'etc')
|
|
||||||
test_lib.test_config['config_files'] = [os.path.join(
|
|
||||||
etc_path, 'midonet.ini.test')]
|
|
||||||
|
|
||||||
self.instance = self.mock_api.start()
|
|
||||||
mock_cfg = mock_lib.MidonetLibMockConfig(self.instance.return_value)
|
|
||||||
mock_cfg.setup()
|
|
||||||
p = mock.patch.dict(sys.modules, {'midonetclient': mock.Mock()})
|
|
||||||
p.start()
|
|
||||||
# dict patches must be explicitly stopped
|
|
||||||
self.addCleanup(p.stop)
|
|
||||||
super(TestMidonetSecurityGroupsTestCase, self).setUp(self._plugin_name)
|
|
||||||
|
|
||||||
|
|
||||||
class TestMidonetSecurityGroup(sg.TestSecurityGroups,
|
|
||||||
TestMidonetSecurityGroupsTestCase):
|
|
||||||
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestMidonetSubnetsV2(test_plugin.TestSubnetsV2,
|
|
||||||
MidonetPluginV2TestCase):
|
|
||||||
|
|
||||||
# IPv6 is not supported by MidoNet yet. Ignore tests that attempt to
|
|
||||||
# create IPv6 subnet.
|
|
||||||
def test_create_subnet_inconsistent_ipv6_cidrv4(self):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def test_create_subnet_inconsistent_ipv6_dns_v4(self):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def test_create_subnet_with_v6_allocation_pool(self):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def test_update_subnet_inconsistent_ipv6_gatewayv4(self):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def test_create_subnet_inconsistent_ipv6_gatewayv4(self):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def test_create_subnet_dhcp_disabled(self):
|
|
||||||
super(TestMidonetSubnetsV2, self)._test_create_subnet(
|
|
||||||
enable_dhcp=False)
|
|
||||||
self.assertFalse(self.instance.return_value.create_dhcp.called)
|
|
||||||
|
|
||||||
|
|
||||||
class TestMidonetPortsV2(test_plugin.TestPortsV2,
|
|
||||||
MidonetPluginV2TestCase):
|
|
||||||
|
|
||||||
# IPv6 is not supported by MidoNet yet. Ignore tests that attempt to
|
|
||||||
# create IPv6 subnet.
|
|
||||||
|
|
||||||
def test_requested_subnet_id_v4_and_v6(self):
|
|
||||||
pass
|
|
||||||
|
|
||||||
def test_vif_port_binding(self):
|
|
||||||
with self.port(name='myname') as port:
|
|
||||||
self.assertEqual('midonet', port['port']['binding:vif_type'])
|
|
||||||
self.assertTrue(port['port']['admin_state_up'])
|
|
||||||
|
|
||||||
|
|
||||||
class TestMidonetPluginPortBinding(test_bindings.PortBindingsTestCase,
|
|
||||||
MidonetPluginV2TestCase):
|
|
||||||
|
|
||||||
VIF_TYPE = portbindings.VIF_TYPE_MIDONET
|
|
||||||
HAS_PORT_FILTER = True
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestMidonetPluginPortBinding, self).setUp()
|
|
|
@ -1,14 +0,0 @@
|
||||||
# Copyright (c) 2013 OpenStack Foundation
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
|
@ -1,218 +0,0 @@
|
||||||
# Copyright (c) 2013 OpenStack Foundation
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
|
|
||||||
from neutron.extensions import portbindings
|
|
||||||
from neutron.plugins.ml2 import driver_api as api
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
NETWORK_ID = "fake_network"
|
|
||||||
PORT_ID = "fake_port"
|
|
||||||
|
|
||||||
|
|
||||||
class FakeNetworkContext(api.NetworkContext):
|
|
||||||
def __init__(self, segments):
|
|
||||||
self._network_segments = segments
|
|
||||||
|
|
||||||
@property
|
|
||||||
def current(self):
|
|
||||||
return {'id': NETWORK_ID}
|
|
||||||
|
|
||||||
@property
|
|
||||||
def original(self):
|
|
||||||
return None
|
|
||||||
|
|
||||||
@property
|
|
||||||
def network_segments(self):
|
|
||||||
return self._network_segments
|
|
||||||
|
|
||||||
|
|
||||||
class FakePortContext(api.PortContext):
|
|
||||||
def __init__(self, agent_type, agents, segments,
|
|
||||||
vnic_type=portbindings.VNIC_NORMAL):
|
|
||||||
self._agent_type = agent_type
|
|
||||||
self._agents = agents
|
|
||||||
self._network_context = FakeNetworkContext(segments)
|
|
||||||
self._bound_vnic_type = vnic_type
|
|
||||||
self._bound_segment_id = None
|
|
||||||
self._bound_vif_type = None
|
|
||||||
self._bound_vif_details = None
|
|
||||||
|
|
||||||
@property
|
|
||||||
def current(self):
|
|
||||||
return {'id': PORT_ID,
|
|
||||||
'binding:vnic_type': self._bound_vnic_type}
|
|
||||||
|
|
||||||
@property
|
|
||||||
def original(self):
|
|
||||||
return None
|
|
||||||
|
|
||||||
@property
|
|
||||||
def network(self):
|
|
||||||
return self._network_context
|
|
||||||
|
|
||||||
@property
|
|
||||||
def bound_segment(self):
|
|
||||||
if self._bound_segment_id:
|
|
||||||
for segment in self._network_context.network_segments:
|
|
||||||
if segment[api.ID] == self._bound_segment_id:
|
|
||||||
return segment
|
|
||||||
|
|
||||||
@property
|
|
||||||
def original_bound_segment(self):
|
|
||||||
return None
|
|
||||||
|
|
||||||
@property
|
|
||||||
def bound_driver(self):
|
|
||||||
return None
|
|
||||||
|
|
||||||
@property
|
|
||||||
def original_bound_driver(self):
|
|
||||||
return None
|
|
||||||
|
|
||||||
def host_agents(self, agent_type):
|
|
||||||
if agent_type == self._agent_type:
|
|
||||||
return self._agents
|
|
||||||
else:
|
|
||||||
return []
|
|
||||||
|
|
||||||
def set_binding(self, segment_id, vif_type, vif_details):
|
|
||||||
self._bound_segment_id = segment_id
|
|
||||||
self._bound_vif_type = vif_type
|
|
||||||
self._bound_vif_details = vif_details
|
|
||||||
|
|
||||||
|
|
||||||
class AgentMechanismBaseTestCase(base.BaseTestCase):
|
|
||||||
# These following must be overriden for the specific mechanism
|
|
||||||
# driver being tested:
|
|
||||||
VIF_TYPE = None
|
|
||||||
CAP_PORT_FILTER = None
|
|
||||||
AGENT_TYPE = None
|
|
||||||
AGENTS = None
|
|
||||||
AGENTS_DEAD = None
|
|
||||||
AGENTS_BAD = None
|
|
||||||
|
|
||||||
def _check_unbound(self, context):
|
|
||||||
self.assertIsNone(context._bound_segment_id)
|
|
||||||
self.assertIsNone(context._bound_vif_type)
|
|
||||||
self.assertIsNone(context._bound_vif_details)
|
|
||||||
|
|
||||||
def _check_bound(self, context, segment):
|
|
||||||
self.assertEqual(context._bound_segment_id, segment[api.ID])
|
|
||||||
self.assertEqual(context._bound_vif_type, self.VIF_TYPE)
|
|
||||||
vif_details = context._bound_vif_details
|
|
||||||
self.assertIsNotNone(vif_details)
|
|
||||||
self.assertEqual(vif_details[portbindings.CAP_PORT_FILTER],
|
|
||||||
self.CAP_PORT_FILTER)
|
|
||||||
|
|
||||||
|
|
||||||
class AgentMechanismGenericTestCase(AgentMechanismBaseTestCase):
|
|
||||||
UNKNOWN_TYPE_SEGMENTS = [{api.ID: 'unknown_segment_id',
|
|
||||||
api.NETWORK_TYPE: 'no_such_type'}]
|
|
||||||
|
|
||||||
def test_unknown_type(self):
|
|
||||||
context = FakePortContext(self.AGENT_TYPE,
|
|
||||||
self.AGENTS,
|
|
||||||
self.UNKNOWN_TYPE_SEGMENTS)
|
|
||||||
self.driver.bind_port(context)
|
|
||||||
self._check_unbound(context)
|
|
||||||
|
|
||||||
|
|
||||||
class AgentMechanismLocalTestCase(AgentMechanismBaseTestCase):
|
|
||||||
LOCAL_SEGMENTS = [{api.ID: 'unknown_segment_id',
|
|
||||||
api.NETWORK_TYPE: 'no_such_type'},
|
|
||||||
{api.ID: 'local_segment_id',
|
|
||||||
api.NETWORK_TYPE: 'local'}]
|
|
||||||
|
|
||||||
def test_type_local(self):
|
|
||||||
context = FakePortContext(self.AGENT_TYPE,
|
|
||||||
self.AGENTS,
|
|
||||||
self.LOCAL_SEGMENTS)
|
|
||||||
self.driver.bind_port(context)
|
|
||||||
self._check_bound(context, self.LOCAL_SEGMENTS[1])
|
|
||||||
|
|
||||||
def test_type_local_dead(self):
|
|
||||||
context = FakePortContext(self.AGENT_TYPE,
|
|
||||||
self.AGENTS_DEAD,
|
|
||||||
self.LOCAL_SEGMENTS)
|
|
||||||
self.driver.bind_port(context)
|
|
||||||
self._check_unbound(context)
|
|
||||||
|
|
||||||
|
|
||||||
class AgentMechanismFlatTestCase(AgentMechanismBaseTestCase):
|
|
||||||
FLAT_SEGMENTS = [{api.ID: 'unknown_segment_id',
|
|
||||||
api.NETWORK_TYPE: 'no_such_type'},
|
|
||||||
{api.ID: 'flat_segment_id',
|
|
||||||
api.NETWORK_TYPE: 'flat',
|
|
||||||
api.PHYSICAL_NETWORK: 'fake_physical_network'}]
|
|
||||||
|
|
||||||
def test_type_flat(self):
|
|
||||||
context = FakePortContext(self.AGENT_TYPE,
|
|
||||||
self.AGENTS,
|
|
||||||
self.FLAT_SEGMENTS)
|
|
||||||
self.driver.bind_port(context)
|
|
||||||
self._check_bound(context, self.FLAT_SEGMENTS[1])
|
|
||||||
|
|
||||||
def test_type_flat_bad(self):
|
|
||||||
context = FakePortContext(self.AGENT_TYPE,
|
|
||||||
self.AGENTS_BAD,
|
|
||||||
self.FLAT_SEGMENTS)
|
|
||||||
self.driver.bind_port(context)
|
|
||||||
self._check_unbound(context)
|
|
||||||
|
|
||||||
|
|
||||||
class AgentMechanismVlanTestCase(AgentMechanismBaseTestCase):
|
|
||||||
VLAN_SEGMENTS = [{api.ID: 'unknown_segment_id',
|
|
||||||
api.NETWORK_TYPE: 'no_such_type'},
|
|
||||||
{api.ID: 'vlan_segment_id',
|
|
||||||
api.NETWORK_TYPE: 'vlan',
|
|
||||||
api.PHYSICAL_NETWORK: 'fake_physical_network',
|
|
||||||
api.SEGMENTATION_ID: 1234}]
|
|
||||||
|
|
||||||
def test_type_vlan(self):
|
|
||||||
context = FakePortContext(self.AGENT_TYPE,
|
|
||||||
self.AGENTS,
|
|
||||||
self.VLAN_SEGMENTS)
|
|
||||||
self.driver.bind_port(context)
|
|
||||||
self._check_bound(context, self.VLAN_SEGMENTS[1])
|
|
||||||
|
|
||||||
def test_type_vlan_bad(self):
|
|
||||||
context = FakePortContext(self.AGENT_TYPE,
|
|
||||||
self.AGENTS_BAD,
|
|
||||||
self.VLAN_SEGMENTS)
|
|
||||||
self.driver.bind_port(context)
|
|
||||||
self._check_unbound(context)
|
|
||||||
|
|
||||||
|
|
||||||
class AgentMechanismGreTestCase(AgentMechanismBaseTestCase):
|
|
||||||
GRE_SEGMENTS = [{api.ID: 'unknown_segment_id',
|
|
||||||
api.NETWORK_TYPE: 'no_such_type'},
|
|
||||||
{api.ID: 'gre_segment_id',
|
|
||||||
api.NETWORK_TYPE: 'gre',
|
|
||||||
api.SEGMENTATION_ID: 1234}]
|
|
||||||
|
|
||||||
def test_type_gre(self):
|
|
||||||
context = FakePortContext(self.AGENT_TYPE,
|
|
||||||
self.AGENTS,
|
|
||||||
self.GRE_SEGMENTS)
|
|
||||||
self.driver.bind_port(context)
|
|
||||||
self._check_bound(context, self.GRE_SEGMENTS[1])
|
|
||||||
|
|
||||||
def test_type_gre_bad(self):
|
|
||||||
context = FakePortContext(self.AGENT_TYPE,
|
|
||||||
self.AGENTS_BAD,
|
|
||||||
self.GRE_SEGMENTS)
|
|
||||||
self.driver.bind_port(context)
|
|
||||||
self._check_unbound(context)
|
|
|
@ -1,14 +0,0 @@
|
||||||
# Copyright (c) 2013 OpenStack Foundation
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
|
@ -1,69 +0,0 @@
|
||||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
||||||
# Copyright (c) 2013 OpenStack Foundation
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import mock
|
|
||||||
|
|
||||||
from neutron.openstack.common import importutils
|
|
||||||
from neutron.openstack.common import log as logging
|
|
||||||
from neutron.plugins.ml2 import config as ml2_config
|
|
||||||
from neutron.plugins.ml2.drivers.brocade import (mechanism_brocade
|
|
||||||
as brocademechanism)
|
|
||||||
from neutron.tests.unit import test_db_plugin
|
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
|
||||||
MECHANISM_NAME = ('neutron.plugins.ml2.'
|
|
||||||
'drivers.brocade.mechanism_brocade.BrocadeMechanism')
|
|
||||||
|
|
||||||
|
|
||||||
class TestBrocadeMechDriverV2(test_db_plugin.NeutronDbPluginV2TestCase):
|
|
||||||
"""Test Brocade VCS/VDX mechanism driver.
|
|
||||||
"""
|
|
||||||
|
|
||||||
_mechanism_name = MECHANISM_NAME
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
|
|
||||||
_mechanism_name = MECHANISM_NAME
|
|
||||||
|
|
||||||
ml2_opts = {
|
|
||||||
'mechanism_drivers': ['brocade'],
|
|
||||||
'tenant_network_types': ['vlan']}
|
|
||||||
|
|
||||||
for opt, val in ml2_opts.items():
|
|
||||||
ml2_config.cfg.CONF.set_override(opt, val, 'ml2')
|
|
||||||
|
|
||||||
def mocked_brocade_init(self):
|
|
||||||
self._driver = mock.MagicMock()
|
|
||||||
|
|
||||||
with mock.patch.object(brocademechanism.BrocadeMechanism,
|
|
||||||
'brocade_init', new=mocked_brocade_init):
|
|
||||||
super(TestBrocadeMechDriverV2, self).setUp()
|
|
||||||
self.mechanism_driver = importutils.import_object(_mechanism_name)
|
|
||||||
|
|
||||||
|
|
||||||
class TestBrocadeMechDriverNetworksV2(test_db_plugin.TestNetworksV2,
|
|
||||||
TestBrocadeMechDriverV2):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestBrocadeMechDriverPortsV2(test_db_plugin.TestPortsV2,
|
|
||||||
TestBrocadeMechDriverV2):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class TestBrocadeMechDriverSubnetsV2(test_db_plugin.TestSubnetsV2,
|
|
||||||
TestBrocadeMechDriverV2):
|
|
||||||
pass
|
|
|
@ -1,272 +0,0 @@
|
||||||
# Copyright (c) 2014 Cisco Systems
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Henry Gessau, Cisco Systems
|
|
||||||
|
|
||||||
import mock
|
|
||||||
import requests
|
|
||||||
import requests.exceptions
|
|
||||||
|
|
||||||
from neutron.plugins.ml2.drivers.cisco.apic import apic_client as apic
|
|
||||||
from neutron.plugins.ml2.drivers.cisco.apic import exceptions as cexc
|
|
||||||
from neutron.tests import base
|
|
||||||
from neutron.tests.unit.ml2.drivers.cisco.apic import (
|
|
||||||
test_cisco_apic_common as mocked)
|
|
||||||
|
|
||||||
|
|
||||||
class TestCiscoApicClient(base.BaseTestCase, mocked.ControllerMixin):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestCiscoApicClient, self).setUp()
|
|
||||||
self.set_up_mocks()
|
|
||||||
self.apic = apic.RestClient(mocked.APIC_HOST)
|
|
||||||
self.addCleanup(mock.patch.stopall)
|
|
||||||
|
|
||||||
def _mock_authenticate(self, timeout=300):
|
|
||||||
self.reset_reponses()
|
|
||||||
self.mock_apic_manager_login_responses(timeout=timeout)
|
|
||||||
self.apic.login(mocked.APIC_USR, mocked.APIC_PWD)
|
|
||||||
|
|
||||||
def test_login_by_instantiation(self):
|
|
||||||
self.reset_reponses()
|
|
||||||
self.mock_apic_manager_login_responses()
|
|
||||||
apic2 = apic.RestClient(mocked.APIC_HOST,
|
|
||||||
usr=mocked.APIC_USR, pwd=mocked.APIC_PWD)
|
|
||||||
self.assertIsNotNone(apic2.authentication)
|
|
||||||
self.assertEqual(apic2.username, mocked.APIC_USR)
|
|
||||||
|
|
||||||
def test_client_session_login_ok(self):
|
|
||||||
self._mock_authenticate()
|
|
||||||
self.assertEqual(
|
|
||||||
self.apic.authentication['userName'], mocked.APIC_USR)
|
|
||||||
self.assertTrue(self.apic.api_base.startswith('http://'))
|
|
||||||
self.assertEqual(self.apic.username, mocked.APIC_USR)
|
|
||||||
self.assertIsNotNone(self.apic.authentication)
|
|
||||||
self.apic = apic.RestClient(mocked.APIC_HOST, mocked.APIC_PORT,
|
|
||||||
ssl=True)
|
|
||||||
self.assertTrue(self.apic.api_base.startswith('https://'))
|
|
||||||
|
|
||||||
def test_client_session_login_fail(self):
|
|
||||||
self.mock_error_post_response(requests.codes.unauthorized,
|
|
||||||
code='599',
|
|
||||||
text=u'Fake error')
|
|
||||||
self.assertRaises(cexc.ApicResponseNotOk, self.apic.login,
|
|
||||||
mocked.APIC_USR, mocked.APIC_PWD)
|
|
||||||
|
|
||||||
def test_client_session_login_timeout(self):
|
|
||||||
self.response['post'].append(requests.exceptions.Timeout)
|
|
||||||
self.assertRaises(cexc.ApicHostNoResponse, self.apic.login,
|
|
||||||
mocked.APIC_USR, mocked.APIC_PWD)
|
|
||||||
|
|
||||||
def test_client_session_logout_ok(self):
|
|
||||||
self.mock_response_for_post('aaaLogout')
|
|
||||||
self.apic.logout()
|
|
||||||
self.assertIsNone(self.apic.authentication)
|
|
||||||
# Multiple signouts should not cause an error
|
|
||||||
self.apic.logout()
|
|
||||||
self.assertIsNone(self.apic.authentication)
|
|
||||||
|
|
||||||
def test_client_session_logout_fail(self):
|
|
||||||
self._mock_authenticate()
|
|
||||||
self.mock_error_post_response(requests.codes.timeout,
|
|
||||||
code='123', text='failed')
|
|
||||||
self.assertRaises(cexc.ApicResponseNotOk, self.apic.logout)
|
|
||||||
|
|
||||||
def test_query_not_logged_in(self):
|
|
||||||
self.apic.authentication = None
|
|
||||||
self.assertRaises(cexc.ApicSessionNotLoggedIn,
|
|
||||||
self.apic.fvTenant.get, mocked.APIC_TENANT)
|
|
||||||
|
|
||||||
def test_query_no_response(self):
|
|
||||||
self._mock_authenticate()
|
|
||||||
requests.Session.get = mock.Mock(return_value=None)
|
|
||||||
self.assertRaises(cexc.ApicHostNoResponse,
|
|
||||||
self.apic.fvTenant.get, mocked.APIC_TENANT)
|
|
||||||
|
|
||||||
def test_query_error_response_no_data(self):
|
|
||||||
self._mock_authenticate()
|
|
||||||
self.mock_error_get_response(requests.codes.bad) # No error attrs.
|
|
||||||
self.assertRaises(cexc.ApicResponseNotOk,
|
|
||||||
self.apic.fvTenant.get, mocked.APIC_TENANT)
|
|
||||||
|
|
||||||
def test_generic_get_data(self):
|
|
||||||
self._mock_authenticate()
|
|
||||||
self.mock_response_for_get('topSystem', name='ifc1')
|
|
||||||
top_system = self.apic.get_data('class/topSystem')
|
|
||||||
self.assertIsNotNone(top_system)
|
|
||||||
name = top_system[0]['topSystem']['attributes']['name']
|
|
||||||
self.assertEqual(name, 'ifc1')
|
|
||||||
|
|
||||||
def test_session_timeout_refresh_ok(self):
|
|
||||||
self._mock_authenticate(timeout=-1)
|
|
||||||
# Client will do refresh before getting tenant
|
|
||||||
self.mock_response_for_get('aaaLogin', token='ok',
|
|
||||||
refreshTimeoutSeconds=300)
|
|
||||||
self.mock_response_for_get('fvTenant', name=mocked.APIC_TENANT)
|
|
||||||
tenant = self.apic.fvTenant.get(mocked.APIC_TENANT)
|
|
||||||
self.assertEqual(tenant['name'], mocked.APIC_TENANT)
|
|
||||||
|
|
||||||
def test_session_timeout_refresh_no_cookie(self):
|
|
||||||
self._mock_authenticate(timeout=-1)
|
|
||||||
# Client will do refresh before getting tenant
|
|
||||||
self.mock_response_for_get('aaaLogin', notoken='test')
|
|
||||||
self.assertRaises(cexc.ApicResponseNoCookie,
|
|
||||||
self.apic.fvTenant.get, mocked.APIC_TENANT)
|
|
||||||
|
|
||||||
def test_session_timeout_refresh_error(self):
|
|
||||||
self._mock_authenticate(timeout=-1)
|
|
||||||
self.mock_error_get_response(requests.codes.timeout,
|
|
||||||
code='503', text=u'timed out')
|
|
||||||
self.assertRaises(cexc.ApicResponseNotOk,
|
|
||||||
self.apic.fvTenant.get, mocked.APIC_TENANT)
|
|
||||||
|
|
||||||
def test_session_timeout_refresh_timeout_error(self):
|
|
||||||
self._mock_authenticate(timeout=-1)
|
|
||||||
# Client will try to get refresh, we fake a refresh error.
|
|
||||||
self.mock_error_get_response(requests.codes.bad_request,
|
|
||||||
code='403',
|
|
||||||
text=u'Token was invalid. Expired.')
|
|
||||||
# Client will then try to re-login.
|
|
||||||
self.mock_apic_manager_login_responses()
|
|
||||||
# Finally the client will try to get the tenant.
|
|
||||||
self.mock_response_for_get('fvTenant', name=mocked.APIC_TENANT)
|
|
||||||
tenant = self.apic.fvTenant.get(mocked.APIC_TENANT)
|
|
||||||
self.assertEqual(tenant['name'], mocked.APIC_TENANT)
|
|
||||||
|
|
||||||
def test_lookup_mo_bad_token_retry(self):
|
|
||||||
self._mock_authenticate()
|
|
||||||
# For the first get request we mock a bad token.
|
|
||||||
self.mock_error_get_response(requests.codes.bad_request,
|
|
||||||
code='403',
|
|
||||||
text=u'Token was invalid. Expired.')
|
|
||||||
# Client will then try to re-login.
|
|
||||||
self.mock_apic_manager_login_responses()
|
|
||||||
# Then the client will retry to get the tenant.
|
|
||||||
self.mock_response_for_get('fvTenant', name=mocked.APIC_TENANT)
|
|
||||||
tenant = self.apic.fvTenant.get(mocked.APIC_TENANT)
|
|
||||||
self.assertEqual(tenant['name'], mocked.APIC_TENANT)
|
|
||||||
|
|
||||||
def test_use_unsupported_managed_object(self):
|
|
||||||
self._mock_authenticate()
|
|
||||||
# unittest.assertRaises cannot catch exceptions raised in
|
|
||||||
# __getattr__, so we need to defer the evaluation using lambda.
|
|
||||||
self.assertRaises(cexc.ApicManagedObjectNotSupported,
|
|
||||||
lambda: self.apic.nonexistentObject)
|
|
||||||
|
|
||||||
def test_lookup_nonexistant_mo(self):
|
|
||||||
self._mock_authenticate()
|
|
||||||
self.mock_response_for_get('fvTenant')
|
|
||||||
self.assertIsNone(self.apic.fvTenant.get(mocked.APIC_TENANT))
|
|
||||||
|
|
||||||
def test_lookup_existing_mo(self):
|
|
||||||
self._mock_authenticate()
|
|
||||||
self.mock_response_for_get('fvTenant', name='infra')
|
|
||||||
tenant = self.apic.fvTenant.get('infra')
|
|
||||||
self.assertEqual(tenant['name'], 'infra')
|
|
||||||
|
|
||||||
def test_list_mos_ok(self):
|
|
||||||
self._mock_authenticate()
|
|
||||||
self.mock_response_for_get('fvTenant', name='t1')
|
|
||||||
self.mock_append_to_response('fvTenant', name='t2')
|
|
||||||
tlist = self.apic.fvTenant.list_all()
|
|
||||||
self.assertIsNotNone(tlist)
|
|
||||||
self.assertEqual(len(tlist), 2)
|
|
||||||
self.assertIn({'name': 't1'}, tlist)
|
|
||||||
self.assertIn({'name': 't2'}, tlist)
|
|
||||||
|
|
||||||
def test_list_mo_names_ok(self):
|
|
||||||
self._mock_authenticate()
|
|
||||||
self.mock_response_for_get('fvTenant', name='t1')
|
|
||||||
self.mock_append_to_response('fvTenant', name='t2')
|
|
||||||
tnlist = self.apic.fvTenant.list_names()
|
|
||||||
self.assertIsNotNone(tnlist)
|
|
||||||
self.assertEqual(len(tnlist), 2)
|
|
||||||
self.assertIn('t1', tnlist)
|
|
||||||
self.assertIn('t2', tnlist)
|
|
||||||
|
|
||||||
def test_list_mos_split_class_fail(self):
|
|
||||||
self._mock_authenticate()
|
|
||||||
self.mock_response_for_get('fvnsEncapBlk', name='Blk1')
|
|
||||||
encap_blks = self.apic.fvnsEncapBlk__vlan.list_all()
|
|
||||||
self.assertEqual(len(encap_blks), 1)
|
|
||||||
|
|
||||||
def test_delete_mo_ok(self):
|
|
||||||
self._mock_authenticate()
|
|
||||||
self.mock_response_for_post('fvTenant')
|
|
||||||
self.assertTrue(self.apic.fvTenant.delete(mocked.APIC_TENANT))
|
|
||||||
|
|
||||||
def test_create_mo_ok(self):
|
|
||||||
self._mock_authenticate()
|
|
||||||
self.mock_response_for_post('fvTenant', name=mocked.APIC_TENANT)
|
|
||||||
self.mock_response_for_get('fvTenant', name=mocked.APIC_TENANT)
|
|
||||||
self.apic.fvTenant.create(mocked.APIC_TENANT)
|
|
||||||
tenant = self.apic.fvTenant.get(mocked.APIC_TENANT)
|
|
||||||
self.assertEqual(tenant['name'], mocked.APIC_TENANT)
|
|
||||||
|
|
||||||
def test_create_mo_already_exists(self):
|
|
||||||
self._mock_authenticate()
|
|
||||||
self.mock_error_post_response(requests.codes.bad_request,
|
|
||||||
code='103',
|
|
||||||
text=u'Fake 103 error')
|
|
||||||
self.assertRaises(cexc.ApicResponseNotOk,
|
|
||||||
self.apic.vmmProvP.create, mocked.APIC_VMMP)
|
|
||||||
|
|
||||||
def test_create_mo_with_prereq(self):
|
|
||||||
self._mock_authenticate()
|
|
||||||
self.mock_response_for_post('fvTenant', name=mocked.APIC_TENANT)
|
|
||||||
self.mock_response_for_post('fvBD', name=mocked.APIC_NETWORK)
|
|
||||||
self.mock_response_for_get('fvBD', name=mocked.APIC_NETWORK)
|
|
||||||
bd_args = mocked.APIC_TENANT, mocked.APIC_NETWORK
|
|
||||||
self.apic.fvBD.create(*bd_args)
|
|
||||||
network = self.apic.fvBD.get(*bd_args)
|
|
||||||
self.assertEqual(network['name'], mocked.APIC_NETWORK)
|
|
||||||
|
|
||||||
def test_create_mo_prereq_exists(self):
|
|
||||||
self._mock_authenticate()
|
|
||||||
self.mock_response_for_post('vmmDomP', name=mocked.APIC_DOMAIN)
|
|
||||||
self.mock_response_for_get('vmmDomP', name=mocked.APIC_DOMAIN)
|
|
||||||
self.apic.vmmDomP.create(mocked.APIC_VMMP, mocked.APIC_DOMAIN)
|
|
||||||
dom = self.apic.vmmDomP.get(mocked.APIC_VMMP, mocked.APIC_DOMAIN)
|
|
||||||
self.assertEqual(dom['name'], mocked.APIC_DOMAIN)
|
|
||||||
|
|
||||||
def test_create_mo_fails(self):
|
|
||||||
self._mock_authenticate()
|
|
||||||
self.mock_response_for_post('fvTenant', name=mocked.APIC_TENANT)
|
|
||||||
self.mock_error_post_response(requests.codes.bad_request,
|
|
||||||
code='not103',
|
|
||||||
text=u'Fake not103 error')
|
|
||||||
bd_args = mocked.APIC_TENANT, mocked.APIC_NETWORK
|
|
||||||
self.assertRaises(cexc.ApicResponseNotOk,
|
|
||||||
self.apic.fvBD.create, *bd_args)
|
|
||||||
|
|
||||||
def test_update_mo(self):
|
|
||||||
self._mock_authenticate()
|
|
||||||
self.mock_response_for_post('fvTenant', name=mocked.APIC_TENANT)
|
|
||||||
self.mock_response_for_get('fvTenant', name=mocked.APIC_TENANT,
|
|
||||||
more='extra')
|
|
||||||
self.apic.fvTenant.update(mocked.APIC_TENANT, more='extra')
|
|
||||||
tenant = self.apic.fvTenant.get(mocked.APIC_TENANT)
|
|
||||||
self.assertEqual(tenant['name'], mocked.APIC_TENANT)
|
|
||||||
self.assertEqual(tenant['more'], 'extra')
|
|
||||||
|
|
||||||
def test_attr_fail_empty_list(self):
|
|
||||||
self._mock_authenticate()
|
|
||||||
self.mock_response_for_get('fvTenant') # No attrs for tenant.
|
|
||||||
self.assertIsNone(self.apic.fvTenant.get(mocked.APIC_TENANT))
|
|
||||||
|
|
||||||
def test_attr_fail_other_obj(self):
|
|
||||||
self._mock_authenticate()
|
|
||||||
self.mock_response_for_get('other', name=mocked.APIC_TENANT)
|
|
||||||
self.assertIsNone(self.apic.fvTenant.get(mocked.APIC_TENANT))
|
|
|
@ -1,225 +0,0 @@
|
||||||
# Copyright (c) 2014 Cisco Systems
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Henry Gessau, Cisco Systems
|
|
||||||
|
|
||||||
import mock
|
|
||||||
import requests
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from neutron.common import config as neutron_config
|
|
||||||
from neutron.plugins.ml2 import config as ml2_config
|
|
||||||
from neutron.plugins.ml2.drivers.cisco.apic import apic_client as apic
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
OK = requests.codes.ok
|
|
||||||
|
|
||||||
APIC_HOST = 'fake.controller.local'
|
|
||||||
APIC_PORT = 7580
|
|
||||||
APIC_USR = 'notadmin'
|
|
||||||
APIC_PWD = 'topsecret'
|
|
||||||
|
|
||||||
APIC_TENANT = 'citizen14'
|
|
||||||
APIC_NETWORK = 'network99'
|
|
||||||
APIC_NETNAME = 'net99name'
|
|
||||||
APIC_SUBNET = '10.3.2.1/24'
|
|
||||||
APIC_L3CTX = 'layer3context'
|
|
||||||
APIC_AP = 'appProfile001'
|
|
||||||
APIC_EPG = 'endPointGroup001'
|
|
||||||
|
|
||||||
APIC_CONTRACT = 'signedContract'
|
|
||||||
APIC_SUBJECT = 'testSubject'
|
|
||||||
APIC_FILTER = 'carbonFilter'
|
|
||||||
APIC_ENTRY = 'forcedEntry'
|
|
||||||
|
|
||||||
APIC_VMMP = 'OpenStack'
|
|
||||||
APIC_DOMAIN = 'cumuloNimbus'
|
|
||||||
APIC_PDOM = 'rainStorm'
|
|
||||||
|
|
||||||
APIC_NODE_PROF = 'red'
|
|
||||||
APIC_LEAF = 'green'
|
|
||||||
APIC_LEAF_TYPE = 'range'
|
|
||||||
APIC_NODE_BLK = 'blue'
|
|
||||||
APIC_PORT_PROF = 'yellow'
|
|
||||||
APIC_PORT_SEL = 'front'
|
|
||||||
APIC_PORT_TYPE = 'range'
|
|
||||||
APIC_PORT_BLK1 = 'block01'
|
|
||||||
APIC_PORT_BLK2 = 'block02'
|
|
||||||
APIC_ACC_PORT_GRP = 'alpha'
|
|
||||||
APIC_FUNC_PROF = 'beta'
|
|
||||||
APIC_ATT_ENT_PROF = 'delta'
|
|
||||||
APIC_VLAN_NAME = 'gamma'
|
|
||||||
APIC_VLAN_MODE = 'dynamic'
|
|
||||||
APIC_VLANID_FROM = 2900
|
|
||||||
APIC_VLANID_TO = 2999
|
|
||||||
APIC_VLAN_FROM = 'vlan-%d' % APIC_VLANID_FROM
|
|
||||||
APIC_VLAN_TO = 'vlan-%d' % APIC_VLANID_TO
|
|
||||||
|
|
||||||
|
|
||||||
class ControllerMixin(object):
|
|
||||||
|
|
||||||
"""Mock the controller for APIC driver and service unit tests."""
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
self.response = None
|
|
||||||
|
|
||||||
def set_up_mocks(self):
|
|
||||||
# The mocked responses from the server are lists used by
|
|
||||||
# mock.side_effect, which means each call to post or get will
|
|
||||||
# return the next item in the list. This allows the test cases
|
|
||||||
# to stage a sequence of responses to method(s) under test.
|
|
||||||
self.response = {'post': [], 'get': []}
|
|
||||||
self.reset_reponses()
|
|
||||||
|
|
||||||
def reset_reponses(self, req=None):
|
|
||||||
# Clear all staged responses.
|
|
||||||
reqs = req and [req] or ['post', 'get'] # Both if none specified.
|
|
||||||
for req in reqs:
|
|
||||||
del self.response[req][:]
|
|
||||||
self.restart_responses(req)
|
|
||||||
|
|
||||||
def restart_responses(self, req):
|
|
||||||
responses = mock.MagicMock(side_effect=self.response[req])
|
|
||||||
if req == 'post':
|
|
||||||
requests.Session.post = responses
|
|
||||||
elif req == 'get':
|
|
||||||
requests.Session.get = responses
|
|
||||||
|
|
||||||
def mock_response_for_post(self, mo, **attrs):
|
|
||||||
attrs['debug_mo'] = mo # useful for debugging
|
|
||||||
self._stage_mocked_response('post', OK, mo, **attrs)
|
|
||||||
|
|
||||||
def mock_response_for_get(self, mo, **attrs):
|
|
||||||
self._stage_mocked_response('get', OK, mo, **attrs)
|
|
||||||
|
|
||||||
def mock_append_to_response(self, mo, **attrs):
|
|
||||||
# Append a MO to the last get response.
|
|
||||||
mo_attrs = attrs and {mo: {'attributes': attrs}} or {}
|
|
||||||
self.response['get'][-1].json.return_value['imdata'].append(mo_attrs)
|
|
||||||
|
|
||||||
def mock_error_post_response(self, status, **attrs):
|
|
||||||
self._stage_mocked_response('post', status, 'error', **attrs)
|
|
||||||
|
|
||||||
def mock_error_get_response(self, status, **attrs):
|
|
||||||
self._stage_mocked_response('get', status, 'error', **attrs)
|
|
||||||
|
|
||||||
def _stage_mocked_response(self, req, mock_status, mo, **attrs):
|
|
||||||
response = mock.MagicMock()
|
|
||||||
response.status_code = mock_status
|
|
||||||
mo_attrs = attrs and [{mo: {'attributes': attrs}}] or []
|
|
||||||
response.json.return_value = {'imdata': mo_attrs}
|
|
||||||
self.response[req].append(response)
|
|
||||||
|
|
||||||
def mock_responses_for_create(self, obj):
|
|
||||||
self._mock_container_responses_for_create(
|
|
||||||
apic.ManagedObjectClass(obj).container)
|
|
||||||
name = '-'.join([obj, 'name']) # useful for debugging
|
|
||||||
self._stage_mocked_response('post', OK, obj, name=name)
|
|
||||||
|
|
||||||
def _mock_container_responses_for_create(self, obj):
|
|
||||||
# Recursively generate responses for creating obj's containers.
|
|
||||||
if obj:
|
|
||||||
mo = apic.ManagedObjectClass(obj)
|
|
||||||
if mo.can_create:
|
|
||||||
if mo.container:
|
|
||||||
self._mock_container_responses_for_create(mo.container)
|
|
||||||
name = '-'.join([obj, 'name']) # useful for debugging
|
|
||||||
self._stage_mocked_response('post', OK, obj, debug_name=name)
|
|
||||||
|
|
||||||
def mock_apic_manager_login_responses(self, timeout=300):
|
|
||||||
# APIC Manager tests are based on authenticated session
|
|
||||||
self.mock_response_for_post('aaaLogin', userName=APIC_USR,
|
|
||||||
token='ok', refreshTimeoutSeconds=timeout)
|
|
||||||
|
|
||||||
def assert_responses_drained(self, req=None):
|
|
||||||
"""Fail if all the expected responses have not been consumed."""
|
|
||||||
request = {'post': self.session.post, 'get': self.session.get}
|
|
||||||
reqs = req and [req] or ['post', 'get'] # Both if none specified.
|
|
||||||
for req in reqs:
|
|
||||||
try:
|
|
||||||
request[req]('some url')
|
|
||||||
except StopIteration:
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
# User-friendly error message
|
|
||||||
msg = req + ' response queue not drained'
|
|
||||||
self.fail(msg=msg)
|
|
||||||
|
|
||||||
|
|
||||||
class ConfigMixin(object):
|
|
||||||
|
|
||||||
"""Mock the config for APIC driver and service unit tests."""
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
self.mocked_parser = None
|
|
||||||
|
|
||||||
def set_up_mocks(self):
|
|
||||||
# Mock the configuration file
|
|
||||||
args = ['--config-file', base.etcdir('neutron.conf.test')]
|
|
||||||
neutron_config.init(args=args)
|
|
||||||
|
|
||||||
# Configure the ML2 mechanism drivers and network types
|
|
||||||
ml2_opts = {
|
|
||||||
'mechanism_drivers': ['apic'],
|
|
||||||
'tenant_network_types': ['vlan'],
|
|
||||||
}
|
|
||||||
for opt, val in ml2_opts.items():
|
|
||||||
ml2_config.cfg.CONF.set_override(opt, val, 'ml2')
|
|
||||||
|
|
||||||
# Configure the Cisco APIC mechanism driver
|
|
||||||
apic_test_config = {
|
|
||||||
'apic_host': APIC_HOST,
|
|
||||||
'apic_username': APIC_USR,
|
|
||||||
'apic_password': APIC_PWD,
|
|
||||||
'apic_port': APIC_PORT,
|
|
||||||
'apic_vmm_domain': APIC_DOMAIN,
|
|
||||||
'apic_vlan_ns_name': APIC_VLAN_NAME,
|
|
||||||
'apic_vlan_range': '%d:%d' % (APIC_VLANID_FROM, APIC_VLANID_TO),
|
|
||||||
'apic_node_profile': APIC_NODE_PROF,
|
|
||||||
'apic_entity_profile': APIC_ATT_ENT_PROF,
|
|
||||||
'apic_function_profile': APIC_FUNC_PROF,
|
|
||||||
}
|
|
||||||
for opt, val in apic_test_config.items():
|
|
||||||
cfg.CONF.set_override(opt, val, 'ml2_cisco_apic')
|
|
||||||
|
|
||||||
apic_switch_cfg = {
|
|
||||||
'apic_switch:east01': {'ubuntu1,ubuntu2': ['3/11']},
|
|
||||||
'apic_switch:east02': {'rhel01,rhel02': ['4/21'],
|
|
||||||
'rhel03': ['4/22']},
|
|
||||||
}
|
|
||||||
self.mocked_parser = mock.patch.object(cfg,
|
|
||||||
'MultiConfigParser').start()
|
|
||||||
self.mocked_parser.return_value.read.return_value = [apic_switch_cfg]
|
|
||||||
self.mocked_parser.return_value.parsed = [apic_switch_cfg]
|
|
||||||
|
|
||||||
|
|
||||||
class DbModelMixin(object):
|
|
||||||
|
|
||||||
"""Mock the DB models for the APIC driver and service unit tests."""
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
self.mocked_session = None
|
|
||||||
|
|
||||||
def set_up_mocks(self):
|
|
||||||
self.mocked_session = mock.Mock()
|
|
||||||
get_session = mock.patch('neutron.db.api.get_session').start()
|
|
||||||
get_session.return_value = self.mocked_session
|
|
||||||
|
|
||||||
def mock_db_query_filterby_first_return(self, value):
|
|
||||||
"""Mock db.session.query().filterby().first() to return value."""
|
|
||||||
query = self.mocked_session.query.return_value
|
|
||||||
query.filter_by.return_value.first.return_value = value
|
|
|
@ -1,698 +0,0 @@
|
||||||
# Copyright (c) 2014 Cisco Systems
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Henry Gessau, Cisco Systems
|
|
||||||
|
|
||||||
import mock
|
|
||||||
from webob import exc as wexc
|
|
||||||
|
|
||||||
from neutron.openstack.common import uuidutils
|
|
||||||
|
|
||||||
from neutron.plugins.ml2.drivers.cisco.apic import apic_manager
|
|
||||||
from neutron.plugins.ml2.drivers.cisco.apic import exceptions as cexc
|
|
||||||
from neutron.tests import base
|
|
||||||
from neutron.tests.unit.ml2.drivers.cisco.apic import (
|
|
||||||
test_cisco_apic_common as mocked)
|
|
||||||
|
|
||||||
|
|
||||||
class TestCiscoApicManager(base.BaseTestCase,
|
|
||||||
mocked.ControllerMixin,
|
|
||||||
mocked.ConfigMixin,
|
|
||||||
mocked.DbModelMixin):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestCiscoApicManager, self).setUp()
|
|
||||||
mocked.ControllerMixin.set_up_mocks(self)
|
|
||||||
mocked.ConfigMixin.set_up_mocks(self)
|
|
||||||
mocked.DbModelMixin.set_up_mocks(self)
|
|
||||||
|
|
||||||
self.mock_apic_manager_login_responses()
|
|
||||||
self.mgr = apic_manager.APICManager()
|
|
||||||
self.session = self.mgr.apic.session
|
|
||||||
self.assert_responses_drained()
|
|
||||||
self.reset_reponses()
|
|
||||||
|
|
||||||
def test_mgr_session_login(self):
|
|
||||||
login = self.mgr.apic.authentication
|
|
||||||
self.assertEqual(login['userName'], mocked.APIC_USR)
|
|
||||||
|
|
||||||
def test_mgr_session_logout(self):
|
|
||||||
self.mock_response_for_post('aaaLogout')
|
|
||||||
self.mgr.apic.logout()
|
|
||||||
self.assert_responses_drained()
|
|
||||||
self.assertIsNone(self.mgr.apic.authentication)
|
|
||||||
|
|
||||||
def test_to_range(self):
|
|
||||||
port_list = [4, 2, 3, 1, 7, 8, 10, 20, 6, 22, 21]
|
|
||||||
expected_ranges = [(1, 4), (6, 8), (10, 10), (20, 22)]
|
|
||||||
port_ranges = [r for r in apic_manager.group_by_ranges(port_list)]
|
|
||||||
self.assertEqual(port_ranges, expected_ranges)
|
|
||||||
|
|
||||||
def test_get_profiles(self):
|
|
||||||
self.mock_db_query_filterby_first_return('faked')
|
|
||||||
self.assertEqual(
|
|
||||||
self.mgr.db.get_port_profile_for_node('node'),
|
|
||||||
'faked'
|
|
||||||
)
|
|
||||||
self.assertEqual(
|
|
||||||
self.mgr.db.get_profile_for_module('node', 'prof', 'module'),
|
|
||||||
'faked'
|
|
||||||
)
|
|
||||||
self.assertEqual(
|
|
||||||
self.mgr.db.get_profile_for_module_and_ports(
|
|
||||||
'node', 'prof', 'module', 'from', 'to'
|
|
||||||
),
|
|
||||||
'faked'
|
|
||||||
)
|
|
||||||
|
|
||||||
def test_add_profile(self):
|
|
||||||
self.mgr.db.add_profile_for_module_and_ports(
|
|
||||||
'node', 'prof', 'hpselc', 'module', 'from', 'to')
|
|
||||||
self.assertTrue(self.mocked_session.add.called)
|
|
||||||
self.assertTrue(self.mocked_session.flush.called)
|
|
||||||
|
|
||||||
def test_ensure_port_profile_created(self):
|
|
||||||
port_name = mocked.APIC_PORT
|
|
||||||
self.mock_responses_for_create('infraAccPortP')
|
|
||||||
self.mock_response_for_get('infraAccPortP', name=port_name)
|
|
||||||
port = self.mgr.ensure_port_profile_created_on_apic(port_name)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
self.assertEqual(port['name'], port_name)
|
|
||||||
|
|
||||||
def test_ensure_port_profile_created_exc(self):
|
|
||||||
port_name = mocked.APIC_PORT
|
|
||||||
self.mock_error_post_response(wexc.HTTPBadRequest)
|
|
||||||
self.mock_response_for_post('infraAccPortP')
|
|
||||||
self.assertRaises(cexc.ApicResponseNotOk,
|
|
||||||
self.mgr.ensure_port_profile_created_on_apic,
|
|
||||||
port_name)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_ensure_node_profile_created_for_switch_old(self):
|
|
||||||
old_switch = mocked.APIC_NODE_PROF
|
|
||||||
self.mock_response_for_get('infraNodeP', name=old_switch)
|
|
||||||
self.mgr.ensure_node_profile_created_for_switch(old_switch)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
old_name = self.mgr.node_profiles[old_switch]['object']['name']
|
|
||||||
self.assertEqual(old_name, old_switch)
|
|
||||||
|
|
||||||
def test_ensure_node_profile_created_for_switch_new(self):
|
|
||||||
new_switch = mocked.APIC_NODE_PROF
|
|
||||||
self.mock_response_for_get('infraNodeP')
|
|
||||||
self.mock_responses_for_create('infraNodeP')
|
|
||||||
self.mock_responses_for_create('infraLeafS')
|
|
||||||
self.mock_responses_for_create('infraNodeBlk')
|
|
||||||
self.mock_response_for_get('infraNodeP', name=new_switch)
|
|
||||||
self.mgr.ensure_node_profile_created_for_switch(new_switch)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
new_name = self.mgr.node_profiles[new_switch]['object']['name']
|
|
||||||
self.assertEqual(new_name, new_switch)
|
|
||||||
|
|
||||||
def test_ensure_node_profile_created_for_switch_new_exc(self):
|
|
||||||
new_switch = mocked.APIC_NODE_PROF
|
|
||||||
self.mock_response_for_get('infraNodeP')
|
|
||||||
self.mock_error_post_response(wexc.HTTPBadRequest)
|
|
||||||
self.mock_response_for_post('infraNodeP')
|
|
||||||
self.assertRaises(cexc.ApicResponseNotOk,
|
|
||||||
self.mgr.ensure_node_profile_created_for_switch,
|
|
||||||
new_switch)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_ensure_vmm_domain_created_old(self):
|
|
||||||
dom = mocked.APIC_DOMAIN
|
|
||||||
self.mock_response_for_get('vmmDomP', name=dom)
|
|
||||||
self.mgr.ensure_vmm_domain_created_on_apic(dom)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
old_dom = self.mgr.vmm_domain['name']
|
|
||||||
self.assertEqual(old_dom, dom)
|
|
||||||
|
|
||||||
def _mock_new_vmm_dom_responses(self, dom, seg_type=None):
|
|
||||||
vmm = mocked.APIC_VMMP
|
|
||||||
dn = self.mgr.apic.vmmDomP.mo.dn(vmm, dom)
|
|
||||||
self.mock_response_for_get('vmmDomP')
|
|
||||||
self.mock_responses_for_create('vmmDomP')
|
|
||||||
if seg_type:
|
|
||||||
self.mock_responses_for_create(seg_type)
|
|
||||||
self.mock_response_for_get('vmmDomP', name=dom, dn=dn)
|
|
||||||
|
|
||||||
def test_ensure_vmm_domain_created_new_no_vlan_ns(self):
|
|
||||||
dom = mocked.APIC_DOMAIN
|
|
||||||
self._mock_new_vmm_dom_responses(dom)
|
|
||||||
self.mgr.ensure_vmm_domain_created_on_apic(dom)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
new_dom = self.mgr.vmm_domain['name']
|
|
||||||
self.assertEqual(new_dom, dom)
|
|
||||||
|
|
||||||
def test_ensure_vmm_domain_created_new_no_vlan_ns_exc(self):
|
|
||||||
dom = mocked.APIC_DOMAIN
|
|
||||||
self.mock_response_for_get('vmmDomP')
|
|
||||||
self.mock_error_post_response(wexc.HTTPBadRequest)
|
|
||||||
self.mock_response_for_post('vmmDomP')
|
|
||||||
self.assertRaises(cexc.ApicResponseNotOk,
|
|
||||||
self.mgr.ensure_vmm_domain_created_on_apic, dom)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_ensure_vmm_domain_created_new_with_vlan_ns(self):
|
|
||||||
dom = mocked.APIC_DOMAIN
|
|
||||||
self._mock_new_vmm_dom_responses(dom, seg_type='infraRsVlanNs__vmm')
|
|
||||||
ns = {'dn': 'test_vlan_ns'}
|
|
||||||
self.mgr.ensure_vmm_domain_created_on_apic(dom, vlan_ns=ns)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
new_dom = self.mgr.vmm_domain['name']
|
|
||||||
self.assertEqual(new_dom, dom)
|
|
||||||
|
|
||||||
def test_ensure_vmm_domain_created_new_with_vxlan_ns(self):
|
|
||||||
dom = mocked.APIC_DOMAIN
|
|
||||||
# TODO(Henry): mock seg_type vxlan when vxlan is ready
|
|
||||||
self._mock_new_vmm_dom_responses(dom, seg_type=None)
|
|
||||||
ns = {'dn': 'test_vxlan_ns'}
|
|
||||||
self.mgr.ensure_vmm_domain_created_on_apic(dom, vxlan_ns=ns)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
new_dom = self.mgr.vmm_domain['name']
|
|
||||||
self.assertEqual(new_dom, dom)
|
|
||||||
|
|
||||||
def test_ensure_infra_created_no_infra(self):
|
|
||||||
self.mgr.switch_dict = {}
|
|
||||||
self.mgr.ensure_infra_created_on_apic()
|
|
||||||
|
|
||||||
def _ensure_infra_created_seq1_setup(self):
|
|
||||||
am = 'neutron.plugins.ml2.drivers.cisco.apic.apic_manager.APICManager'
|
|
||||||
np_create_for_switch = mock.patch(
|
|
||||||
am + '.ensure_node_profile_created_for_switch').start()
|
|
||||||
self.mock_db_query_filterby_first_return(None)
|
|
||||||
pp_create_for_switch = mock.patch(
|
|
||||||
am + '.ensure_port_profile_created_on_apic').start()
|
|
||||||
pp_create_for_switch.return_value = {'dn': 'port_profile_dn'}
|
|
||||||
return np_create_for_switch, pp_create_for_switch
|
|
||||||
|
|
||||||
def test_ensure_infra_created_seq1(self):
|
|
||||||
np_create_for_switch, pp_create_for_switch = (
|
|
||||||
self._ensure_infra_created_seq1_setup())
|
|
||||||
|
|
||||||
def _profile_for_module(aswitch, ppn, module):
|
|
||||||
profile = mock.Mock()
|
|
||||||
profile.ppn = ppn
|
|
||||||
profile.hpselc_id = '-'.join([aswitch, module, 'hpselc_id'])
|
|
||||||
return profile
|
|
||||||
|
|
||||||
self.mgr.db.get_profile_for_module = mock.Mock(
|
|
||||||
side_effect=_profile_for_module)
|
|
||||||
self.mgr.db.get_profile_for_module_and_ports = mock.Mock(
|
|
||||||
return_value=None)
|
|
||||||
self.mgr.db.add_profile_for_module_and_ports = mock.Mock()
|
|
||||||
|
|
||||||
num_switches = len(self.mgr.switch_dict)
|
|
||||||
for loop in range(num_switches):
|
|
||||||
self.mock_responses_for_create('infraRsAccPortP')
|
|
||||||
self.mock_responses_for_create('infraPortBlk')
|
|
||||||
|
|
||||||
self.mgr.ensure_infra_created_on_apic()
|
|
||||||
self.assert_responses_drained()
|
|
||||||
self.assertEqual(np_create_for_switch.call_count, num_switches)
|
|
||||||
self.assertEqual(pp_create_for_switch.call_count, num_switches)
|
|
||||||
for switch in self.mgr.switch_dict:
|
|
||||||
np_create_for_switch.assert_any_call(switch)
|
|
||||||
|
|
||||||
def test_ensure_infra_created_seq1_exc(self):
|
|
||||||
np_create_for_switch, __ = self._ensure_infra_created_seq1_setup()
|
|
||||||
self.mock_error_post_response(wexc.HTTPBadRequest)
|
|
||||||
self.mock_response_for_post('infraAccPortP')
|
|
||||||
|
|
||||||
self.assertRaises(cexc.ApicResponseNotOk,
|
|
||||||
self.mgr.ensure_infra_created_on_apic)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
self.assertTrue(np_create_for_switch.called)
|
|
||||||
self.assertEqual(np_create_for_switch.call_count, 1)
|
|
||||||
|
|
||||||
def _ensure_infra_created_seq2_setup(self):
|
|
||||||
am = 'neutron.plugins.ml2.drivers.cisco.apic.apic_manager.APICManager'
|
|
||||||
np_create_for_switch = mock.patch(
|
|
||||||
am + '.ensure_node_profile_created_for_switch').start()
|
|
||||||
|
|
||||||
def _profile_for_node(aswitch):
|
|
||||||
profile = mock.Mock()
|
|
||||||
profile.profile_id = '-'.join([aswitch, 'profile_id'])
|
|
||||||
return profile
|
|
||||||
|
|
||||||
self.mgr.db.get_port_profile_for_node = mock.Mock(
|
|
||||||
side_effect=_profile_for_node)
|
|
||||||
self.mgr.db.get_profile_for_module = mock.Mock(
|
|
||||||
return_value=None)
|
|
||||||
self.mgr.function_profile = {'dn': 'dn'}
|
|
||||||
self.mgr.db.get_profile_for_module_and_ports = mock.Mock(
|
|
||||||
return_value=True)
|
|
||||||
|
|
||||||
return np_create_for_switch
|
|
||||||
|
|
||||||
def test_ensure_infra_created_seq2(self):
|
|
||||||
np_create_for_switch = self._ensure_infra_created_seq2_setup()
|
|
||||||
|
|
||||||
num_switches = len(self.mgr.switch_dict)
|
|
||||||
for loop in range(num_switches):
|
|
||||||
self.mock_responses_for_create('infraHPortS')
|
|
||||||
self.mock_responses_for_create('infraRsAccBaseGrp')
|
|
||||||
|
|
||||||
self.mgr.ensure_infra_created_on_apic()
|
|
||||||
self.assert_responses_drained()
|
|
||||||
self.assertEqual(np_create_for_switch.call_count, num_switches)
|
|
||||||
for switch in self.mgr.switch_dict:
|
|
||||||
np_create_for_switch.assert_any_call(switch)
|
|
||||||
|
|
||||||
def test_ensure_infra_created_seq2_exc(self):
|
|
||||||
np_create_for_switch = self._ensure_infra_created_seq2_setup()
|
|
||||||
|
|
||||||
self.mock_error_post_response(wexc.HTTPBadRequest)
|
|
||||||
self.mock_response_for_post('infraHPortS')
|
|
||||||
|
|
||||||
self.assertRaises(cexc.ApicResponseNotOk,
|
|
||||||
self.mgr.ensure_infra_created_on_apic)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
self.assertTrue(np_create_for_switch.called)
|
|
||||||
self.assertEqual(np_create_for_switch.call_count, 1)
|
|
||||||
|
|
||||||
def test_ensure_context_unenforced_new_ctx(self):
|
|
||||||
self.mock_response_for_get('fvCtx')
|
|
||||||
self.mock_responses_for_create('fvCtx')
|
|
||||||
self.mgr.ensure_context_unenforced()
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_ensure_context_unenforced_pref1(self):
|
|
||||||
self.mock_response_for_get('fvCtx', pcEnfPref='1')
|
|
||||||
self.mock_response_for_post('fvCtx')
|
|
||||||
self.mgr.ensure_context_unenforced()
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_ensure_context_unenforced_pref2(self):
|
|
||||||
self.mock_response_for_get('fvCtx', pcEnfPref='2')
|
|
||||||
self.mgr.ensure_context_unenforced()
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def _mock_vmm_dom_prereq(self, dom):
|
|
||||||
self._mock_new_vmm_dom_responses(dom)
|
|
||||||
self.mgr.ensure_vmm_domain_created_on_apic(dom)
|
|
||||||
|
|
||||||
def _mock_new_phys_dom_responses(self, dom, seg_type=None):
|
|
||||||
dn = self.mgr.apic.physDomP.mo.dn(dom)
|
|
||||||
self.mock_response_for_get('physDomP')
|
|
||||||
self.mock_responses_for_create('physDomP')
|
|
||||||
if seg_type:
|
|
||||||
self.mock_responses_for_create(seg_type)
|
|
||||||
self.mock_response_for_get('physDomP', name=dom, dn=dn)
|
|
||||||
|
|
||||||
def _mock_phys_dom_prereq(self, dom):
|
|
||||||
self._mock_new_phys_dom_responses(dom)
|
|
||||||
self.mgr.ensure_phys_domain_created_on_apic(dom)
|
|
||||||
|
|
||||||
def test_ensure_entity_profile_created_old(self):
|
|
||||||
ep = mocked.APIC_ATT_ENT_PROF
|
|
||||||
self.mock_response_for_get('infraAttEntityP', name=ep)
|
|
||||||
self.mgr.ensure_entity_profile_created_on_apic(ep)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def _mock_new_entity_profile(self, exc=None):
|
|
||||||
self.mock_response_for_get('infraAttEntityP')
|
|
||||||
self.mock_responses_for_create('infraAttEntityP')
|
|
||||||
self.mock_responses_for_create('infraRsDomP')
|
|
||||||
if exc:
|
|
||||||
self.mock_error_get_response(exc, code='103', text=u'Fail')
|
|
||||||
else:
|
|
||||||
self.mock_response_for_get('infraAttEntityP')
|
|
||||||
|
|
||||||
def test_ensure_entity_profile_created_new(self):
|
|
||||||
self._mock_phys_dom_prereq(mocked.APIC_PDOM)
|
|
||||||
ep = mocked.APIC_ATT_ENT_PROF
|
|
||||||
self._mock_new_entity_profile()
|
|
||||||
self.mgr.ensure_entity_profile_created_on_apic(ep)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_ensure_entity_profile_created_new_exc(self):
|
|
||||||
self._mock_phys_dom_prereq(mocked.APIC_PDOM)
|
|
||||||
ep = mocked.APIC_ATT_ENT_PROF
|
|
||||||
self._mock_new_entity_profile(exc=wexc.HTTPBadRequest)
|
|
||||||
self.mock_response_for_post('infraAttEntityP')
|
|
||||||
self.assertRaises(cexc.ApicResponseNotOk,
|
|
||||||
self.mgr.ensure_entity_profile_created_on_apic, ep)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def _mock_entity_profile_preqreq(self):
|
|
||||||
self._mock_phys_dom_prereq(mocked.APIC_PDOM)
|
|
||||||
ep = mocked.APIC_ATT_ENT_PROF
|
|
||||||
self._mock_new_entity_profile()
|
|
||||||
self.mgr.ensure_entity_profile_created_on_apic(ep)
|
|
||||||
|
|
||||||
def test_ensure_function_profile_created_old(self):
|
|
||||||
self._mock_entity_profile_preqreq()
|
|
||||||
fp = mocked.APIC_FUNC_PROF
|
|
||||||
self.mock_response_for_get('infraAccPortGrp', name=fp)
|
|
||||||
self.mgr.ensure_function_profile_created_on_apic(fp)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
old_fp = self.mgr.function_profile['name']
|
|
||||||
self.assertEqual(old_fp, fp)
|
|
||||||
|
|
||||||
def _mock_new_function_profile(self, fp):
|
|
||||||
dn = self.mgr.apic.infraAttEntityP.mo.dn(fp)
|
|
||||||
self.mock_responses_for_create('infraAccPortGrp')
|
|
||||||
self.mock_responses_for_create('infraRsAttEntP')
|
|
||||||
self.mock_response_for_get('infraAccPortGrp', name=fp, dn=dn)
|
|
||||||
|
|
||||||
def test_ensure_function_profile_created_new(self):
|
|
||||||
fp = mocked.APIC_FUNC_PROF
|
|
||||||
dn = self.mgr.apic.infraAttEntityP.mo.dn(fp)
|
|
||||||
self.mgr.entity_profile = {'dn': dn}
|
|
||||||
self.mock_response_for_get('infraAccPortGrp')
|
|
||||||
self.mock_responses_for_create('infraAccPortGrp')
|
|
||||||
self.mock_responses_for_create('infraRsAttEntP')
|
|
||||||
self.mock_response_for_get('infraAccPortGrp', name=fp, dn=dn)
|
|
||||||
self.mgr.ensure_function_profile_created_on_apic(fp)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
new_fp = self.mgr.function_profile['name']
|
|
||||||
self.assertEqual(new_fp, fp)
|
|
||||||
|
|
||||||
def test_ensure_function_profile_created_new_exc(self):
|
|
||||||
fp = mocked.APIC_FUNC_PROF
|
|
||||||
dn = self.mgr.apic.infraAttEntityP.mo.dn(fp)
|
|
||||||
self.mgr.entity_profile = {'dn': dn}
|
|
||||||
self.mock_response_for_get('infraAccPortGrp')
|
|
||||||
self.mock_error_post_response(wexc.HTTPBadRequest)
|
|
||||||
self.mock_response_for_post('infraAccPortGrp')
|
|
||||||
self.assertRaises(cexc.ApicResponseNotOk,
|
|
||||||
self.mgr.ensure_function_profile_created_on_apic, fp)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_ensure_vlan_ns_created_old(self):
|
|
||||||
ns = mocked.APIC_VLAN_NAME
|
|
||||||
mode = mocked.APIC_VLAN_MODE
|
|
||||||
self.mock_response_for_get('fvnsVlanInstP', name=ns, mode=mode)
|
|
||||||
new_ns = self.mgr.ensure_vlan_ns_created_on_apic(ns, '100', '199')
|
|
||||||
self.assert_responses_drained()
|
|
||||||
self.assertIsNone(new_ns)
|
|
||||||
|
|
||||||
def _mock_new_vlan_instance(self, ns, vlan_encap=None):
|
|
||||||
self.mock_responses_for_create('fvnsVlanInstP')
|
|
||||||
if vlan_encap:
|
|
||||||
self.mock_response_for_get('fvnsEncapBlk', **vlan_encap)
|
|
||||||
else:
|
|
||||||
self.mock_response_for_get('fvnsEncapBlk')
|
|
||||||
self.mock_responses_for_create('fvnsEncapBlk__vlan')
|
|
||||||
self.mock_response_for_get('fvnsVlanInstP', name=ns)
|
|
||||||
|
|
||||||
def test_ensure_vlan_ns_created_new_no_encap(self):
|
|
||||||
ns = mocked.APIC_VLAN_NAME
|
|
||||||
self.mock_response_for_get('fvnsVlanInstP')
|
|
||||||
self._mock_new_vlan_instance(ns)
|
|
||||||
new_ns = self.mgr.ensure_vlan_ns_created_on_apic(ns, '200', '299')
|
|
||||||
self.assert_responses_drained()
|
|
||||||
self.assertEqual(new_ns['name'], ns)
|
|
||||||
|
|
||||||
def test_ensure_vlan_ns_created_new_exc(self):
|
|
||||||
ns = mocked.APIC_VLAN_NAME
|
|
||||||
self.mock_response_for_get('fvnsVlanInstP')
|
|
||||||
self.mock_error_post_response(wexc.HTTPBadRequest)
|
|
||||||
self.mock_response_for_post('fvnsVlanInstP')
|
|
||||||
self.assertRaises(cexc.ApicResponseNotOk,
|
|
||||||
self.mgr.ensure_vlan_ns_created_on_apic,
|
|
||||||
ns, '200', '299')
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_ensure_vlan_ns_created_new_with_encap(self):
|
|
||||||
ns = mocked.APIC_VLAN_NAME
|
|
||||||
self.mock_response_for_get('fvnsVlanInstP')
|
|
||||||
ns_args = {'name': 'encap', 'from': '300', 'to': '399'}
|
|
||||||
self._mock_new_vlan_instance(ns, vlan_encap=ns_args)
|
|
||||||
new_ns = self.mgr.ensure_vlan_ns_created_on_apic(ns, '300', '399')
|
|
||||||
self.assert_responses_drained()
|
|
||||||
self.assertEqual(new_ns['name'], ns)
|
|
||||||
|
|
||||||
def test_ensure_tenant_created_on_apic(self):
|
|
||||||
self.mock_response_for_get('fvTenant', name='any')
|
|
||||||
self.mgr.ensure_tenant_created_on_apic('two')
|
|
||||||
self.mock_response_for_get('fvTenant')
|
|
||||||
self.mock_responses_for_create('fvTenant')
|
|
||||||
self.mgr.ensure_tenant_created_on_apic('four')
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_ensure_bd_created_existing_bd(self):
|
|
||||||
self.mock_response_for_get('fvBD', name='BD')
|
|
||||||
self.mgr.ensure_bd_created_on_apic('t1', 'two')
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_ensure_bd_created_not_ctx(self):
|
|
||||||
self.mock_response_for_get('fvBD')
|
|
||||||
self.mock_responses_for_create('fvBD')
|
|
||||||
self.mock_response_for_get('fvCtx')
|
|
||||||
self.mock_responses_for_create('fvCtx')
|
|
||||||
self.mock_responses_for_create('fvRsCtx')
|
|
||||||
self.mgr.ensure_bd_created_on_apic('t2', 'three')
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_ensure_bd_created_exc(self):
|
|
||||||
self.mock_response_for_get('fvBD')
|
|
||||||
self.mock_error_post_response(wexc.HTTPBadRequest)
|
|
||||||
self.mock_response_for_post('fvBD')
|
|
||||||
self.assertRaises(cexc.ApicResponseNotOk,
|
|
||||||
self.mgr.ensure_bd_created_on_apic, 't2', 'three')
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_ensure_bd_created_ctx_pref1(self):
|
|
||||||
self.mock_response_for_get('fvBD')
|
|
||||||
self.mock_responses_for_create('fvBD')
|
|
||||||
self.mock_response_for_get('fvCtx', pcEnfPref='1')
|
|
||||||
self.mock_responses_for_create('fvRsCtx')
|
|
||||||
self.mgr.ensure_bd_created_on_apic('t3', 'four')
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_ensure_bd_created_ctx_pref2(self):
|
|
||||||
self.mock_response_for_get('fvBD')
|
|
||||||
self.mock_responses_for_create('fvBD')
|
|
||||||
self.mock_response_for_get('fvCtx', pcEnfPref='2')
|
|
||||||
self.mock_response_for_post('fvCtx')
|
|
||||||
self.mock_responses_for_create('fvRsCtx')
|
|
||||||
self.mgr.ensure_bd_created_on_apic('t3', 'four')
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_delete_bd(self):
|
|
||||||
self.mock_response_for_post('fvBD')
|
|
||||||
self.mgr.delete_bd_on_apic('t1', 'bd')
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_ensure_subnet_created(self):
|
|
||||||
self.mock_response_for_get('fvSubnet', name='sn1')
|
|
||||||
self.mgr.ensure_subnet_created_on_apic('t0', 'bd1', '2.2.2.2/8')
|
|
||||||
self.mock_response_for_get('fvSubnet')
|
|
||||||
self.mock_responses_for_create('fvSubnet')
|
|
||||||
self.mgr.ensure_subnet_created_on_apic('t2', 'bd3', '4.4.4.4/16')
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_ensure_filter_created(self):
|
|
||||||
self.mock_response_for_get('vzFilter', name='f1')
|
|
||||||
self.mgr.ensure_filter_created_on_apic('t1', 'two')
|
|
||||||
self.mock_response_for_get('vzFilter')
|
|
||||||
self.mock_responses_for_create('vzFilter')
|
|
||||||
self.mgr.ensure_filter_created_on_apic('t2', 'four')
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_ensure_epg_created_for_network_old(self):
|
|
||||||
self.mock_db_query_filterby_first_return('faked')
|
|
||||||
epg = self.mgr.ensure_epg_created_for_network('X', 'Y', 'Z')
|
|
||||||
self.assertEqual(epg, 'faked')
|
|
||||||
|
|
||||||
def test_ensure_epg_created_for_network_new(self):
|
|
||||||
tenant = mocked.APIC_TENANT
|
|
||||||
network = mocked.APIC_NETWORK
|
|
||||||
netname = mocked.APIC_NETNAME
|
|
||||||
self._mock_phys_dom_prereq(mocked.APIC_PDOM)
|
|
||||||
self.mock_db_query_filterby_first_return(None)
|
|
||||||
self.mock_responses_for_create('fvAEPg')
|
|
||||||
self.mock_response_for_get('fvBD', name=network)
|
|
||||||
self.mock_responses_for_create('fvRsBd')
|
|
||||||
self.mock_responses_for_create('fvRsDomAtt')
|
|
||||||
new_epg = self.mgr.ensure_epg_created_for_network(tenant,
|
|
||||||
network, netname)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
self.assertEqual(new_epg.network_id, network)
|
|
||||||
self.assertTrue(self.mocked_session.add.called)
|
|
||||||
self.assertTrue(self.mocked_session.flush.called)
|
|
||||||
|
|
||||||
def test_ensure_epg_created_for_network_exc(self):
|
|
||||||
tenant = mocked.APIC_TENANT
|
|
||||||
network = mocked.APIC_NETWORK
|
|
||||||
netname = mocked.APIC_NETNAME
|
|
||||||
self.mock_db_query_filterby_first_return(None)
|
|
||||||
self.mock_error_post_response(wexc.HTTPBadRequest)
|
|
||||||
self.mock_response_for_post('fvAEPg')
|
|
||||||
self.assertRaises(cexc.ApicResponseNotOk,
|
|
||||||
self.mgr.ensure_epg_created_for_network,
|
|
||||||
tenant, network, netname)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_delete_epg_for_network_no_epg(self):
|
|
||||||
self.mock_db_query_filterby_first_return(None)
|
|
||||||
self.mgr.delete_epg_for_network('tenant', 'network')
|
|
||||||
|
|
||||||
def test_delete_epg_for_network(self):
|
|
||||||
epg = mock.Mock()
|
|
||||||
epg.epg_id = mocked.APIC_EPG
|
|
||||||
self.mock_db_query_filterby_first_return(epg)
|
|
||||||
self.mock_response_for_post('fvAEPg')
|
|
||||||
self.mgr.delete_epg_for_network('tenant', 'network')
|
|
||||||
self.assertTrue(self.mocked_session.delete.called)
|
|
||||||
self.assertTrue(self.mocked_session.flush.called)
|
|
||||||
|
|
||||||
def test_ensure_path_created_for_port(self):
|
|
||||||
epg = mock.Mock()
|
|
||||||
epg.epg_id = 'epg01'
|
|
||||||
eepg = mock.Mock(return_value=epg)
|
|
||||||
apic_manager.APICManager.ensure_epg_created_for_network = eepg
|
|
||||||
self.mock_response_for_get('fvRsPathAtt', tDn='foo')
|
|
||||||
self.mgr.ensure_path_created_for_port('tenant', 'network', 'rhel01',
|
|
||||||
'static', 'netname')
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_ensure_path_created_for_port_no_path_att(self):
|
|
||||||
epg = mock.Mock()
|
|
||||||
epg.epg_id = 'epg2'
|
|
||||||
eepg = mock.Mock(return_value=epg)
|
|
||||||
self.mgr.ensure_epg_created_for_network = eepg
|
|
||||||
self.mock_response_for_get('fvRsPathAtt')
|
|
||||||
self.mock_responses_for_create('fvRsPathAtt')
|
|
||||||
self.mgr.ensure_path_created_for_port('tenant', 'network', 'ubuntu2',
|
|
||||||
'static', 'netname')
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_ensure_path_created_for_port_unknown_host(self):
|
|
||||||
epg = mock.Mock()
|
|
||||||
epg.epg_id = 'epg3'
|
|
||||||
eepg = mock.Mock(return_value=epg)
|
|
||||||
apic_manager.APICManager.ensure_epg_created_for_network = eepg
|
|
||||||
self.mock_response_for_get('fvRsPathAtt', tDn='foo')
|
|
||||||
self.assertRaises(cexc.ApicHostNotConfigured,
|
|
||||||
self.mgr.ensure_path_created_for_port,
|
|
||||||
'tenant', 'network', 'cirros3', 'static', 'netname')
|
|
||||||
|
|
||||||
def test_create_tenant_filter(self):
|
|
||||||
tenant = mocked.APIC_TENANT
|
|
||||||
self.mock_responses_for_create('vzFilter')
|
|
||||||
self.mock_responses_for_create('vzEntry')
|
|
||||||
filter_id = self.mgr.create_tenant_filter(tenant)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
self.assertTrue(uuidutils.is_uuid_like(str(filter_id)))
|
|
||||||
|
|
||||||
def test_create_tenant_filter_exc(self):
|
|
||||||
tenant = mocked.APIC_TENANT
|
|
||||||
self.mock_error_post_response(wexc.HTTPBadRequest)
|
|
||||||
self.mock_response_for_post('vzFilter')
|
|
||||||
self.assertRaises(cexc.ApicResponseNotOk,
|
|
||||||
self.mgr.create_tenant_filter, tenant)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_set_contract_for_epg_consumer(self):
|
|
||||||
tenant = mocked.APIC_TENANT
|
|
||||||
epg = mocked.APIC_EPG
|
|
||||||
contract = mocked.APIC_CONTRACT
|
|
||||||
self.mock_responses_for_create('fvRsCons')
|
|
||||||
self.mgr.set_contract_for_epg(tenant, epg, contract)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_set_contract_for_epg_provider(self):
|
|
||||||
tenant = mocked.APIC_TENANT
|
|
||||||
epg = mocked.APIC_EPG
|
|
||||||
contract = mocked.APIC_CONTRACT
|
|
||||||
epg_obj = mock.Mock()
|
|
||||||
epg_obj.epg_id = epg
|
|
||||||
epg_obj.provider = False
|
|
||||||
self.mock_db_query_filterby_first_return(epg_obj)
|
|
||||||
self.mock_responses_for_create('fvRsProv')
|
|
||||||
self.mock_response_for_post('vzBrCP')
|
|
||||||
self.mgr.set_contract_for_epg(tenant, epg, contract, provider=True)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
self.assertTrue(self.mocked_session.merge.called)
|
|
||||||
self.assertTrue(self.mocked_session.flush.called)
|
|
||||||
self.assertTrue(epg_obj.provider)
|
|
||||||
|
|
||||||
def test_set_contract_for_epg_provider_exc(self):
|
|
||||||
tenant = mocked.APIC_TENANT
|
|
||||||
epg = mocked.APIC_EPG
|
|
||||||
contract = mocked.APIC_CONTRACT
|
|
||||||
self.mock_error_post_response(wexc.HTTPBadRequest)
|
|
||||||
self.mock_response_for_post('vzBrCP')
|
|
||||||
self.mock_response_for_post('fvRsProv')
|
|
||||||
self.assertRaises(cexc.ApicResponseNotOk,
|
|
||||||
self.mgr.set_contract_for_epg,
|
|
||||||
tenant, epg, contract, provider=True)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_delete_contract_for_epg_consumer(self):
|
|
||||||
tenant = mocked.APIC_TENANT
|
|
||||||
epg = mocked.APIC_EPG
|
|
||||||
contract = mocked.APIC_CONTRACT
|
|
||||||
self.mock_response_for_post('fvRsCons')
|
|
||||||
self.mgr.delete_contract_for_epg(tenant, epg, contract)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_delete_contract_for_epg_provider(self):
|
|
||||||
tenant = mocked.APIC_TENANT
|
|
||||||
epg = mocked.APIC_EPG
|
|
||||||
contract = mocked.APIC_CONTRACT
|
|
||||||
epg_obj = mock.Mock()
|
|
||||||
epg_obj.epg_id = epg + '-other'
|
|
||||||
epg_obj.provider = False
|
|
||||||
self.mock_db_query_filterby_first_return(epg_obj)
|
|
||||||
self.mock_response_for_post('fvRsProv')
|
|
||||||
self.mock_response_for_post('fvRsCons')
|
|
||||||
self.mock_responses_for_create('fvRsProv')
|
|
||||||
self.mock_response_for_post('vzBrCP')
|
|
||||||
self.mgr.delete_contract_for_epg(tenant, epg, contract, provider=True)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
self.assertTrue(self.mocked_session.merge.called)
|
|
||||||
self.assertTrue(self.mocked_session.flush.called)
|
|
||||||
self.assertTrue(epg_obj.provider)
|
|
||||||
|
|
||||||
def test_create_tenant_contract_existing(self):
|
|
||||||
tenant = mocked.APIC_TENANT
|
|
||||||
contract = mocked.APIC_CONTRACT
|
|
||||||
self.mock_db_query_filterby_first_return(contract)
|
|
||||||
new_contract = self.mgr.create_tenant_contract(tenant)
|
|
||||||
self.assertEqual(new_contract, contract)
|
|
||||||
|
|
||||||
def test_create_tenant_contract_new(self):
|
|
||||||
tenant = mocked.APIC_TENANT
|
|
||||||
contract = mocked.APIC_CONTRACT
|
|
||||||
dn = self.mgr.apic.vzBrCP.mo.dn(tenant, contract)
|
|
||||||
self.mock_db_query_filterby_first_return(None)
|
|
||||||
self.mock_responses_for_create('vzBrCP')
|
|
||||||
self.mock_response_for_get('vzBrCP', dn=dn)
|
|
||||||
self.mock_responses_for_create('vzSubj')
|
|
||||||
self.mock_responses_for_create('vzFilter')
|
|
||||||
self.mock_responses_for_create('vzEntry')
|
|
||||||
self.mock_responses_for_create('vzInTerm')
|
|
||||||
self.mock_responses_for_create('vzRsFiltAtt__In')
|
|
||||||
self.mock_responses_for_create('vzOutTerm')
|
|
||||||
self.mock_responses_for_create('vzRsFiltAtt__Out')
|
|
||||||
self.mock_responses_for_create('vzCPIf')
|
|
||||||
self.mock_responses_for_create('vzRsIf')
|
|
||||||
new_contract = self.mgr.create_tenant_contract(tenant)
|
|
||||||
self.assert_responses_drained()
|
|
||||||
self.assertTrue(self.mocked_session.add.called)
|
|
||||||
self.assertTrue(self.mocked_session.flush.called)
|
|
||||||
self.assertEqual(new_contract['tenant_id'], tenant)
|
|
||||||
|
|
||||||
def test_create_tenant_contract_exc(self):
|
|
||||||
tenant = mocked.APIC_TENANT
|
|
||||||
self.mock_db_query_filterby_first_return(None)
|
|
||||||
self.mock_error_post_response(wexc.HTTPBadRequest)
|
|
||||||
self.mock_response_for_post('vzBrCP')
|
|
||||||
self.assertRaises(cexc.ApicResponseNotOk,
|
|
||||||
self.mgr.create_tenant_contract, tenant)
|
|
||||||
self.assert_responses_drained()
|
|
|
@ -1,226 +0,0 @@
|
||||||
# Copyright (c) 2014 Cisco Systems
|
|
||||||
# All Rights Reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
# @author: Henry Gessau, Cisco Systems
|
|
||||||
|
|
||||||
import mock
|
|
||||||
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from neutron.plugins.ml2.drivers.cisco.apic import mechanism_apic as md
|
|
||||||
from neutron.plugins.ml2.drivers import type_vlan # noqa
|
|
||||||
from neutron.tests import base
|
|
||||||
from neutron.tests.unit.ml2.drivers.cisco.apic import (
|
|
||||||
test_cisco_apic_common as mocked)
|
|
||||||
|
|
||||||
|
|
||||||
HOST_ID1 = 'ubuntu'
|
|
||||||
HOST_ID2 = 'rhel'
|
|
||||||
ENCAP = '101'
|
|
||||||
|
|
||||||
SUBNET_GATEWAY = '10.3.2.1'
|
|
||||||
SUBNET_CIDR = '10.3.1.0/24'
|
|
||||||
SUBNET_NETMASK = '24'
|
|
||||||
|
|
||||||
TEST_SEGMENT1 = 'test-segment1'
|
|
||||||
TEST_SEGMENT2 = 'test-segment2'
|
|
||||||
|
|
||||||
|
|
||||||
class TestCiscoApicMechDriver(base.BaseTestCase,
|
|
||||||
mocked.ControllerMixin,
|
|
||||||
mocked.ConfigMixin,
|
|
||||||
mocked.DbModelMixin):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
super(TestCiscoApicMechDriver, self).setUp()
|
|
||||||
mocked.ControllerMixin.set_up_mocks(self)
|
|
||||||
mocked.ConfigMixin.set_up_mocks(self)
|
|
||||||
mocked.DbModelMixin.set_up_mocks(self)
|
|
||||||
|
|
||||||
self.mock_apic_manager_login_responses()
|
|
||||||
self.driver = md.APICMechanismDriver()
|
|
||||||
self.driver.vif_type = 'test-vif_type'
|
|
||||||
self.driver.cap_port_filter = 'test-cap_port_filter'
|
|
||||||
|
|
||||||
def test_initialize(self):
|
|
||||||
cfg.CONF.set_override('network_vlan_ranges', ['physnet1:100:199'],
|
|
||||||
'ml2_type_vlan')
|
|
||||||
ns = mocked.APIC_VLAN_NAME
|
|
||||||
mode = mocked.APIC_VLAN_MODE
|
|
||||||
self.mock_response_for_get('fvnsVlanInstP', name=ns, mode=mode)
|
|
||||||
self.mock_response_for_get('physDomP', name=mocked.APIC_DOMAIN)
|
|
||||||
self.mock_response_for_get('infraAttEntityP',
|
|
||||||
name=mocked.APIC_ATT_ENT_PROF)
|
|
||||||
self.mock_response_for_get('infraAccPortGrp',
|
|
||||||
name=mocked.APIC_ACC_PORT_GRP)
|
|
||||||
mock.patch('neutron.plugins.ml2.drivers.cisco.apic.apic_manager.'
|
|
||||||
'APICManager.ensure_infra_created_on_apic').start()
|
|
||||||
self.driver.initialize()
|
|
||||||
self.session = self.driver.apic_manager.apic.session
|
|
||||||
self.assert_responses_drained()
|
|
||||||
|
|
||||||
def test_update_port_postcommit(self):
|
|
||||||
net_ctx = self._get_network_context(mocked.APIC_TENANT,
|
|
||||||
mocked.APIC_NETWORK,
|
|
||||||
TEST_SEGMENT1)
|
|
||||||
port_ctx = self._get_port_context(mocked.APIC_TENANT,
|
|
||||||
mocked.APIC_NETWORK,
|
|
||||||
'vm1', net_ctx, HOST_ID1)
|
|
||||||
mgr = self.driver.apic_manager = mock.Mock()
|
|
||||||
self.driver.update_port_postcommit(port_ctx)
|
|
||||||
mgr.ensure_tenant_created_on_apic.assert_called_once_with(
|
|
||||||
mocked.APIC_TENANT)
|
|
||||||
mgr.ensure_path_created_for_port.assert_called_once_with(
|
|
||||||
mocked.APIC_TENANT, mocked.APIC_NETWORK, HOST_ID1,
|
|
||||||
ENCAP, mocked.APIC_NETWORK + '-name')
|
|
||||||
|
|
||||||
def test_create_network_postcommit(self):
|
|
||||||
ctx = self._get_network_context(mocked.APIC_TENANT,
|
|
||||||
mocked.APIC_NETWORK,
|
|
||||||
TEST_SEGMENT1)
|
|
||||||
mgr = self.driver.apic_manager = mock.Mock()
|
|
||||||
self.driver.create_network_postcommit(ctx)
|
|
||||||
mgr.ensure_bd_created_on_apic.assert_called_once_with(
|
|
||||||
mocked.APIC_TENANT, mocked.APIC_NETWORK)
|
|
||||||
mgr.ensure_epg_created_for_network.assert_called_once_with(
|
|
||||||
mocked.APIC_TENANT, mocked.APIC_NETWORK,
|
|
||||||
mocked.APIC_NETWORK + '-name')
|
|
||||||
|
|
||||||
def test_delete_network_postcommit(self):
|
|
||||||
ctx = self._get_network_context(mocked.APIC_TENANT,
|
|
||||||
mocked.APIC_NETWORK,
|
|
||||||
TEST_SEGMENT1)
|
|
||||||
mgr = self.driver.apic_manager = mock.Mock()
|
|
||||||
self.driver.delete_network_postcommit(ctx)
|
|
||||||
mgr.delete_bd_on_apic.assert_called_once_with(
|
|
||||||
mocked.APIC_TENANT, mocked.APIC_NETWORK)
|
|
||||||
mgr.delete_epg_for_network.assert_called_once_with(
|
|
||||||
mocked.APIC_TENANT, mocked.APIC_NETWORK)
|
|
||||||
|
|
||||||
def test_create_subnet_postcommit(self):
|
|
||||||
net_ctx = self._get_network_context(mocked.APIC_TENANT,
|
|
||||||
mocked.APIC_NETWORK,
|
|
||||||
TEST_SEGMENT1)
|
|
||||||
subnet_ctx = self._get_subnet_context(SUBNET_GATEWAY,
|
|
||||||
SUBNET_CIDR,
|
|
||||||
net_ctx)
|
|
||||||
mgr = self.driver.apic_manager = mock.Mock()
|
|
||||||
self.driver.create_subnet_postcommit(subnet_ctx)
|
|
||||||
mgr.ensure_subnet_created_on_apic.assert_called_once_with(
|
|
||||||
mocked.APIC_TENANT, mocked.APIC_NETWORK,
|
|
||||||
'%s/%s' % (SUBNET_GATEWAY, SUBNET_NETMASK))
|
|
||||||
|
|
||||||
def _get_network_context(self, tenant_id, net_id, seg_id=None,
|
|
||||||
seg_type='vlan'):
|
|
||||||
network = {'id': net_id,
|
|
||||||
'name': net_id + '-name',
|
|
||||||
'tenant_id': tenant_id,
|
|
||||||
'provider:segmentation_id': seg_id}
|
|
||||||
if seg_id:
|
|
||||||
network_segments = [{'id': seg_id,
|
|
||||||
'segmentation_id': ENCAP,
|
|
||||||
'network_type': seg_type,
|
|
||||||
'physical_network': 'physnet1'}]
|
|
||||||
else:
|
|
||||||
network_segments = []
|
|
||||||
return FakeNetworkContext(network, network_segments)
|
|
||||||
|
|
||||||
def _get_subnet_context(self, gateway_ip, cidr, network):
|
|
||||||
subnet = {'tenant_id': network.current['tenant_id'],
|
|
||||||
'network_id': network.current['id'],
|
|
||||||
'id': '[%s/%s]' % (gateway_ip, cidr),
|
|
||||||
'gateway_ip': gateway_ip,
|
|
||||||
'cidr': cidr}
|
|
||||||
return FakeSubnetContext(subnet, network)
|
|
||||||
|
|
||||||
def _get_port_context(self, tenant_id, net_id, vm_id, network, host):
|
|
||||||
port = {'device_id': vm_id,
|
|
||||||
'device_owner': 'compute',
|
|
||||||
'binding:host_id': host,
|
|
||||||
'tenant_id': tenant_id,
|
|
||||||
'id': mocked.APIC_PORT,
|
|
||||||
'name': mocked.APIC_PORT,
|
|
||||||
'network_id': net_id}
|
|
||||||
return FakePortContext(port, network)
|
|
||||||
|
|
||||||
|
|
||||||
class FakeNetworkContext(object):
|
|
||||||
"""To generate network context for testing purposes only."""
|
|
||||||
|
|
||||||
def __init__(self, network, segments):
|
|
||||||
self._network = network
|
|
||||||
self._segments = segments
|
|
||||||
|
|
||||||
@property
|
|
||||||
def current(self):
|
|
||||||
return self._network
|
|
||||||
|
|
||||||
@property
|
|
||||||
def network_segments(self):
|
|
||||||
return self._segments
|
|
||||||
|
|
||||||
|
|
||||||
class FakeSubnetContext(object):
|
|
||||||
"""To generate subnet context for testing purposes only."""
|
|
||||||
|
|
||||||
def __init__(self, subnet, network):
|
|
||||||
self._subnet = subnet
|
|
||||||
self._network = network
|
|
||||||
|
|
||||||
@property
|
|
||||||
def current(self):
|
|
||||||
return self._subnet
|
|
||||||
|
|
||||||
@property
|
|
||||||
def network(self):
|
|
||||||
return self._network
|
|
||||||
|
|
||||||
|
|
||||||
class FakePortContext(object):
|
|
||||||
"""To generate port context for testing purposes only."""
|
|
||||||
|
|
||||||
def __init__(self, port, network):
|
|
||||||
self._fake_plugin = mock.Mock()
|
|
||||||
self._fake_plugin.get_ports.return_value = []
|
|
||||||
self._fake_plugin_context = None
|
|
||||||
self._port = port
|
|
||||||
self._network = network
|
|
||||||
if network.network_segments:
|
|
||||||
self._bound_segment = network.network_segments[0]
|
|
||||||
else:
|
|
||||||
self._bound_segment = None
|
|
||||||
|
|
||||||
@property
|
|
||||||
def current(self):
|
|
||||||
return self._port
|
|
||||||
|
|
||||||
@property
|
|
||||||
def _plugin(self):
|
|
||||||
return self._fake_plugin
|
|
||||||
|
|
||||||
@property
|
|
||||||
def _plugin_context(self):
|
|
||||||
return self._fake_plugin_context
|
|
||||||
|
|
||||||
@property
|
|
||||||
def network(self):
|
|
||||||
return self._network
|
|
||||||
|
|
||||||
@property
|
|
||||||
def bound_segment(self):
|
|
||||||
return self._bound_segment
|
|
||||||
|
|
||||||
def set_binding(self, segment_id, vif_type, cap_port_filter):
|
|
||||||
pass
|
|
|
@ -1,71 +0,0 @@
|
||||||
# Copyright (c) 2014 Cisco Systems, Inc.
|
|
||||||
# All rights reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
# implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import mock
|
|
||||||
from oslo.config import cfg
|
|
||||||
|
|
||||||
from neutron.plugins.ml2.drivers.cisco.nexus import config as cisco_config
|
|
||||||
from neutron.tests import base
|
|
||||||
|
|
||||||
|
|
||||||
class TestCiscoNexusPluginConfig(base.BaseTestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
self.config_parse()
|
|
||||||
super(TestCiscoNexusPluginConfig, self).setUp()
|
|
||||||
|
|
||||||
def test_config_parse_error(self):
|
|
||||||
"""Check that config error is raised upon config parser failure."""
|
|
||||||
with mock.patch.object(cfg, 'MultiConfigParser') as parser:
|
|
||||||
parser.return_value.read.return_value = []
|
|
||||||
self.assertRaises(cfg.Error, cisco_config.ML2MechCiscoConfig)
|
|
||||||
|
|
||||||
def test_create_device_dictionary(self):
|
|
||||||
"""Test creation of the device dictionary based on nexus config."""
|
|
||||||
test_config = {
|
|
||||||
'ml2_mech_cisco_nexus:1.1.1.1': {
|
|
||||||
'username': ['admin'],
|
|
||||||
'password': ['mySecretPassword'],
|
|
||||||
'ssh_port': [22],
|
|
||||||
'compute1': ['1/1'],
|
|
||||||
'compute2': ['1/2'],
|
|
||||||
},
|
|
||||||
'ml2_mech_cisco_nexus:2.2.2.2': {
|
|
||||||
'username': ['admin'],
|
|
||||||
'password': ['mySecretPassword'],
|
|
||||||
'ssh_port': [22],
|
|
||||||
'compute3': ['1/1'],
|
|
||||||
'compute4': ['1/2'],
|
|
||||||
},
|
|
||||||
}
|
|
||||||
expected_dev_dict = {
|
|
||||||
('1.1.1.1', 'username'): 'admin',
|
|
||||||
('1.1.1.1', 'password'): 'mySecretPassword',
|
|
||||||
('1.1.1.1', 'ssh_port'): 22,
|
|
||||||
('1.1.1.1', 'compute1'): '1/1',
|
|
||||||
('1.1.1.1', 'compute2'): '1/2',
|
|
||||||
('2.2.2.2', 'username'): 'admin',
|
|
||||||
('2.2.2.2', 'password'): 'mySecretPassword',
|
|
||||||
('2.2.2.2', 'ssh_port'): 22,
|
|
||||||
('2.2.2.2', 'compute3'): '1/1',
|
|
||||||
('2.2.2.2', 'compute4'): '1/2',
|
|
||||||
}
|
|
||||||
with mock.patch.object(cfg, 'MultiConfigParser') as parser:
|
|
||||||
parser.return_value.read.return_value = cfg.CONF.config_file
|
|
||||||
parser.return_value.parsed = [test_config]
|
|
||||||
cisco_config.ML2MechCiscoConfig()
|
|
||||||
self.assertEqual(expected_dev_dict,
|
|
||||||
cisco_config.ML2MechCiscoConfig.nexus_dict)
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue