[liam young] Add support for multiple external networks using configuration

This commit is contained in:
James Page
2013-11-28 15:27:29 +00:00
11 changed files with 285 additions and 27 deletions

View File

@@ -49,6 +49,29 @@ The gateway provides two key services; L3 network routing and DHCP services.
These are both required in a fully functional Neutron Openstack deployment. These are both required in a fully functional Neutron Openstack deployment.
If multiple floating pools are needed then an L3 agent (which corresponds to
a quantum-gateway for the sake of this charm) is needed for each one. Each
gateway needs to be deployed as a separate service so that the external
network id can be set differently for each gateway e.g.
juju deploy quantum-gateway quantum-gateway-extnet1
juju add-relation quantum-gateway-extnet1 mysql
juju add-relation quantum-gateway-extnet1 rabbitmq-server
juju add-relation quantum-gateway-extnet1 nova-cloud-controller
juju deploy quantum-gateway quantum-gateway-extnet2
juju add-relation quantum-gateway-extnet2 mysql
juju add-relation quantum-gateway-extnet2 rabbitmq-server
juju add-relation quantum-gateway-extnet2 nova-cloud-controller
Create extnet1 and extnet2 via neutron client and take a note of their ids
juju set quantum-gateway-extnet1 "run-internal-router=leader"
juju set quantum-gateway-extnet2 "run-internal-router=none"
juju set quantum-gateway-extnet1 "external-network-id=<extnet1 id>"
juju set quantum-gateway-extnet2 "external-network-id=<extnet2 id>"
See upstream [Neutron multi extnet](http://docs.openstack.org/trunk/config-reference/content/adv_cfg_l3_agent_multi_extnet.html)
TODO TODO
---- ----

View File

@@ -25,6 +25,23 @@ options:
- deb http://my.archive.com/ubuntu main|KEYID - deb http://my.archive.com/ubuntu main|KEYID
. .
Note that quantum/neutron is only supported >= Folsom. Note that quantum/neutron is only supported >= Folsom.
run-internal-router:
type: string
default: all
description: |
Optional configuration to support how the L3 agent option
handle_internal_only_routers is configured.
all => Set to be true everywhere
none => Set to be false everywhere
leader => Set to be true on one node (the leader) and false everywhere
else.
Use leader and none when configuring multiple floating pools
external-network-id:
type: string
description: |
Optional configuration to set the external-network-id. Only needed when
configuring multiple external networks and should be used in conjunction
with run-internal-router.
rabbit-user: rabbit-user:
type: string type: string
default: neutron default: neutron

View File

@@ -0,0 +1 @@
quantum_hooks.py

View File

@@ -20,6 +20,9 @@ from charmhelpers.contrib.openstack.context import (
from charmhelpers.contrib.openstack.utils import ( from charmhelpers.contrib.openstack.utils import (
get_os_codename_install_source get_os_codename_install_source
) )
from charmhelpers.contrib.hahelpers.cluster import(
eligible_leader
)
DB_USER = "quantum" DB_USER = "quantum"
QUANTUM_DB = "quantum" QUANTUM_DB = "quantum"
@@ -99,6 +102,23 @@ class NetworkServiceContext(OSContextGenerator):
return {} return {}
class L3AgentContext(OSContextGenerator):
def __call__(self):
ctxt = {}
if config('run-internal-router') == 'leader':
ctxt['handle_internal_only_router'] = eligible_leader(None)
if config('run-internal-router') == 'all':
ctxt['handle_internal_only_router'] = True
if config('run-internal-router') == 'none':
ctxt['handle_internal_only_router'] = False
if config('external-network-id'):
ctxt['ext_net_id'] = config('external-network-id')
return ctxt
class ExternalPortContext(OSContextGenerator): class ExternalPortContext(OSContextGenerator):
def __call__(self): def __call__(self):
if config('ext-port'): if config('ext-port'):

View File

@@ -16,7 +16,7 @@ from charmhelpers.fetch import (
) )
from charmhelpers.core.host import ( from charmhelpers.core.host import (
restart_on_change, restart_on_change,
lsb_release lsb_release,
) )
from charmhelpers.contrib.hahelpers.cluster import( from charmhelpers.contrib.hahelpers.cluster import(
eligible_leader eligible_leader
@@ -26,7 +26,7 @@ from charmhelpers.contrib.hahelpers.apache import(
) )
from charmhelpers.contrib.openstack.utils import ( from charmhelpers.contrib.openstack.utils import (
configure_installation_source, configure_installation_source,
openstack_upgrade_available openstack_upgrade_available,
) )
from charmhelpers.payload.execd import execd_preinstall from charmhelpers.payload.execd import execd_preinstall
@@ -41,6 +41,7 @@ from quantum_utils import (
valid_plugin, valid_plugin,
configure_ovs, configure_ovs,
reassign_agent_resources, reassign_agent_resources,
stop_services
) )
from quantum_contexts import ( from quantum_contexts import (
DB_USER, QUANTUM_DB, DB_USER, QUANTUM_DB,
@@ -111,7 +112,9 @@ def amqp_joined(relation_id=None):
@hooks.hook('shared-db-relation-changed', @hooks.hook('shared-db-relation-changed',
'amqp-relation-changed') 'amqp-relation-changed',
'cluster-relation-changed',
'cluster-relation-joined')
@restart_on_change(restart_map()) @restart_on_change(restart_map())
def db_amqp_changed(): def db_amqp_changed():
CONFIGS.write_all() CONFIGS.write_all()
@@ -126,6 +129,7 @@ def nm_changed():
@hooks.hook("cluster-relation-departed") @hooks.hook("cluster-relation-departed")
@restart_on_change(restart_map())
def cluster_departed(): def cluster_departed():
if config('plugin') == 'nvp': if config('plugin') == 'nvp':
log('Unable to re-assign agent resources for failed nodes with nvp', log('Unable to re-assign agent resources for failed nodes with nvp',
@@ -133,8 +137,14 @@ def cluster_departed():
return return
if eligible_leader(None): if eligible_leader(None):
reassign_agent_resources() reassign_agent_resources()
CONFIGS.write_all()
@hooks.hook('cluster-relation-broken')
@hooks.hook('stop')
def stop():
stop_services()
if __name__ == '__main__': if __name__ == '__main__':
try: try:
hooks.execute(sys.argv) hooks.execute(sys.argv)

View File

@@ -1,7 +1,12 @@
from charmhelpers.core.host import service_running from charmhelpers.core.host import (
service_running,
service_stop
)
from charmhelpers.core.hookenv import ( from charmhelpers.core.hookenv import (
log, log,
config, config,
relations_of_type,
unit_private_ip
) )
from charmhelpers.fetch import ( from charmhelpers.fetch import (
apt_install, apt_install,
@@ -10,12 +15,13 @@ from charmhelpers.fetch import (
from charmhelpers.contrib.network.ovs import ( from charmhelpers.contrib.network.ovs import (
add_bridge, add_bridge,
add_bridge_port, add_bridge_port,
full_restart, full_restart
) )
from charmhelpers.contrib.openstack.utils import ( from charmhelpers.contrib.openstack.utils import (
configure_installation_source, configure_installation_source,
get_os_codename_install_source, get_os_codename_install_source,
get_os_codename_package get_os_codename_package,
get_hostname
) )
import charmhelpers.contrib.openstack.context as context import charmhelpers.contrib.openstack.context as context
@@ -27,6 +33,7 @@ from quantum_contexts import (
networking_name, networking_name,
QuantumGatewayContext, QuantumGatewayContext,
NetworkServiceContext, NetworkServiceContext,
L3AgentContext,
QuantumSharedDBContext, QuantumSharedDBContext,
ExternalPortContext, ExternalPortContext,
) )
@@ -209,7 +216,8 @@ NEUTRON_OVS_CONFIG_FILES = {
'neutron-plugin-openvswitch-agent'] 'neutron-plugin-openvswitch-agent']
}, },
NEUTRON_L3_AGENT_CONF: { NEUTRON_L3_AGENT_CONF: {
'hook_contexts': [NetworkServiceContext()], 'hook_contexts': [NetworkServiceContext(),
L3AgentContext()],
'services': ['neutron-l3-agent'] 'services': ['neutron-l3-agent']
}, },
# TODO: Check to see if this is actually required # TODO: Check to see if this is actually required
@@ -268,6 +276,16 @@ def register_configs():
return configs return configs
def stop_services():
name = networking_name()
svcs = set()
for ctxt in CONFIG_FILES[name][config('plugin')].itervalues():
for svc in ctxt['services']:
svcs.add(svc)
for svc in svcs:
service_stop(svc)
def restart_map(): def restart_map():
''' '''
Determine the correct resource map to be passed to Determine the correct resource map to be passed to
@@ -315,6 +333,11 @@ def reassign_agent_resources():
auth_url=auth_url, auth_url=auth_url,
region_name=env['region']) region_name=env['region'])
partner_gateways = [unit_private_ip().split('.')[0]]
for partner_gateway in relations_of_type(reltype='cluster'):
gateway_hostname = get_hostname(partner_gateway['private-address'])
partner_gateways.append(gateway_hostname.partition('.')[0])
agents = quantum.list_agents(agent_type=DHCP_AGENT) agents = quantum.list_agents(agent_type=DHCP_AGENT)
dhcp_agents = [] dhcp_agents = []
l3_agents = [] l3_agents = []
@@ -327,7 +350,8 @@ def reassign_agent_resources():
agent['id'])['networks']: agent['id'])['networks']:
networks[network['id']] = agent['id'] networks[network['id']] = agent['id']
else: else:
dhcp_agents.append(agent['id']) if agent['host'].partition('.')[0] in partner_gateways:
dhcp_agents.append(agent['id'])
agents = quantum.list_agents(agent_type=L3_AGENT) agents = quantum.list_agents(agent_type=L3_AGENT)
routers = {} routers = {}
@@ -339,7 +363,13 @@ def reassign_agent_resources():
agent['id'])['routers']: agent['id'])['routers']:
routers[router['id']] = agent['id'] routers[router['id']] = agent['id']
else: else:
l3_agents.append(agent['id']) if agent['host'].split('.')[0] in partner_gateways:
l3_agents.append(agent['id'])
if len(dhcp_agents) == 0 or len(l3_agents) == 0:
log('Unable to relocate resources, there are %s dhcp_agents and %s \
l3_agents in this cluster' % (len(dhcp_agents), len(l3_agents)))
return
index = 0 index = 0
for router_id in routers: for router_id in routers:

View File

@@ -1 +1 @@
57 59

View File

@@ -7,3 +7,7 @@ admin_user = {{ service_username }}
admin_password = {{ service_password }} admin_password = {{ service_password }}
root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
ovs_use_veth = True ovs_use_veth = True
handle_internal_only_routers = {{ handle_internal_only_router }}
{% if ext_net_id %}
gateway_external_network_id = {{ ext_net_id }}
{% endif %}

View File

@@ -15,6 +15,7 @@ TO_PATCH = [
'unit_get', 'unit_get',
'apt_install', 'apt_install',
'get_os_codename_install_source', 'get_os_codename_install_source',
'eligible_leader',
] ]
@@ -141,6 +142,36 @@ class TestExternalPortContext(CharmTestCase):
{'ext_port': 'eth1010'}) {'ext_port': 'eth1010'})
class TestL3AgentContext(CharmTestCase):
def setUp(self):
super(TestL3AgentContext, self).setUp(quantum_contexts,
TO_PATCH)
self.config.side_effect = self.test_config.get
def test_no_ext_netid(self):
self.test_config.set('run-internal-router', 'none')
self.test_config.set('external-network-id', '')
self.eligible_leader.return_value = False
self.assertEquals(quantum_contexts.L3AgentContext()(),
{'handle_internal_only_router': False})
def test_hior_leader(self):
self.test_config.set('run-internal-router', 'leader')
self.test_config.set('external-network-id', 'netid')
self.eligible_leader.return_value = True
self.assertEquals(quantum_contexts.L3AgentContext()(),
{'handle_internal_only_router': True,
'ext_net_id': 'netid'})
def test_hior_all(self):
self.test_config.set('run-internal-router', 'all')
self.test_config.set('external-network-id', 'netid')
self.eligible_leader.return_value = True
self.assertEquals(quantum_contexts.L3AgentContext()(),
{'handle_internal_only_router': True,
'ext_net_id': 'netid'})
class TestQuantumGatewayContext(CharmTestCase): class TestQuantumGatewayContext(CharmTestCase):
def setUp(self): def setUp(self):
super(TestQuantumGatewayContext, self).setUp(quantum_contexts, super(TestQuantumGatewayContext, self).setUp(quantum_contexts,

View File

@@ -34,7 +34,8 @@ TO_PATCH = [
'reassign_agent_resources', 'reassign_agent_resources',
'get_common_package', 'get_common_package',
'execd_preinstall', 'execd_preinstall',
'lsb_release' 'lsb_release',
'stop_services',
] ]
@@ -157,3 +158,7 @@ class TestQuantumHooks(CharmTestCase):
self.eligible_leader.return_value = True self.eligible_leader.return_value = True
self._call_hook('cluster-relation-departed') self._call_hook('cluster-relation-departed')
self.reassign_agent_resources.assert_called() self.reassign_agent_resources.assert_called()
def test_stop(self):
self._call_hook('stop')
self.stop_services.assert_called

View File

@@ -6,6 +6,7 @@ templating.OSConfigRenderer = MagicMock()
import quantum_utils import quantum_utils
try: try:
import neutronclient import neutronclient
except ImportError: except ImportError:
@@ -17,6 +18,7 @@ from test_utils import (
import charmhelpers.core.hookenv as hookenv import charmhelpers.core.hookenv as hookenv
TO_PATCH = [ TO_PATCH = [
'config', 'config',
'get_os_codename_install_source', 'get_os_codename_install_source',
@@ -31,7 +33,10 @@ TO_PATCH = [
'headers_package', 'headers_package',
'full_restart', 'full_restart',
'service_running', 'service_running',
'NetworkServiceContext' 'NetworkServiceContext',
'unit_private_ip',
'relations_of_type',
'service_stop',
] ]
@@ -174,6 +179,32 @@ class TestQuantumUtils(CharmTestCase):
['hook_contexts'] ['hook_contexts']
) )
def test_stop_services_nvp(self):
self.config.return_value = 'nvp'
quantum_utils.stop_services()
calls = [
call('neutron-dhcp-agent'),
call('nova-api-metadata'),
call('neutron-metadata-agent')
]
self.service_stop.assert_has_calls(
calls,
any_order=True,
)
def test_stop_services_ovs(self):
self.config.return_value = 'ovs'
quantum_utils.stop_services()
calls = [call('neutron-dhcp-agent'),
call('neutron-plugin-openvswitch-agent'),
call('nova-api-metadata'),
call('neutron-l3-agent'),
call('neutron-metadata-agent')]
self.service_stop.assert_has_calls(
calls,
any_order=True,
)
def test_restart_map_nvp(self): def test_restart_map_nvp(self):
self.config.return_value = 'nvp' self.config.return_value = 'nvp'
ex_map = { ex_map = {
@@ -235,44 +266,103 @@ agents_all_alive = {
'DHCP Agent': { 'DHCP Agent': {
'agents': [ 'agents': [
{'alive': True, {'alive': True,
'host': 'cluster1-machine1.internal',
'id': '3e3550f2-38cc-11e3-9617-3c970e8b1cf7'}, 'id': '3e3550f2-38cc-11e3-9617-3c970e8b1cf7'},
{'alive': True, {'alive': True,
'host': 'cluster1-machine2.internal',
'id': '53d6eefc-38cc-11e3-b3c8-3c970e8b1cf7'}, 'id': '53d6eefc-38cc-11e3-b3c8-3c970e8b1cf7'},
{'alive': True, {'alive': True,
'id': '92b8b6bc-38ce-11e3-8537-3c970e8b1cf7'} 'host': 'cluster2-machine1.internal',
'id': '92b8b6bc-38ce-11e3-8537-3c970e8b1cf7'},
{'alive': True,
'host': 'cluster2-machine3.internal',
'id': 'ebdcc950-51c8-11e3-a804-1c6f65b044df'},
] ]
}, },
'L3 Agent': { 'L3 Agent': {
'agents': [ 'agents': [
{'alive': True, {'alive': True,
'host': 'cluster1-machine1.internal',
'id': '7128198e-38ce-11e3-ba78-3c970e8b1cf7'}, 'id': '7128198e-38ce-11e3-ba78-3c970e8b1cf7'},
{'alive': True, {'alive': True,
'host': 'cluster1-machine2.internal',
'id': '72453824-38ce-11e3-938e-3c970e8b1cf7'}, 'id': '72453824-38ce-11e3-938e-3c970e8b1cf7'},
{'alive': True, {'alive': True,
'id': '84a04126-38ce-11e3-9449-3c970e8b1cf7'} 'host': 'cluster2-machine1.internal',
'id': '84a04126-38ce-11e3-9449-3c970e8b1cf7'},
{'alive': True,
'host': 'cluster2-machine3.internal',
'id': '00f4268a-51c9-11e3-9177-1c6f65b044df'},
] ]
} }
} }
agents_some_dead = { agents_some_dead_cl1 = {
'DHCP Agent': {
'agents': [
{'alive': False,
'host': 'cluster1-machine1.internal',
'id': '3e3550f2-38cc-11e3-9617-3c970e8b1cf7'},
{'alive': True,
'host': 'cluster2-machine1.internal',
'id': '53d6eefc-38cc-11e3-b3c8-3c970e8b1cf7'},
{'alive': True,
'host': 'cluster2-machine2.internal',
'id': '92b8b6bc-38ce-11e3-8537-3c970e8b1cf7'},
{'alive': True,
'host': 'cluster2-machine3.internal',
'id': 'ebdcc950-51c8-11e3-a804-1c6f65b044df'},
]
},
'L3 Agent': {
'agents': [
{'alive': False,
'host': 'cluster1-machine1.internal',
'id': '7128198e-38ce-11e3-ba78-3c970e8b1cf7'},
{'alive': True,
'host': 'cluster2-machine1.internal',
'id': '72453824-38ce-11e3-938e-3c970e8b1cf7'},
{'alive': True,
'host': 'cluster2-machine2.internal',
'id': '84a04126-38ce-11e3-9449-3c970e8b1cf7'},
{'alive': True,
'host': 'cluster2-machine3.internal',
'id': '00f4268a-51c9-11e3-9177-1c6f65b044df'},
]
}
}
agents_some_dead_cl2 = {
'DHCP Agent': { 'DHCP Agent': {
'agents': [ 'agents': [
{'alive': True, {'alive': True,
'host': 'cluster1-machine1.internal',
'id': '3e3550f2-38cc-11e3-9617-3c970e8b1cf7'}, 'id': '3e3550f2-38cc-11e3-9617-3c970e8b1cf7'},
{'alive': False,
'id': '53d6eefc-38cc-11e3-b3c8-3c970e8b1cf7'},
{'alive': True, {'alive': True,
'id': '92b8b6bc-38ce-11e3-8537-3c970e8b1cf7'} 'host': 'cluster2-machine1.internal',
'id': '53d6eefc-38cc-11e3-b3c8-3c970e8b1cf7'},
{'alive': False,
'host': 'cluster2-machine2.internal',
'id': '92b8b6bc-38ce-11e3-8537-3c970e8b1cf7'},
{'alive': True,
'host': 'cluster2-machine3.internal',
'id': 'ebdcc950-51c8-11e3-a804-1c6f65b044df'},
] ]
}, },
'L3 Agent': { 'L3 Agent': {
'agents': [ 'agents': [
{'alive': True, {'alive': True,
'host': 'cluster1-machine1.internal',
'id': '7128198e-38ce-11e3-ba78-3c970e8b1cf7'}, 'id': '7128198e-38ce-11e3-ba78-3c970e8b1cf7'},
{'alive': True, {'alive': True,
'host': 'cluster2-machine1.internal',
'id': '72453824-38ce-11e3-938e-3c970e8b1cf7'}, 'id': '72453824-38ce-11e3-938e-3c970e8b1cf7'},
{'alive': False, {'alive': False,
'id': '84a04126-38ce-11e3-9449-3c970e8b1cf7'} 'host': 'cluster2-machine2.internal',
'id': '84a04126-38ce-11e3-9449-3c970e8b1cf7'},
{'alive': True,
'host': 'cluster2-machine3.internal',
'id': '00f4268a-51c9-11e3-9177-1c6f65b044df'},
] ]
} }
} }
@@ -291,6 +381,10 @@ l3_agent_routers = {
] ]
} }
cluster1 = ['cluster1-machine1.internal']
cluster2 = ['cluster2-machine1.internal', 'cluster2-machine2.internal',
'cluster2-machine3.internal']
class TestQuantumAgentReallocation(CharmTestCase): class TestQuantumAgentReallocation(CharmTestCase):
def setUp(self): def setUp(self):
@@ -327,12 +421,16 @@ class TestQuantumAgentReallocation(CharmTestCase):
self.NetworkServiceContext.return_value = \ self.NetworkServiceContext.return_value = \
DummyNetworkServiceContext(return_value=network_context) DummyNetworkServiceContext(return_value=network_context)
dummy_client = MagicMock() dummy_client = MagicMock()
dummy_client.list_agents.side_effect = agents_some_dead.itervalues() dummy_client.list_agents.side_effect = \
agents_some_dead_cl2.itervalues()
dummy_client.list_networks_on_dhcp_agent.return_value = \ dummy_client.list_networks_on_dhcp_agent.return_value = \
dhcp_agent_networks dhcp_agent_networks
dummy_client.list_routers_on_l3_agent.return_value = \ dummy_client.list_routers_on_l3_agent.return_value = \
l3_agent_routers l3_agent_routers
_client.return_value = dummy_client _client.return_value = dummy_client
self.unit_private_ip.return_value = 'cluster2-machine1.internal'
self.relations_of_type.return_value = \
[{'private-address': 'cluster2-machine3.internal'}]
quantum_utils.reassign_agent_resources() quantum_utils.reassign_agent_resources()
# Ensure routers removed from dead l3 agent # Ensure routers removed from dead l3 agent
@@ -343,19 +441,38 @@ class TestQuantumAgentReallocation(CharmTestCase):
router_id='baz')], any_order=True) router_id='baz')], any_order=True)
# and re-assigned across the remaining two live agents # and re-assigned across the remaining two live agents
dummy_client.add_router_to_l3_agent.assert_has_calls( dummy_client.add_router_to_l3_agent.assert_has_calls(
[call(l3_agent='7128198e-38ce-11e3-ba78-3c970e8b1cf7', [call(l3_agent='00f4268a-51c9-11e3-9177-1c6f65b044df',
body={'router_id': 'bong'}), body={'router_id': 'baz'}),
call(l3_agent='72453824-38ce-11e3-938e-3c970e8b1cf7', call(l3_agent='72453824-38ce-11e3-938e-3c970e8b1cf7',
body={'router_id': 'baz'})], any_order=True) body={'router_id': 'bong'})], any_order=True)
# Ensure networks removed from dead dhcp agent # Ensure networks removed from dead dhcp agent
dummy_client.remove_network_from_dhcp_agent.assert_has_calls( dummy_client.remove_network_from_dhcp_agent.assert_has_calls(
[call(dhcp_agent='53d6eefc-38cc-11e3-b3c8-3c970e8b1cf7', [call(dhcp_agent='92b8b6bc-38ce-11e3-8537-3c970e8b1cf7',
network_id='foo'), network_id='foo'),
call(dhcp_agent='53d6eefc-38cc-11e3-b3c8-3c970e8b1cf7', call(dhcp_agent='92b8b6bc-38ce-11e3-8537-3c970e8b1cf7',
network_id='bar')], any_order=True) network_id='bar')], any_order=True)
# and re-assigned across the remaining two live agents # and re-assigned across the remaining two live agents
dummy_client.add_network_to_dhcp_agent.assert_has_calls( dummy_client.add_network_to_dhcp_agent.assert_has_calls(
[call(dhcp_agent='3e3550f2-38cc-11e3-9617-3c970e8b1cf7', [call(dhcp_agent='53d6eefc-38cc-11e3-b3c8-3c970e8b1cf7',
body={'network_id': 'foo'}), body={'network_id': 'foo'}),
call(dhcp_agent='92b8b6bc-38ce-11e3-8537-3c970e8b1cf7', call(dhcp_agent='ebdcc950-51c8-11e3-a804-1c6f65b044df',
body={'network_id': 'bar'})], any_order=True) body={'network_id': 'bar'})], any_order=True)
@patch('neutronclient.v2_0.client.Client')
def test_agents_down_relocation_impossible(self, _client):
self.NetworkServiceContext.return_value = \
DummyNetworkServiceContext(return_value=network_context)
dummy_client = MagicMock()
dummy_client.list_agents.side_effect = \
agents_some_dead_cl1.itervalues()
dummy_client.list_networks_on_dhcp_agent.return_value = \
dhcp_agent_networks
dummy_client.list_routers_on_l3_agent.return_value = \
l3_agent_routers
_client.return_value = dummy_client
self.unit_private_ip.return_value = 'cluster1-machine1.internal'
self.relations_of_type.return_value = []
quantum_utils.reassign_agent_resources()
self.log.assert_called()
assert not dummy_client.remove_router_from_l3_agent.called
assert not dummy_client.remove_network_from_dhcp_agent.called