Merge "Neutron Router Rebalancing on L3 Agents"
commit 970d6cc4d1
@@ -178,6 +178,58 @@ class NFVIInfrastructureAPI(nfvi.api.v1.NFVIInfrastructureAPI):
                 (self._openstack_directory.get_service_info(
                     OPENSTACK_SERVICE.NOVA) is not None))

+    def get_datanetworks(self, future, host_uuid, callback):
+        """
+        Get host data networks from the plugin
+        """
+        response = dict()
+        response['completed'] = False
+        response['reason'] = ''
+
+        try:
+            future.set_timeouts(config.CONF.get('nfvi-timeouts', None))
+
+            if self._platform_token is None or \
+                    self._platform_token.is_expired():
+                future.work(openstack.get_token, self._platform_directory)
+                future.result = (yield)
+
+                if not future.result.is_complete() or \
+                        future.result.data is None:
+                    DLOG.error("OpenStack get-token did not complete.")
+                    return
+
+                self._platform_token = future.result.data
+
+            future.work(sysinv.get_datanetworks, self._platform_token,
+                        host_uuid)
+            future.result = (yield)
+
+            if not future.result.is_complete():
+                DLOG.error("SysInv get-datanetworks did not complete.")
+                return
+
+            response['result-data'] = future.result.data
+            response['completed'] = True
+
+        except exceptions.OpenStackRestAPIException as e:
+            if httplib.UNAUTHORIZED == e.http_status_code:
+                response['error-code'] = nfvi.NFVI_ERROR_CODE.TOKEN_EXPIRED
+                if self._platform_token is not None:
+                    self._platform_token.set_expired()
+
+            else:
+                DLOG.exception("Caught exception while trying to get host %s data "
+                               "networks, error=%s." % (host_uuid, e))
+
+        except Exception as e:
+            DLOG.exception("Caught exception while trying to get host %s data "
+                           "networks, error=%s." % (host_uuid, e))
+
+        finally:
+            callback.send(response)
+            callback.close()
+
     def get_system_info(self, future, callback):
         """
         Get information about the system from the plugin
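As a reference point, here is a minimal sketch (not part of the change) of how a consumer of this plugin API might handle the response dictionary that the coroutine sends through its callback; the callback function name and the canned payload are illustrative assumptions.

# Hypothetical consumer of the response dict populated by get_datanetworks() above.
def _datanetworks_callback(response):
    # Keys mirror what the plugin method populates: 'completed', 'reason', 'result-data'.
    if not response.get('completed'):
        print("get-datanetworks failed: %s" % response.get('reason', ''))
        return
    for entry in response.get('result-data', []):
        # Each entry is expected to describe one interface/data-network pairing.
        print("data network entry: %s" % entry)

# Example usage with a canned response of the same shape:
_datanetworks_callback({'completed': True, 'reason': '',
                        'result-data': [{'datanetwork_name': 'physnet0'}]})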
@@ -1109,6 +1109,452 @@ class NFVINetworkAPI(nfvi.api.v1.NFVINetworkAPI):
             callback.send(response)
             callback.close()

+    def get_network_agents(self, future, callback):
+        """
+        Get Network Agent Information for all agents on all hosts.
+        """
+        response = dict()
+        response['completed'] = False
+        response['result-data'] = ''
+        response['reason'] = ''
+
+        try:
+            future.set_timeouts(config.CONF.get('nfvi-timeouts', None))
+
+            if self._token is None or \
+                    self._token.is_expired():
+                future.work(openstack.get_token, self._directory)
+                future.result = (yield)
+
+                if not future.result.is_complete() or \
+                        future.result.data is None:
+                    DLOG.error("OpenStack get-token did not complete")
+                    return
+
+                self._token = future.result.data
+
+            if self._neutron_extensions is None:
+                future.work(neutron.get_extensions, self._token)
+                future.result = (yield)
+
+                if not future.result.is_complete():
+                    DLOG.error("Neutron get-extensions did not complete.")
+                    return
+
+                self._neutron_extensions = future.result.data
+
+            if neutron.lookup_extension(neutron.EXTENSION_NAMES.AGENT,
+                                        self._neutron_extensions):
+                # Send Query request to Neutron
+                future.work(neutron.get_network_agents,
+                            self._token)
+
+                future.result = (yield)
+                if not future.result.is_complete():
+                    DLOG.error("Neutron get-network-agents failed, "
+                               "operation did not complete")
+                    return
+                else:
+                    response['result-data'] = future.result.data
+            else:
+                DLOG.warn("Neutron Agent Extension not available")
+                return
+
+            response['completed'] = True
+
+        except exceptions.OpenStackRestAPIException as e:
+            if httplib.UNAUTHORIZED == e.http_status_code:
+                response['error-code'] = nfvi.NFVI_ERROR_CODE.TOKEN_EXPIRED
+                if self._token is not None:
+                    self._token.set_expired()
+
+            else:
+                DLOG.exception("Caught exception while trying to get "
+                               "neutron network agents, error=%s." % e)
+
+        except Exception as e:
+            DLOG.exception("Caught exception while trying to get "
+                           "neutron network agents, error=%s." % e)
+
+        finally:
+            callback.send(response)
+            callback.close()
+
+    def get_agent_routers(self, future, agent_id, callback):
+        """
+        Get Routers hosted by Network Agent.
+        """
+        response = dict()
+        response['completed'] = False
+        response['result-data'] = ''
+        response['reason'] = ''
+
+        try:
+            future.set_timeouts(config.CONF.get('nfvi-timeouts', None))
+
+            if self._token is None or \
+                    self._token.is_expired():
+                future.work(openstack.get_token, self._directory)
+                future.result = (yield)
+
+                if not future.result.is_complete() or \
+                        future.result.data is None:
+                    DLOG.error("OpenStack get-token did not complete, "
+                               "agent_id=%s." % agent_id)
+                    return
+
+                self._token = future.result.data
+
+            if self._neutron_extensions is None:
+                future.work(neutron.get_extensions, self._token)
+                future.result = (yield)
+
+                if not future.result.is_complete():
+                    DLOG.error("Neutron get-extensions did not complete.")
+                    return
+
+                self._neutron_extensions = future.result.data
+
+            if neutron.lookup_extension(neutron.EXTENSION_NAMES.AGENT,
+                                        self._neutron_extensions):
+                # Send Query request to Neutron
+                future.work(neutron.get_agent_routers,
+                            self._token, agent_id)
+
+                future.result = (yield)
+
+                if not future.result.is_complete():
+                    DLOG.error("Neutron get-agent-routers failed, "
+                               "operation did not complete, agent_id=%s"
+                               % agent_id)
+                    return
+                else:
+                    response['result-data'] = future.result.data
+            else:
+                DLOG.warn("Neutron Agent Extension not available")
+                return
+
+            response['completed'] = True
+
+        except exceptions.OpenStackRestAPIException as e:
+            if httplib.UNAUTHORIZED == e.http_status_code:
+                response['error-code'] = nfvi.NFVI_ERROR_CODE.TOKEN_EXPIRED
+                if self._token is not None:
+                    self._token.set_expired()
+
+            else:
+                DLOG.exception("Caught exception while trying to get "
+                               "agent routers, error=%s." % e)
+
+        except Exception as e:
+            DLOG.exception("Caught exception while trying to get %s "
+                           "neutron agent routers, error=%s."
+                           % (agent_id, e))
+
+        finally:
+            callback.send(response)
+            callback.close()
+
+    def get_router_ports(self, future, router_id, callback):
+        """
+        Get Ports on a Router
+        """
+        response = dict()
+        response['completed'] = False
+        response['result-data'] = ''
+        response['reason'] = ''
+
+        try:
+            future.set_timeouts(config.CONF.get('nfvi-timeouts', None))
+
+            if self._token is None or \
+                    self._token.is_expired():
+                future.work(openstack.get_token, self._directory)
+                future.result = (yield)
+
+                if not future.result.is_complete() or \
+                        future.result.data is None:
+                    DLOG.error("OpenStack get-token did not complete, "
+                               "router_id=%s." % router_id)
+                    return
+
+                self._token = future.result.data
+
+            if self._neutron_extensions is None:
+                future.work(neutron.get_extensions, self._token)
+                future.result = (yield)
+
+                if not future.result.is_complete():
+                    DLOG.error("Neutron get-extensions did not complete.")
+                    return
+
+                self._neutron_extensions = future.result.data
+
+            if neutron.lookup_extension(neutron.EXTENSION_NAMES.AGENT,
+                                        self._neutron_extensions):
+                # Send Query request to Neutron
+                future.work(neutron.get_router_ports,
+                            self._token, router_id)
+
+                future.result = (yield)
+
+                if not future.result.is_complete():
+                    DLOG.error("Neutron get-router-ports failed, "
+                               "operation did not complete, router_id=%s"
+                               % router_id)
+                    return
+                else:
+                    response['result-data'] = future.result.data
+            else:
+                DLOG.warn("Neutron Agent Extension not available")
+                return
+
+            response['completed'] = True
+
+        except exceptions.OpenStackRestAPIException as e:
+            if httplib.UNAUTHORIZED == e.http_status_code:
+                response['error-code'] = nfvi.NFVI_ERROR_CODE.TOKEN_EXPIRED
+                if self._token is not None:
+                    self._token.set_expired()
+
+            else:
+                DLOG.exception("Caught exception while trying to get "
+                               "router ports, error=%s." % e)
+
+        except Exception as e:
+            DLOG.exception("Caught exception while trying to get %s "
+                           "router ports, error=%s."
+                           % (router_id, e))
+
+        finally:
+            callback.send(response)
+            callback.close()
+
+    def add_router_to_agent(self, future, agent_id, router_id, callback):
+        """
+        Add a router to an L3 Agent.
+        """
+        response = dict()
+        response['completed'] = False
+        response['result-data'] = ''
+        response['reason'] = ''
+
+        try:
+            future.set_timeouts(config.CONF.get('nfvi-timeouts', None))
+
+            if self._token is None or \
+                    self._token.is_expired():
+                future.work(openstack.get_token, self._directory)
+                future.result = (yield)
+
+                if not future.result.is_complete() or \
+                        future.result.data is None:
+                    DLOG.error("OpenStack get-token did not complete, "
+                               "router_id=%s." % router_id)
+                    return
+
+                self._token = future.result.data
+
+            if self._neutron_extensions is None:
+                future.work(neutron.get_extensions, self._token)
+                future.result = (yield)
+
+                if not future.result.is_complete():
+                    DLOG.error("Neutron get-extensions did not complete.")
+                    return
+
+                self._neutron_extensions = future.result.data
+
+            if neutron.lookup_extension(neutron.EXTENSION_NAMES.AGENT,
+                                        self._neutron_extensions):
+                # Send Query request to Neutron
+                future.work(neutron.add_router_to_agent,
+                            self._token, agent_id, router_id)
+
+                future.result = (yield)
+
+                if not future.result.is_complete():
+                    DLOG.error("Neutron add-router-to-agent failed, "
+                               "operation did not complete, agent_id=%s "
+                               "router_id=%s" % (agent_id, router_id))
+                    return
+                else:
+                    response['result-data'] = future.result.data
+            else:
+                DLOG.warn("Neutron Agent Extension not available")
+                return
+
+            response['completed'] = True
+
+        except exceptions.OpenStackRestAPIException as e:
+            if httplib.UNAUTHORIZED == e.http_status_code:
+                response['error-code'] = nfvi.NFVI_ERROR_CODE.TOKEN_EXPIRED
+                if self._token is not None:
+                    self._token.set_expired()
+
+            else:
+                DLOG.exception("Caught exception while trying to add "
+                               "router to agent, error=%s." % e)
+
+        except Exception as e:
+            DLOG.exception("Caught exception while trying to add "
+                           "router_id=%s to agent_id=%s, error=%s."
+                           % (router_id, agent_id, e))
+
+        finally:
+            callback.send(response)
+            callback.close()
+
+    def remove_router_from_agent(self, future, agent_id, router_id, callback):
+        """
+        Remove a router from an L3 Agent.
+        """
+        response = dict()
+        response['completed'] = False
+        response['result-data'] = ''
+        response['reason'] = ''
+
+        try:
+            future.set_timeouts(config.CONF.get('nfvi-timeouts', None))
+
+            if self._token is None or \
+                    self._token.is_expired():
+                future.work(openstack.get_token, self._directory)
+                future.result = (yield)
+
+                if not future.result.is_complete() or \
+                        future.result.data is None:
+                    DLOG.error("OpenStack get-token did not complete, "
+                               "router_id=%s." % router_id)
+                    return
+
+                self._token = future.result.data
+
+            if self._neutron_extensions is None:
+                future.work(neutron.get_extensions, self._token)
+                future.result = (yield)
+
+                if not future.result.is_complete():
+                    DLOG.error("Neutron get-extensions did not complete.")
+                    return
+
+                self._neutron_extensions = future.result.data
+
+            if neutron.lookup_extension(neutron.EXTENSION_NAMES.AGENT,
+                                        self._neutron_extensions):
+                # Send Query request to Neutron
+                future.work(neutron.remove_router_from_agent,
+                            self._token, agent_id, router_id)
+
+                future.result = (yield)
+
+                if not future.result.is_complete():
+                    DLOG.error("Neutron remove-router-from-agent failed, "
+                               "operation did not complete, agent_id=%s "
+                               "router_id=%s" % (agent_id, router_id))
+                    return
+                else:
+                    response['result-data'] = future.result.data
+            else:
+                DLOG.warn("Neutron Agent Extension not available")
+                return
+
+            response['completed'] = True
+
+        except exceptions.OpenStackRestAPIException as e:
+            if httplib.UNAUTHORIZED == e.http_status_code:
+                response['error-code'] = nfvi.NFVI_ERROR_CODE.TOKEN_EXPIRED
+                if self._token is not None:
+                    self._token.set_expired()
+
+            else:
+                DLOG.exception("Caught exception while trying to remove "
+                               "router from agent, error=%s." % e)
+
+        except Exception as e:
+            DLOG.exception("Caught exception while trying to remove "
+                           "router_id=%s from agent_id=%s, error=%s."
+                           % (router_id, agent_id, e))
+
+        finally:
+            callback.send(response)
+            callback.close()
+
+    def get_physical_network(self, future, network_id, callback):
+        """
+        Get Physical Network of a network.
+        """
+        response = dict()
+        response['completed'] = False
+        response['result-data'] = ''
+        response['reason'] = ''
+
+        try:
+            future.set_timeouts(config.CONF.get('nfvi-timeouts', None))
+
+            if self._token is None or \
+                    self._token.is_expired():
+                future.work(openstack.get_token, self._directory)
+                future.result = (yield)
+
+                if not future.result.is_complete() or \
+                        future.result.data is None:
+                    DLOG.error("OpenStack get-token did not complete, "
+                               "network_id=%s." % network_id)
+                    return
+
+                self._token = future.result.data
+
+            if self._neutron_extensions is None:
+                future.work(neutron.get_extensions, self._token)
+                future.result = (yield)
+
+                if not future.result.is_complete():
+                    DLOG.error("Neutron get-extensions did not complete.")
+                    return
+
+                self._neutron_extensions = future.result.data
+
+            if neutron.lookup_extension(neutron.EXTENSION_NAMES.AGENT,
+                                        self._neutron_extensions):
+                # Send Query request to Neutron
+                future.work(neutron.get_physical_network,
+                            self._token, network_id)
+
+                future.result = (yield)
+
+                if not future.result.is_complete():
+                    DLOG.error("Neutron get-physical-network failed, "
+                               "operation did not complete, network_id=%s"
+                               % network_id)
+                    return
+                else:
+                    response['result-data'] = future.result.data
+            else:
+                DLOG.warn("Neutron Agent Extension not available")
+                return
+
+            response['completed'] = True
+
+        except exceptions.OpenStackRestAPIException as e:
+            if httplib.UNAUTHORIZED == e.http_status_code:
+                response['error-code'] = nfvi.NFVI_ERROR_CODE.TOKEN_EXPIRED
+                if self._token is not None:
+                    self._token.set_expired()
+
+            else:
+                DLOG.exception("Caught exception while trying to get "
+                               "physical network, error=%s." % e)
+
+        except Exception as e:
+            DLOG.exception("Caught exception while trying to get %s "
+                           "physical network, error=%s."
+                           % (network_id, e))
+
+        finally:
+            callback.send(response)
+            callback.close()
+
     def query_host_services(self, future, host_uuid, host_name,
                             host_personality, check_fully_up,
                             callback):
@@ -1273,6 +1719,7 @@ class NFVINetworkAPI(nfvi.api.v1.NFVINetworkAPI):
                                "operation did not complete, host_uuid=%s, "
                                "host_name=%s." % (host_uuid, host_name))
                     return
+
             else:
                 if not future.result.data:
                     DLOG.error("Neutron disable-host-services (agents) failed, "
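Taken together, the new NFVINetworkAPI calls give the VIM what it needs to move routers between L3 agents. Below is a simplified, synchronous sketch of that flow (not part of the change): the callable parameters stand in for the asynchronous plugin calls above, and the data shapes are assumptions for illustration, not the actual VIM state machine.

# Hypothetical, simplified illustration of the intended flow: drain a down L3 agent
# by unscheduling each of its routers and rescheduling them round-robin elsewhere.
def rebalance_one_down_agent(down_agent_id, candidate_agent_ids,
                             get_agent_routers, remove_router_from_agent,
                             add_router_to_agent):
    routers = get_agent_routers(down_agent_id)          # e.g. [{'id': 'r1'}, ...]
    for index, router in enumerate(routers):
        target = candidate_agent_ids[index % len(candidate_agent_ids)]
        remove_router_from_agent(down_agent_id, router['id'])
        add_router_to_agent(target, router['id'])

# Example usage with stub callables standing in for the Neutron-backed plugin APIs:
moves = []
rebalance_one_down_agent(
    'agent-down', ['agent-a', 'agent-b'],
    get_agent_routers=lambda agent: [{'id': 'r1'}, {'id': 'r2'}, {'id': 'r3'}],
    remove_router_from_agent=lambda agent, router: moves.append(('del', agent, router)),
    add_router_to_agent=lambda agent, router: moves.append(('add', agent, router)))
print(moves)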
nfv/nfv-plugins/nfv_plugins/nfvi_plugins/openstack/neutron.py  (125 lines changed; executable file → normal file)
@@ -77,18 +77,126 @@ AGENT_TYPE = AgentType()
 VNIC_TYPE = VnicType()


-def get_network_agents(token, host_name):
+def get_network_agents(token):
     """
-    Retrieve all network agents for a host.
+    Get Network Agent information for all agents.
+    """
+    url, api_cmd, api_cmd_headers, result_data = _get_network_agents(
+        token, None)
+
+    return result_data
+
+
+def get_agent_routers(token, agent_id):
+    """
+    Get all routers hosted by a particular agent
     """
     url = token.get_service_url(OPENSTACK_SERVICE.NEUTRON)
     if url is None:
         raise ValueError("OpenStack Neutron URL is invalid")

-    api_cmd = url + "/v2.0/agents?host=" + host_name
+    api_cmd = url + "/v2.0/agents/" + agent_id + "/l3-routers?fields=id"
+
+    api_cmd_headers = dict()
+    api_cmd_headers['Content-Type'] = "application/json"
+
+    response = rest_api_request(token, "GET", api_cmd, api_cmd_headers)
+    result_data = response.result_data['routers']
+
+    return result_data
+
+
+def add_router_to_agent(token, agent_id, router_id):
+    """
+    Schedule a router on an L3 agent
+    """
+    url = token.get_service_url(OPENSTACK_SERVICE.NEUTRON)
+    if url is None:
+        raise ValueError("OpenStack Neutron URL is invalid")
+
+    api_cmd = url + "/v2.0/agents/" + agent_id + "/l3-routers"
+    api_cmd_headers = dict()
+    api_cmd_headers['Content-Type'] = "application/json"
+
+    api_cmd_payload = dict()
+    api_cmd_payload['router_id'] = router_id
+
+    response = rest_api_request(token, "POST", api_cmd, api_cmd_headers,
+                                json.dumps(api_cmd_payload))
+
+    return response
+
+
+def remove_router_from_agent(token, agent_id, router_id):
+    """
+    Unschedule a router from an L3 agent
+    """
+    url = token.get_service_url(OPENSTACK_SERVICE.NEUTRON)
+    if url is None:
+        raise ValueError("OpenStack Neutron URL is invalid")
+
+    api_cmd = url + "/v2.0/agents/" + agent_id + "/l3-routers/" + router_id
+    api_cmd_headers = dict()
+    api_cmd_headers['Content-Type'] = "application/json"
+
+    response = rest_api_request(token, "DELETE", api_cmd, api_cmd_headers)
+
+    return response
+
+
+def get_router_ports(token, router_id):
+    """
+    Get port information for particular router
+    """
+    url = token.get_service_url(OPENSTACK_SERVICE.NEUTRON)
+    if url is None:
+        raise ValueError("OpenStack Neutron URL is invalid")
+
+    api_cmd = url + "/v2.0/ports?device_id=" + router_id
+    api_cmd_headers = dict()
+    api_cmd_headers['Content-Type'] = "application/json"
+
+    response = rest_api_request(token, "GET", api_cmd, api_cmd_headers)
+
+    return response
+
+
+def get_physical_network(token, network_id):
+    """
+    Get the physical network of a network
+    """
+    url = token.get_service_url(OPENSTACK_SERVICE.NEUTRON)
+    if url is None:
+        raise ValueError("OpenStack Neutron URL is invalid")
+
+    api_cmd = url + "/v2.0/networks/" + network_id + "?fields=provider%3Aphysical_network"
+    api_cmd_headers = dict()
+    api_cmd_headers['Content-Type'] = "application/json"
+
+    response = rest_api_request(token, "GET", api_cmd, api_cmd_headers)
+    result_data = response.result_data['network']
+
+    return result_data
+
+
+def _get_network_agents(token, host_name):
+    """
+    Get network agents of a host
+    """
+    url = token.get_service_url(OPENSTACK_SERVICE.NEUTRON)
+    if url is None:
+        raise ValueError("OpenStack Neutron URL is invalid")
+
+    api_cmd = url + "/v2.0/agents"
+    if host_name is not None:
+        api_cmd = api_cmd + "?host=" + host_name
+    else:
+        # if host_name is None, we are to retrieve information
+        # on all agents on all hosts.
+        fields_qualifier = "?fields=id&fields=host&fields=agent_type&fields=alive&fields=admin_state_up"
+        api_cmd = api_cmd + fields_qualifier
+
     api_cmd_headers = dict()
-    api_cmd_headers['wrs-header'] = 'true'
     api_cmd_headers['Content-Type'] = "application/json"

     response = rest_api_request(token, "GET", api_cmd, api_cmd_headers)
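For reference, the raw Neutron requests behind the new helpers are sketched below with the requests library; the endpoint, token, and IDs are placeholders, and the URL paths mirror the api_cmd strings built above.

# Illustrative only: equivalent raw calls to the Neutron v2.0 L3 agent scheduler API.
import requests

NEUTRON_URL = "http://neutron.example:9696"   # placeholder endpoint
HEADERS = {'Content-Type': 'application/json',
           'X-Auth-Token': 'PLACEHOLDER-TOKEN'}

AGENT_ID = "11111111-562c-438c-8083-0733ebbbe881"
ROUTER_ID = "415302d1-829c-42ec-aab5-a5b592de5c41"

# List routers scheduled on an L3 agent (get_agent_routers).
requests.get(NEUTRON_URL + "/v2.0/agents/" + AGENT_ID + "/l3-routers?fields=id",
             headers=HEADERS)

# Schedule a router on an agent (add_router_to_agent).
requests.post(NEUTRON_URL + "/v2.0/agents/" + AGENT_ID + "/l3-routers",
              json={'router_id': ROUTER_ID}, headers=HEADERS)

# Unschedule it again (remove_router_from_agent).
requests.delete(NEUTRON_URL + "/v2.0/agents/" + AGENT_ID + "/l3-routers/" + ROUTER_ID,
                headers=HEADERS)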
@@ -452,7 +560,7 @@ def delete_network_agents(token, host_name):
     Asks OpenStack Neutron to delete agents for a host
     """
     try:
-        url, api_cmd, api_cmd_headers, result_data = get_network_agents(
+        url, api_cmd, api_cmd_headers, result_data = _get_network_agents(
             token, host_name)

         num_agents_found = 0
@@ -538,7 +646,7 @@ def enable_network_agents(token, host_name):
     before declaring success.
     """
     try:
-        url, api_cmd, api_cmd_headers, result_data = get_network_agents(
+        url, api_cmd, api_cmd_headers, result_data = _get_network_agents(
             token, host_name)

         payload = dict()
@@ -613,7 +721,7 @@ def disable_network_agents(token, host_name):
     to False. Other agents are left alone.
     """
     try:
-        url, api_cmd, api_cmd_headers, result_data = get_network_agents(
+        url, api_cmd, api_cmd_headers, result_data = _get_network_agents(
             token, host_name)

         payload = dict()
@@ -700,9 +808,8 @@ def query_network_agents(token, host_name, check_fully_up):
     Input parameter check_fully_up set to True will check for
     both alive and admin_state_up, otherwise only alive is checked.
     """
-    url, api_cmd, api_cmd_headers, result_data = get_network_agents(
+    url, api_cmd, api_cmd_headers, result_data = _get_network_agents(
         token, host_name)

     agent_state = 'up'
     alive = False
     admin_state_up = False
@@ -12,6 +12,25 @@ from nfv_plugins.nfvi_plugins.openstack.rest_api import rest_api_request
 DLOG = debug.debug_get_logger('nfv_plugins.nfvi_plugins.openstack.sysinv')


+def get_datanetworks(token, host_uuid):
+    """
+    Get all data networks on a host.
+    """
+    url = token.get_service_url(PLATFORM_SERVICE.SYSINV)
+    if url is None:
+        raise ValueError("OpenStack SysInv URL is invalid")
+
+    api_cmd = url + "/ihosts/" + host_uuid + "/interface_datanetworks"
+    api_cmd_headers = dict()
+    api_cmd_headers['Content-Type'] = "application/json"
+    api_cmd_headers['User-Agent'] = "vim/1.0"
+
+    response = rest_api_request(token, "GET", api_cmd, api_cmd_headers)
+    result_data = response.result_data['interface_datanetworks']
+
+    return result_data
+
+
 def get_system_info(token):
     """
     Asks System Inventory for information about the system, such as
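A small sketch of how the interface_datanetworks result might be reduced to the per-host data network names used elsewhere in this change (for example the 'datanets' lists in the unit tests); the 'datanetwork_name' key is an assumption about the SysInv payload, so canned data is used here instead of a live response.

# Illustrative only: collapse interface/data-network pairings into data network names.
def datanet_names(interface_datanetworks):
    names = set()
    for entry in interface_datanetworks:
        names.add(entry['datanetwork_name'])   # assumed field name
    return sorted(names)

print(datanet_names([{'datanetwork_name': 'physnet0'},
                     {'datanetwork_name': 'physnet1'},
                     {'datanetwork_name': 'physnet0'}]))   # ['physnet0', 'physnet1']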
522
nfv/nfv-tests/nfv_unit_tests/tests/test_network_rebalance.py
Normal file
522
nfv/nfv-tests/nfv_unit_tests/tests/test_network_rebalance.py
Normal file
@ -0,0 +1,522 @@
|
|||||||
|
#
|
||||||
|
# Copyright (c) 2015-2016 Wind River Systems, Inc.
|
||||||
|
#
|
||||||
|
# SPDX-License-Identifier: Apache-2.0
|
||||||
|
#
|
||||||
|
import mock
|
||||||
|
import random
|
||||||
|
|
||||||
|
from nfv_vim.network_rebalance._network_rebalance import _L3Rebalance
|
||||||
|
from nfv_vim.network_rebalance._network_rebalance import _reschedule_down_agent
|
||||||
|
from nfv_vim.network_rebalance._network_rebalance import _reschedule_new_agent
|
||||||
|
from nfv_vim.network_rebalance._network_rebalance import L3_REBALANCE_STATE
|
||||||
|
|
||||||
|
from . import testcase # noqa: H304
|
||||||
|
|
||||||
|
DEBUG_PRINTING = False
|
||||||
|
|
||||||
|
|
||||||
|
def fake_nfvi_remove_router_from_agent(a, b, c):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
@mock.patch('nfv_vim.nfvi.nfvi_remove_router_from_agent', fake_nfvi_remove_router_from_agent)
|
||||||
|
class TestNeutronRebalance(testcase.NFVTestCase):
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
super(TestNeutronRebalance, self).setUp()
|
||||||
|
|
||||||
|
def tearDown(self):
|
||||||
|
super(TestNeutronRebalance, self).tearDown()
|
||||||
|
|
||||||
|
def test_rebalance_down_host_canned(self):
|
||||||
|
_L3Rebalance.reinit()
|
||||||
|
_L3Rebalance.router_diff_threshold = 1
|
||||||
|
|
||||||
|
# Down agent will be first agent in list.
|
||||||
|
_L3Rebalance.l3agents.append({'datanets': [u'physnet0', u'physnet3'],
|
||||||
|
'host': u'compute-0',
|
||||||
|
'id': u'00000000-3de6-4717-93d4-0f23c38d2bf2',
|
||||||
|
'host_uuid': u'eb2eca67-1018-4c84-9b2c-b9c2662c41a6'})
|
||||||
|
_L3Rebalance.l3agents.append({'datanets': [u'physnet0', u'physnet1'],
|
||||||
|
'host': u'compute-2',
|
||||||
|
'id': u'22222222-5a5f-4c58-9399-12d0b8e7e321',
|
||||||
|
'host_uuid': u'021f35d2-4a98-41ab-87c5-2660cecd501d'})
|
||||||
|
_L3Rebalance.l3agents.append({'datanets': [u'physnet0', u'physnet1'],
|
||||||
|
'host': u'compute-1',
|
||||||
|
'id': u'11111111-562c-438c-8083-0733ebbbe881',
|
||||||
|
'host_uuid': u'7ebc0819-2b11-4aa8-8ef1-3a5423c17eef'})
|
||||||
|
_L3Rebalance.l3agents.append({'datanets': [u'physnet0', u'physnet1', u'physnet3'],
|
||||||
|
'host': u'compute-3',
|
||||||
|
'id': u'33333333-8989-438c-7083-344322513677',
|
||||||
|
'host_uuid': u'23423524-8b11-4ba8-8ef1-2346625326eb'})
|
||||||
|
_L3Rebalance.l3agents.append({'datanets': [u'physnet0', u'physnet1'],
|
||||||
|
'host': u'compute-5',
|
||||||
|
'id': u'55555555-930c-438c-6083-173472902843',
|
||||||
|
'host_uuid': u'09132345-7b11-4ca7-8ef1-3a5423c17ecd'})
|
||||||
|
_L3Rebalance.l3agents.append({'datanets': [u'physnet0', u'physnet1', u'physnet3'],
|
||||||
|
'host': u'compute-4',
|
||||||
|
'id': u'44444444-0074-438c-5083-023486659382',
|
||||||
|
'host_uuid': u'89891234-3b11-9da8-8ef1-aaa4a3a17aea'})
|
||||||
|
|
||||||
|
# compute-0 routers
|
||||||
|
agent_id = u'00000000-3de6-4717-93d4-0f23c38d2bf2'
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id] = list()
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'415302d1-829c-42ec-aab5-a5b592de5c41')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'fb6c7812-5aa6-4303-a8e8-654d2c61c107')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'f900c5a3-a8f2-4348-a63f-ed0b9d2ca2b1')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'71205e20-d42f-46d0-ad6b-dd325f9b959b')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'43223570-ab32-25d0-ae6c-352aaab23532')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'45692991-e52f-96c0-bd6d-ed428f9a969b')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'97867e20-a92e-1610-a161-1d121f1b151b')
|
||||||
|
|
||||||
|
# compute-2 routers
|
||||||
|
agent_id = u'22222222-5a5f-4c58-9399-12d0b8e7e321'
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id] = list()
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'a913c4a3-4d6b-4a4d-9cf5-f8b7c30224a4')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'7c0909c6-c03f-4c14-9d05-e910ab5eb255')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'0c59b77a-b316-4963-90e5-bf689568ac58')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'23423422-3433-fdfd-2222-fdsdfsasvccd')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'11432542-aabb-3415-4443-xcvlkweroidd')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'sd093kds-b2dd-eb3d-23bs-asdwebesdedw')
|
||||||
|
|
||||||
|
# compute-1 routers
|
||||||
|
agent_id = u'11111111-562c-438c-8083-0733ebbbe881'
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id] = list()
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'5054adb8-aef5-445d-b335-fc4bb3ee0871')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'91f20f34-ad68-4483-9ae7-8f917a1460d8')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'23093482-bd68-4c83-cae9-9287467ababa')
|
||||||
|
|
||||||
|
# compute-3 routers
|
||||||
|
agent_id = u'33333333-8989-438c-7083-344322513677'
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id] = list()
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'51019325-a1d4-410f-a83d-9eb54743dcf0')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'c1c8c935-6302-4c5d-98ee-c12bbd900abf')
|
||||||
|
|
||||||
|
# compute-5 routers
|
||||||
|
agent_id = u'55555555-930c-438c-6083-173472902843'
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id] = list()
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'2e50468a-755a-4bfb-bc29-f7aadc66c598')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'7ebc0819-2b11-4aa8-8ef1-3a5423c17eef')
|
||||||
|
|
||||||
|
# compute-4 routers
|
||||||
|
|
||||||
|
agent_id = u'44444444-0074-438c-5083-023486659382'
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id] = list()
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'4c0213e7-4b36-439b-9e47-d5509e0950f1')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'021f35d2-4a98-41ab-87c5-2660cecd501d')
|
||||||
|
|
||||||
|
_L3Rebalance.networks_per_router[u'415302d1-829c-42ec-aab5-a5b592de5c41'] = ['physnet0', 'physnet3']
|
||||||
|
_L3Rebalance.networks_per_router[u'fb6c7812-5aa6-4303-a8e8-654d2c61c107'] = ['physnet0', 'physnet3']
|
||||||
|
_L3Rebalance.networks_per_router[u'f900c5a3-a8f2-4348-a63f-ed0b9d2ca2b1'] = ['physnet0', 'physnet3']
|
||||||
|
_L3Rebalance.networks_per_router[u'71205e20-d42f-46d0-ad6b-dd325f9b959b'] = ['physnet0', 'physnet3']
|
||||||
|
_L3Rebalance.networks_per_router[u'43223570-ab32-25d0-ae6c-352aaab23532'] = ['physnet0', 'physnet3']
|
||||||
|
_L3Rebalance.networks_per_router[u'45692991-e52f-96c0-bd6d-ed428f9a969b'] = ['physnet0', 'physnet3']
|
||||||
|
_L3Rebalance.networks_per_router[u'97867e20-a92e-1610-a161-1d121f1b151b'] = ['physnet0', 'physnet3']
|
||||||
|
|
||||||
|
_L3Rebalance.networks_per_router[u'a913c4a3-4d6b-4a4d-9cf5-f8b7c30224a4'] = ['physnet0', 'physnet1']
|
||||||
|
_L3Rebalance.networks_per_router[u'7c0909c6-c03f-4c14-9d05-e910ab5eb255'] = ['physnet0', 'physnet1']
|
||||||
|
_L3Rebalance.networks_per_router[u'0c59b77a-b316-4963-90e5-bf689568ac58'] = ['physnet0', 'physnet1']
|
||||||
|
_L3Rebalance.networks_per_router[u'23423422-3433-fdfd-2222-fdsdfsasvccd'] = ['physnet0', 'physnet1']
|
||||||
|
_L3Rebalance.networks_per_router[u'11432542-aabb-3415-4443-xcvlkweroidd'] = ['physnet0', 'physnet1']
|
||||||
|
_L3Rebalance.networks_per_router[u'sd093kds-b2dd-eb3d-23bs-asdwebesdedw'] = ['physnet0', 'physnet1']
|
||||||
|
|
||||||
|
_L3Rebalance.networks_per_router[u'5054adb8-aef5-445d-b335-fc4bb3ee0871'] = ['physnet0', 'physnet1']
|
||||||
|
_L3Rebalance.networks_per_router[u'91f20f34-ad68-4483-9ae7-8f917a1460d8'] = ['physnet0', 'physnet1']
|
||||||
|
_L3Rebalance.networks_per_router[u'23093482-bd68-4c83-cae9-9287467ababa'] = ['physnet0', 'physnet1']
|
||||||
|
|
||||||
|
_L3Rebalance.networks_per_router[u'51019325-a1d4-410f-a83d-9eb54743dcf0'] = ['physnet0', 'physnet1']
|
||||||
|
_L3Rebalance.networks_per_router[u'c1c8c935-6302-4c5d-98ee-c12bbd900abf'] = ['physnet0', 'physnet1']
|
||||||
|
|
||||||
|
_L3Rebalance.networks_per_router[u'2e50468a-755a-4bfb-bc29-f7aadc66c598'] = ['physnet0', 'physnet1']
|
||||||
|
_L3Rebalance.networks_per_router[u'7ebc0819-2b11-4aa8-8ef1-3a5423c17eef'] = ['physnet0', 'physnet1']
|
||||||
|
|
||||||
|
_L3Rebalance.networks_per_router[u'4c0213e7-4b36-439b-9e47-d5509e0950f1'] = ['physnet0', 'physnet1']
|
||||||
|
_L3Rebalance.networks_per_router[u'021f35d2-4a98-41ab-87c5-2660cecd501d'] = ['physnet0', 'physnet1']
|
||||||
|
|
||||||
|
_L3Rebalance.state_machine_in_progress = False
|
||||||
|
_L3Rebalance.l3agent_idx = 0
|
||||||
|
_L3Rebalance.router_idx = 0
|
||||||
|
_L3Rebalance.l3agent_down = '00000000-3de6-4717-93d4-0f23c38d2bf2'
|
||||||
|
_L3Rebalance.num_routers_on_agents = [7, 6, 3, 2, 2, 2]
|
||||||
|
_L3Rebalance.num_l3agents = len(_L3Rebalance.num_routers_on_agents)
|
||||||
|
|
||||||
|
_L3Rebalance.set_state(L3_REBALANCE_STATE.RESCHEDULE_DOWN_AGENT)
|
||||||
|
_L3Rebalance.working_host = 'compute-0'
|
||||||
|
|
||||||
|
while (_L3Rebalance.get_state() == L3_REBALANCE_STATE.RESCHEDULE_DOWN_AGENT):
|
||||||
|
_reschedule_down_agent()
|
||||||
|
|
||||||
|
# Only agents that can host physnet3 are 3 and 5, expect routers from agent 0
|
||||||
|
# to be evenly spread over the two of them.
|
||||||
|
if DEBUG_PRINTING:
|
||||||
|
print("_L3Rebalance.num_routers_on_agents= %s" % _L3Rebalance.num_routers_on_agents)
|
||||||
|
print("_L3Rebalance.router_ids_per_agent= %s" % _L3Rebalance.router_ids_per_agent)
|
||||||
|
|
||||||
|
assert _L3Rebalance.num_routers_on_agents[0] == 0
|
||||||
|
assert _L3Rebalance.num_routers_on_agents[1] == 6
|
||||||
|
assert _L3Rebalance.num_routers_on_agents[2] == 3
|
||||||
|
assert _L3Rebalance.num_routers_on_agents[3] == 6
|
||||||
|
assert _L3Rebalance.num_routers_on_agents[4] == 2
|
||||||
|
assert _L3Rebalance.num_routers_on_agents[5] == 5
|
||||||
|
|
||||||
|
def test_rebalance_new_host_canned(self):
|
||||||
|
_L3Rebalance.reinit()
|
||||||
|
_L3Rebalance.router_diff_threshold = 1
|
||||||
|
|
||||||
|
_L3Rebalance.l3agents.append({'datanets': [u'physnet0', u'physnet3'],
|
||||||
|
'host': u'compute-0',
|
||||||
|
'id': u'00000000-3de6-4717-93d4-0f23c38d2bf2',
|
||||||
|
'host_uuid': u'eb2eca67-1018-4c84-9b2c-b9c2662c41a6'})
|
||||||
|
_L3Rebalance.l3agents.append({'datanets': [u'physnet0', u'physnet1'],
|
||||||
|
'host': u'compute-2',
|
||||||
|
'id': u'22222222-5a5f-4c58-9399-12d0b8e7e321',
|
||||||
|
'host_uuid': u'021f35d2-4a98-41ab-87c5-2660cecd501d'})
|
||||||
|
_L3Rebalance.l3agents.append({'datanets': [u'physnet0', u'physnet1'],
|
||||||
|
'host': u'compute-1',
|
||||||
|
'id': u'11111111-562c-438c-8083-0733ebbbe881',
|
||||||
|
'host_uuid': u'7ebc0819-2b11-4aa8-8ef1-3a5423c17eef'})
|
||||||
|
_L3Rebalance.l3agents.append({'datanets': [u'physnet0', u'physnet1', u'physnet3'],
|
||||||
|
'host': u'compute-3',
|
||||||
|
'id': u'33333333-8989-438c-7083-344322513677',
|
||||||
|
'host_uuid': u'23423524-8b11-4ba8-8ef1-2346625326eb'})
|
||||||
|
_L3Rebalance.l3agents.append({'datanets': [u'physnet0', u'physnet1'],
|
||||||
|
'host': u'compute-5',
|
||||||
|
'id': u'55555555-930c-438c-6083-173472902843',
|
||||||
|
'host_uuid': u'09132345-7b11-4ca7-8ef1-3a5423c17ecd'})
|
||||||
|
_L3Rebalance.l3agents.append({'datanets': [u'physnet0', u'physnet1', u'physnet3'],
|
||||||
|
'host': u'compute-4',
|
||||||
|
'id': u'44444444-0074-438c-5083-023486659382',
|
||||||
|
'host_uuid': u'89891234-3b11-9da8-8ef1-aaa4a3a17aea'})
|
||||||
|
|
||||||
|
# compute-0 routers
|
||||||
|
agent_id = u'00000000-3de6-4717-93d4-0f23c38d2bf2'
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id] = list()
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'415302d1-829c-42ec-aab5-a5b592de5c41')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'fb6c7812-5aa6-4303-a8e8-654d2c61c107')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'f900c5a3-a8f2-4348-a63f-ed0b9d2ca2b1')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'71205e20-d42f-46d0-ad6b-dd325f9b959b')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'43223570-ab32-25d0-ae6c-352aaab23532')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'45692991-e52f-96c0-bd6d-ed428f9a969b')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'97867e20-a92e-1610-a161-1d121f1b151b')
|
||||||
|
|
||||||
|
# compute-2 routers
|
||||||
|
agent_id = u'22222222-5a5f-4c58-9399-12d0b8e7e321'
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id] = list()
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'a913c4a3-4d6b-4a4d-9cf5-f8b7c30224a4')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'7c0909c6-c03f-4c14-9d05-e910ab5eb255')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'0c59b77a-b316-4963-90e5-bf689568ac58')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'23423422-3433-fdfd-2222-fdsdfsasvccd')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'11432542-aabb-3415-4443-xcvlkweroidd')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'sd093kds-b2dd-eb3d-23bs-asdwebesdedw')
|
||||||
|
|
||||||
|
# compute-1 routers
|
||||||
|
agent_id = u'11111111-562c-438c-8083-0733ebbbe881'
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id] = list()
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'5054adb8-aef5-445d-b335-fc4bb3ee0871')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'91f20f34-ad68-4483-9ae7-8f917a1460d8')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'23093482-bd68-4c83-cae9-9287467ababa')
|
||||||
|
|
||||||
|
# compute-3 routers
|
||||||
|
agent_id = u'33333333-8989-438c-7083-344322513677'
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id] = list()
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'51019325-a1d4-410f-a83d-9eb54743dcf0')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'c1c8c935-6302-4c5d-98ee-c12bbd900abf')
|
||||||
|
|
||||||
|
# compute-5 routers
|
||||||
|
agent_id = u'55555555-930c-438c-6083-173472902843'
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id] = list()
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'4c0213e7-4b36-439b-9e47-d5509e0950f1')
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(u'021f35d2-4a98-41ab-87c5-2660cecd501d')
|
||||||
|
|
||||||
|
# compute-4 routers
|
||||||
|
agent_id = u'44444444-0074-438c-5083-023486659382'
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id] = list()
|
||||||
|
|
||||||
|
_L3Rebalance.networks_per_router[u'415302d1-829c-42ec-aab5-a5b592de5c41'] = ['physnet0', 'physnet3']
|
||||||
|
_L3Rebalance.networks_per_router[u'fb6c7812-5aa6-4303-a8e8-654d2c61c107'] = ['physnet0', 'physnet3']
|
||||||
|
_L3Rebalance.networks_per_router[u'f900c5a3-a8f2-4348-a63f-ed0b9d2ca2b1'] = ['physnet0', 'physnet3']
|
||||||
|
_L3Rebalance.networks_per_router[u'71205e20-d42f-46d0-ad6b-dd325f9b959b'] = ['physnet0', 'physnet3']
|
||||||
|
_L3Rebalance.networks_per_router[u'43223570-ab32-25d0-ae6c-352aaab23532'] = ['physnet0', 'physnet3']
|
||||||
|
_L3Rebalance.networks_per_router[u'45692991-e52f-96c0-bd6d-ed428f9a969b'] = ['physnet0', 'physnet3']
|
||||||
|
_L3Rebalance.networks_per_router[u'97867e20-a92e-1610-a161-1d121f1b151b'] = ['physnet0', 'physnet3']
|
||||||
|
|
||||||
|
_L3Rebalance.networks_per_router[u'a913c4a3-4d6b-4a4d-9cf5-f8b7c30224a4'] = ['physnet0', 'physnet1']
|
||||||
|
_L3Rebalance.networks_per_router[u'7c0909c6-c03f-4c14-9d05-e910ab5eb255'] = ['physnet0', 'physnet1']
|
||||||
|
_L3Rebalance.networks_per_router[u'0c59b77a-b316-4963-90e5-bf689568ac58'] = ['physnet0', 'physnet1']
|
||||||
|
_L3Rebalance.networks_per_router[u'23423422-3433-fdfd-2222-fdsdfsasvccd'] = ['physnet0', 'physnet1']
|
||||||
|
_L3Rebalance.networks_per_router[u'11432542-aabb-3415-4443-xcvlkweroidd'] = ['physnet0', 'physnet1']
|
||||||
|
_L3Rebalance.networks_per_router[u'sd093kds-b2dd-eb3d-23bs-asdwebesdedw'] = ['physnet0', 'physnet1']
|
||||||
|
|
||||||
|
_L3Rebalance.networks_per_router[u'5054adb8-aef5-445d-b335-fc4bb3ee0871'] = ['physnet0', 'physnet1']
|
||||||
|
_L3Rebalance.networks_per_router[u'91f20f34-ad68-4483-9ae7-8f917a1460d8'] = ['physnet0', 'physnet1']
|
||||||
|
_L3Rebalance.networks_per_router[u'23093482-bd68-4c83-cae9-9287467ababa'] = ['physnet0', 'physnet1']
|
||||||
|
|
||||||
|
_L3Rebalance.networks_per_router[u'51019325-a1d4-410f-a83d-9eb54743dcf0'] = ['physnet0', 'physnet1']
|
||||||
|
_L3Rebalance.networks_per_router[u'c1c8c935-6302-4c5d-98ee-c12bbd900abf'] = ['physnet0', 'physnet1']
|
||||||
|
|
||||||
|
_L3Rebalance.networks_per_router[u'4c0213e7-4b36-439b-9e47-d5509e0950f1'] = ['physnet0', 'physnet1']
|
||||||
|
_L3Rebalance.networks_per_router[u'021f35d2-4a98-41ab-87c5-2660cecd501d'] = ['physnet0', 'physnet1']
|
||||||
|
|
||||||
|
_L3Rebalance.state_machine_in_progress = False
|
||||||
|
_L3Rebalance.l3agent_idx = 0
|
||||||
|
_L3Rebalance.router_idx = 0
|
||||||
|
_L3Rebalance.l3agent_down = '00000000-3de6-4717-93d4-0f23c38d2bf2'
|
||||||
|
_L3Rebalance.num_routers_on_agents = [7, 6, 3, 2, 2, 0]
|
||||||
|
_L3Rebalance.num_l3agents = len(_L3Rebalance.num_routers_on_agents)
|
||||||
|
|
||||||
|
_L3Rebalance.set_state(L3_REBALANCE_STATE.RESCHEDULE_NEW_AGENT)
|
||||||
|
_L3Rebalance.working_host = None
|
||||||
|
|
||||||
|
while (_L3Rebalance.get_state() == L3_REBALANCE_STATE.RESCHEDULE_NEW_AGENT):
|
||||||
|
_reschedule_new_agent()
|
||||||
|
|
||||||
|
# Only agents that can host physnet3 are 3 and 5, expect routers from agent 0
|
||||||
|
# to be evenly spread over the two of them.
|
||||||
|
if DEBUG_PRINTING:
|
||||||
|
print("_L3Rebalance.num_routers_on_agents= %s" % _L3Rebalance.num_routers_on_agents)
|
||||||
|
print("_L3Rebalance.router_ids_per_agent= %s" % _L3Rebalance.router_ids_per_agent)
|
||||||
|
|
||||||
|
assert _L3Rebalance.num_routers_on_agents[0] == 4
|
||||||
|
assert _L3Rebalance.num_routers_on_agents[1] == 4
|
||||||
|
assert _L3Rebalance.num_routers_on_agents[2] == 3
|
||||||
|
assert _L3Rebalance.num_routers_on_agents[3] == 3
|
||||||
|
assert _L3Rebalance.num_routers_on_agents[4] == 3
|
||||||
|
assert _L3Rebalance.num_routers_on_agents[5] == 3
|
||||||
|
|
||||||
|
def run_rebalance(self, num_agents_list, network_name_extra, host_name):
|
||||||
|
_L3Rebalance.reinit()
|
||||||
|
|
||||||
|
_L3Rebalance.l3agents.append({'datanets': ['physnet0', 'physnet1'],
|
||||||
|
'host': u'compute-0',
|
||||||
|
'id': 'agentid-compute-0',
|
||||||
|
'host_uuid': u'eb2eca67-1018-4c84-9b2c-b9c2662c41a6'})
|
||||||
|
_L3Rebalance.l3agents.append({'datanets': ['physnet0', 'physnet1'],
|
||||||
|
'host': u'compute-1',
|
||||||
|
'id': 'agentid-compute-1',
|
||||||
|
'host_uuid': u'021f35d2-4a98-41ab-87c5-2660cecd501d'})
|
||||||
|
_L3Rebalance.l3agents.append({'datanets': ['physnet0', 'physnet1'],
|
||||||
|
'host': u'compute-2',
|
||||||
|
'id': 'agentid-compute-2',
|
||||||
|
'host_uuid': u'7ebc0819-2b11-4aa8-8ef1-3a5423c17eef'})
|
||||||
|
_L3Rebalance.l3agents.append({'datanets': ['physnet0', 'physnet1', network_name_extra],
|
||||||
|
'host': u'compute-3',
|
||||||
|
'id': 'agentid-compute-3',
|
||||||
|
'host_uuid': u'23423524-8b11-4ba8-8ef1-2346625326eb'})
|
||||||
|
_L3Rebalance.l3agents.append({'datanets': [u'physnet0', u'physnet1'],
|
||||||
|
'host': u'compute-4',
|
||||||
|
'id': 'agentid-compute-4',
|
||||||
|
'host_uuid': u'09132345-7b11-4ca7-8ef1-3a5423c17ecd'})
|
||||||
|
_L3Rebalance.l3agents.append({'datanets': ['physnet0', 'physnet1', network_name_extra],
|
||||||
|
'host': u'compute-5',
|
||||||
|
'id': 'agentid-compute-5',
|
||||||
|
'host_uuid': u'89891234-3b11-9da8-8ef1-aaa4a3a17aea'})
|
||||||
|
_L3Rebalance.l3agents.append({'datanets': ['physnet0', 'physnet1', network_name_extra],
|
||||||
|
'host': u'compute-6',
|
||||||
|
'id': 'agentid-compute-6',
|
||||||
|
'host_uuid': u'bbbaaac4-3b21-87a8-65f1-6a3422a11aba'})
|
||||||
|
|
||||||
|
# compute-0 routers
|
||||||
|
agent_id = 'agentid-compute-0'
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id] = list()
|
||||||
|
num_routers = num_agents_list[0]
|
||||||
|
_L3Rebalance.num_routers_on_agents.append(num_routers)
|
||||||
|
for router in range(0, num_routers):
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(agent_id + '-' + str(router))
|
||||||
|
_L3Rebalance.networks_per_router[agent_id + '-' + str(router)] = ['physnet0', 'physnet1']
|
||||||
|
|
||||||
|
# compute-1 routers
|
||||||
|
agent_id = 'agentid-compute-1'
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id] = list()
|
||||||
|
num_routers = num_agents_list[1]
|
||||||
|
_L3Rebalance.num_routers_on_agents.append(num_routers)
|
||||||
|
for router in range(0, num_routers):
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(agent_id + '-' + str(router))
|
||||||
|
_L3Rebalance.networks_per_router[agent_id + '-' + str(router)] = ['physnet0', 'physnet1']
|
||||||
|
|
||||||
|
# compute-2 routers
|
||||||
|
agent_id = 'agentid-compute-2'
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id] = list()
|
||||||
|
num_routers = num_agents_list[2]
|
||||||
|
_L3Rebalance.num_routers_on_agents.append(num_routers)
|
||||||
|
for router in range(0, num_routers):
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(agent_id + '-' + str(router))
|
||||||
|
_L3Rebalance.networks_per_router[agent_id + '-' + str(router)] = ['physnet0', 'physnet1']
|
||||||
|
|
||||||
|
# compute-3 routers
|
||||||
|
agent_id = 'agentid-compute-3'
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id] = list()
|
||||||
|
num_routers = num_agents_list[3]
|
||||||
|
_L3Rebalance.num_routers_on_agents.append(num_routers)
|
||||||
|
for router in range(0, num_routers):
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(agent_id + '-' + str(router))
|
||||||
|
_L3Rebalance.networks_per_router[agent_id + '-' + str(router)] = ['physnet0', network_name_extra]
|
||||||
|
|
||||||
|
# compute-4 routers
|
||||||
|
agent_id = 'agentid-compute-4'
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id] = list()
|
||||||
|
num_routers = num_agents_list[4]
|
||||||
|
_L3Rebalance.num_routers_on_agents.append(num_routers)
|
||||||
|
for router in range(0, num_routers):
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(agent_id + '-' + str(router))
|
||||||
|
_L3Rebalance.networks_per_router[agent_id + '-' + str(router)] = ['physnet0', 'physnet1']
|
||||||
|
|
||||||
|
# compute-5 routers
|
||||||
|
agent_id = 'agentid-compute-5'
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id] = list()
|
||||||
|
num_routers = num_agents_list[5]
|
||||||
|
_L3Rebalance.num_routers_on_agents.append(num_routers)
|
||||||
|
for router in range(0, num_routers):
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(agent_id + '-' + str(router))
|
||||||
|
_L3Rebalance.networks_per_router[agent_id + '-' + str(router)] = ['physnet0', 'physnet1', network_name_extra]
|
||||||
|
|
||||||
|
# compute-6 routers
|
||||||
|
agent_id = 'agentid-compute-6'
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id] = list()
|
||||||
|
num_routers = num_agents_list[6]
|
||||||
|
_L3Rebalance.num_routers_on_agents.append(num_routers)
|
||||||
|
for router in range(0, num_routers):
|
||||||
|
_L3Rebalance.router_ids_per_agent[agent_id].append(agent_id + '-' + str(router))
|
||||||
|
_L3Rebalance.networks_per_router[agent_id + '-' + str(router)] = ['physnet0', 'physnet1', network_name_extra]
|
||||||
|
|
||||||
|
_L3Rebalance.state_machine_in_progress = False
|
||||||
|
_L3Rebalance.num_l3agents = len(num_agents_list)
|
||||||
|
|
||||||
|
_L3Rebalance.working_host = host_name
|
||||||
|
if host_name is not None:
|
||||||
|
_L3Rebalance.set_state(L3_REBALANCE_STATE.RESCHEDULE_DOWN_AGENT)
|
||||||
|
while (_L3Rebalance.get_state() == L3_REBALANCE_STATE.RESCHEDULE_DOWN_AGENT):
|
||||||
|
_reschedule_down_agent()
|
||||||
|
else:
|
||||||
|
_L3Rebalance.set_state(L3_REBALANCE_STATE.RESCHEDULE_NEW_AGENT)
|
||||||
|
while (_L3Rebalance.get_state() == L3_REBALANCE_STATE.RESCHEDULE_NEW_AGENT):
|
||||||
|
_reschedule_new_agent()
|
||||||
|
|
||||||
|
def rebalance(self, host_name=None):
|
||||||
|
# test not all nets on all agents, expect balancing
|
||||||
|
# among supported.
|
||||||
|
num_agents_in = [97, 67, 78, 145, 21, 108, 35]
|
||||||
|
if DEBUG_PRINTING:
|
||||||
|
print("host_name = %s, num_agents_in = %s" % (host_name, num_agents_in))
|
||||||
|
self.run_rebalance(num_agents_in, 'physnet3', host_name)
|
||||||
|
assert sum(num_agents_in) == sum(_L3Rebalance.num_routers_on_agents)
|
||||||
|
|
||||||
|
if host_name is not None:
|
||||||
|
assert _L3Rebalance.num_routers_on_agents[0] == 0
|
||||||
|
elif _L3Rebalance.router_diff_threshold == 1:
|
||||||
|
        assert _L3Rebalance.num_routers_on_agents[0] == 66
        assert _L3Rebalance.num_routers_on_agents[1] == 66
        assert _L3Rebalance.num_routers_on_agents[2] == 66
        assert _L3Rebalance.num_routers_on_agents[3] == 96
        assert _L3Rebalance.num_routers_on_agents[4] == 65
        assert _L3Rebalance.num_routers_on_agents[5] == 96
        assert _L3Rebalance.num_routers_on_agents[6] == 96
        # TODO(kevin), make sure each router is only present once.

        if DEBUG_PRINTING:
            print("Test 2 _L3Rebalance.num_routers_on_agents= %s" % _L3Rebalance.num_routers_on_agents)

        del num_agents_in[:]
        num_agents_in = [5, 20, 31, 32, 44, 0, 0]
        if DEBUG_PRINTING:
            print("host_name = %s, num_agents_in = %s" % (host_name, num_agents_in))
        self.run_rebalance(num_agents_in, 'physnet3', host_name)
        assert sum(num_agents_in) == sum(_L3Rebalance.num_routers_on_agents)

        if DEBUG_PRINTING:
            print("Test 2 _L3Rebalance.num_routers_on_agents= %s" % _L3Rebalance.num_routers_on_agents)
            # print("Test 2 _L3Rebalance.router_ids_per_agent= %s" % _L3Rebalance.router_ids_per_agent)

        del num_agents_in[:]
        num_agents_in = [0, 11, 31, 11, 44, 0, 25]
        if DEBUG_PRINTING:
            print("host_name = %s, num_agents_in = %s" % (host_name, num_agents_in))
        self.run_rebalance(num_agents_in, 'physnet2', host_name)
        assert sum(num_agents_in) == sum(_L3Rebalance.num_routers_on_agents)

        if DEBUG_PRINTING:
            print("Test 2 _L3Rebalance.num_routers_on_agents= %s" % _L3Rebalance.num_routers_on_agents)
            # print("Test 2 _L3Rebalance.router_ids_per_agent= %s" % _L3Rebalance.router_ids_per_agent)

        del num_agents_in[:]
        num_agents_in = [5, 3, 55, 32, 210, 35, 105]
        if DEBUG_PRINTING:
            print("host_name = %s, num_agents_in = %s" % (host_name, num_agents_in))
        self.run_rebalance(num_agents_in, 'physnet3', host_name)
        assert sum(num_agents_in) == sum(_L3Rebalance.num_routers_on_agents)

        if DEBUG_PRINTING:
            print("Test 2 _L3Rebalance.num_routers_on_agents= %s" % _L3Rebalance.num_routers_on_agents)
            # print("Test 2 _L3Rebalance.router_ids_per_agent= %s" % _L3Rebalance.router_ids_per_agent)

        del num_agents_in[:]
        num_agents_in = [0, 0, 5, 0, 0, 0, 0]
        if DEBUG_PRINTING:
            print("host_name = %s, num_agents_in = %s" % (host_name, num_agents_in))
        self.run_rebalance(num_agents_in, 'physnet2', host_name)
        assert sum(num_agents_in) == sum(_L3Rebalance.num_routers_on_agents)

        if DEBUG_PRINTING:
            print("Test 2 _L3Rebalance.num_routers_on_agents= %s" % _L3Rebalance.num_routers_on_agents)
            # print("Test 2 _L3Rebalance.router_ids_per_agent= %s" % _L3Rebalance.router_ids_per_agent)

        for test in range(0, 50):
            del num_agents_in[:]
            num_agents_in = [random.randint(0, 150),
                             random.randint(0, 150),
                             random.randint(0, 150),
                             random.randint(0, 150),
                             random.randint(0, 150),
                             random.randint(0, 150),
                             random.randint(0, 150)]
            if DEBUG_PRINTING:
                print("host_name = %s, num_agents_in = %s" % (host_name, num_agents_in))
            self.run_rebalance(num_agents_in, 'physnet1', host_name)
            assert sum(num_agents_in) == sum(_L3Rebalance.num_routers_on_agents)
            if DEBUG_PRINTING:
                print("Test 2 _L3Rebalance.num_routers_on_agents= %s" % _L3Rebalance.num_routers_on_agents)

        for test in range(0, 50):
            del num_agents_in[:]
            num_agents_in = [random.randint(0, 150),
                             random.randint(0, 150),
                             random.randint(0, 150),
                             random.randint(0, 150),
                             random.randint(0, 150),
                             random.randint(0, 150),
                             random.randint(0, 150)]
            if DEBUG_PRINTING:
                print("host_name = %s, num_agents_in = %s" % (host_name, num_agents_in))
            self.run_rebalance(num_agents_in, 'physnet3', host_name)
            assert sum(num_agents_in) == sum(_L3Rebalance.num_routers_on_agents)
            if DEBUG_PRINTING:
                print("Test 2 _L3Rebalance.num_routers_on_agents= %s" % _L3Rebalance.num_routers_on_agents)
                # print("Test 2 _L3Rebalance.router_ids_per_agent= %s" % _L3Rebalance.router_ids_per_agent)

    def test_rebalance_new_host(self):
        _L3Rebalance.router_diff_threshold = 1
        self.rebalance(None)
        _L3Rebalance.router_diff_threshold = 2
        self.rebalance(None)
        _L3Rebalance.router_diff_threshold = 3
        self.rebalance(None)
        _L3Rebalance.router_diff_threshold = 4
        self.rebalance(None)
        _L3Rebalance.router_diff_threshold = 5
        self.rebalance(None)
        _L3Rebalance.router_diff_threshold = 6
        self.rebalance(None)

    def test_rebalance_down_host(self):
        _L3Rebalance.router_diff_threshold = 1
        self.rebalance('compute-0')
        _L3Rebalance.router_diff_threshold = 2
        self.rebalance('compute-0')
        _L3Rebalance.router_diff_threshold = 3
        self.rebalance('compute-0')
        _L3Rebalance.router_diff_threshold = 4
        self.rebalance('compute-0')
        _L3Rebalance.router_diff_threshold = 5
        self.rebalance('compute-0')
        _L3Rebalance.router_diff_threshold = 6
        self.rebalance('compute-0')
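The assertions above all come down to the same invariant: after a rebalance no router is gained or lost, and, where the physical networks allow it, the spread between the busiest and least busy agent stays within router_diff_threshold. A minimal standalone sketch of that check (check_balanced is an illustrative helper, not part of the test module):

    def check_balanced(num_routers_in, num_routers_out, diff_threshold):
        # Routers are only moved between agents, never created or dropped.
        assert sum(num_routers_in) == sum(num_routers_out)
        # When network constraints permit, the busiest and least busy agents
        # end up within the configured threshold of each other.
        assert max(num_routers_out) - min(num_routers_out) <= diff_threshold

    # Example: one possible balanced outcome for seven agents, threshold 3.
    check_balanced([5, 20, 31, 32, 44, 0, 0], [19, 19, 19, 19, 19, 19, 18], 3)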
@@ -45,6 +45,11 @@ max_evacuate_local_image_disk_gb=60

[sw-mgmt-configuration]

[l3agent-rebalance]
timer_interval=1
router_diff_threshold=3
hold_off=10

[vim]
rpc_host=127.0.0.1
rpc_port=4343
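The three new options drive the rebalance audit: timer_interval is the tick period in seconds, router_diff_threshold is the largest max-minus-min router spread still considered balanced, and hold_off is the number of ticks to wait after work is queued before rebalancing starts. A rough sketch of reading such a section, assuming a plain dict of parsed ini values (the helper name and dict shape are illustrative only):

    def read_l3agent_rebalance(conf):
        # conf: dict of ini sections, e.g. produced by configparser.
        section = conf.get('l3agent-rebalance', {})
        return (int(section.get('timer_interval', 10)),
                int(section.get('router_diff_threshold', 3)),
                int(section.get('hold_off', 3)))

    # With the values added above this returns (1, 3, 10).
    read_l3agent_rebalance({'l3agent-rebalance': {
        'timer_interval': '1', 'router_diff_threshold': '3', 'hold_off': '10'}})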
@@ -150,6 +150,7 @@ nfv_vim.strategy.phase: debug.level.info
nfv_vim.strategy.stage: debug.level.info
nfv_vim.strategy.step: debug.level.info
nfv_vim.dor: debug.level.verbose
nfv_vim.l3_rebalance: debug.level.info
nfv_vim: debug.level.verbose
# ----------------------------------------------------------------------------
nfv_vim.api.openstack: debug.level.verbose
@@ -12,6 +12,7 @@ from nfv_common import timers

from nfv_common.helpers import coroutine

from nfv_vim import network_rebalance
from nfv_vim import nfvi

from nfv_vim.host_fsm._host_defs import HOST_EVENT

@@ -178,6 +179,8 @@ class NotifyHostDisabledTaskWork(state_machine.StateTaskWork):
        """
        Callback for notify host disabled
        """
        from nfv_vim import objects

        response = (yield)
        if self.task is not None:
            DLOG.verbose("Notify-Host-Disabled callback for %s, response=%s."

@@ -186,6 +189,11 @@ class NotifyHostDisabledTaskWork(state_machine.StateTaskWork):
                self.task.task_work_complete(
                    state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
                    empty_reason)
                if (self._host.kubernetes_configured and
                        (self._service == objects.HOST_SERVICES.NETWORK)):
                    DLOG.info("Queueing rebalance for host %s disable" % self._host.name)
                    network_rebalance.add_rebalance_work(self._host.name, True)

            else:
                if self.force_pass:
                    DLOG.info("Notify-Host-Disabled callback for %s, "

@@ -193,6 +201,11 @@ class NotifyHostDisabledTaskWork(state_machine.StateTaskWork):
                    self.task.task_work_complete(
                        state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
                        empty_reason)
                    if (self._host.kubernetes_configured and
                            (self._service == objects.HOST_SERVICES.NETWORK)):
                        DLOG.info("Queueing rebalance for host %s disable" % self._host.name)
                        network_rebalance.add_rebalance_work(self._host.name, True)

                else:
                    self.task.task_work_complete(
                        state_machine.STATE_TASK_WORK_RESULT.FAILED,

@@ -830,6 +843,11 @@ class EnableHostServicesTaskWork(state_machine.StateTaskWork):
                self.task.task_work_complete(
                    state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
                    empty_reason)

                if (self._host.kubernetes_configured and
                        (self._service == objects.HOST_SERVICES.NETWORK)):
                    DLOG.info("Queueing rebalance for host %s enable" % self._host.name)
                    network_rebalance.add_rebalance_work(self._host.name, False)
            else:
                if self.force_pass:
                    DLOG.info("Enable-Host-Services callback for %s, "

@@ -837,6 +855,11 @@ class EnableHostServicesTaskWork(state_machine.StateTaskWork):
                    self.task.task_work_complete(
                        state_machine.STATE_TASK_WORK_RESULT.SUCCESS,
                        empty_reason)
                    if (self._host.kubernetes_configured and
                            (self._service == objects.HOST_SERVICES.NETWORK)):
                        DLOG.info("Queueing rebalance for host %s enable" % self._host.name)
                        network_rebalance.add_rebalance_work(self._host.name, False)

                else:
                    self._host.host_services_update(
                        self._service,
8  nfv/nfv-vim/nfv_vim/network_rebalance/__init__.py  Normal file
@@ -0,0 +1,8 @@
#
# Copyright (c) 2015-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from nfv_vim.network_rebalance._network_rebalance import add_rebalance_work  # noqa: F401
from nfv_vim.network_rebalance._network_rebalance import nr_finalize  # noqa: F401
from nfv_vim.network_rebalance._network_rebalance import nr_initialize  # noqa: F401

990  nfv/nfv-vim/nfv_vim/network_rebalance/_network_rebalance.py  Normal file
@@ -0,0 +1,990 @@
#
# Copyright (c) 2015-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import six

from nfv_common.helpers import Constant
from nfv_common.helpers import Constants
from nfv_common.helpers import coroutine
from nfv_common.helpers import Singleton

from nfv_common import config
from nfv_common import debug
from nfv_common import timers

from nfv_vim import nfvi

DLOG = debug.debug_get_logger('nfv_vim.l3_rebalance')


@six.add_metaclass(Singleton)
class AgentType(Constants):
    """
    AGENT TYPE Constants
    """
    L3 = Constant('L3 agent')
    DHCP = Constant('DHCP agent')

AGENT_TYPE = AgentType()


@six.add_metaclass(Singleton)
class L3RebalanceState(Constants):
    """
    L3 REBALANCE STATE Constants
    """
    GET_NETWORK_AGENTS = Constant('GET_NETWORK_AGENTS')
    GET_ROUTERS_HOSTED_ON_AGENT = Constant('GET_ROUTERS_HOSTED_ON_AGENT')
    GET_ROUTER_PORT_NETWORKS = Constant('GET_ROUTER_PORT_NETWORKS')
    GET_PHYSICAL_NETWORK_FROM_NETWORKS = Constant('GET_PHYSICAL_NETWORK_FROM_NETWORKS')
    GET_HOST_PHYSICAL_NETWORKS = Constant('GET_HOST_PHYSICAL_NETWORKS')
    RESCHEDULE_DOWN_AGENT = Constant('RESCHEDULE_DOWN_AGENT')
    RESCHEDULE_NEW_AGENT = Constant('RESCHEDULE_NEW_AGENT')
    HOLD_OFF = Constant('HOLD_OFF')
    DONE = Constant('DONE')

L3_REBALANCE_STATE = L3RebalanceState()
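# For orientation, a typical pass through the states above is:
#   GET_NETWORK_AGENTS -> GET_ROUTERS_HOSTED_ON_AGENT -> GET_ROUTER_PORT_NETWORKS
#   -> GET_PHYSICAL_NETWORK_FROM_NETWORKS -> GET_HOST_PHYSICAL_NETWORKS
#   -> RESCHEDULE_DOWN_AGENT or RESCHEDULE_NEW_AGENT -> DONE,
# with HOLD_OFF entered from DONE once host up/down work has been queued.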
@six.add_metaclass(Singleton)
class L3AgentRebalance(object):
    def __init__(self):
        # Our state.
        self.state = L3_REBALANCE_STATE.DONE
        # If rebalance occurring due to down agent,
        # entry zero in below list will be for the down agent.
        # List of dictionaries of agent information.
        self.l3agents = list()
        # Dictionary based on agent_id of router ids hosted
        # on an agent.
        self.router_ids_per_agent = dict()
        # For keeping track of routers that can't be scheduled.
        # Useful for logging and test.
        self.router_ids_per_agent_cant_schedule = dict()
        # Dictionary based on router_id of list of physical
        # networks of ports on router.
        self.networks_per_router = dict()
        # For determining whether state machine work is finished
        # in a tick and the state machine can progress.
        self.state_machine_in_progress = False
        # Indexes into the various data structures.
        self.l3agent_idx = 0
        self.num_l3agents = 0
        self.router_idx = 0
        self.num_routers = 0
        self.net_idx = 0
        # If rebalance occurring due to down agent,
        # entry zero in below list will be for the down agent.
        self.num_routers_on_agents = list()
        # The queue of work that is to be processed.
        self.work_queue = list()
        # The difference between maximum routers on an agent
        # and minimum routers on an agent we are trying to achieve.
        self.router_diff_threshold = 1
        # List of (index, number of routers on agents) tuples
        # for all agents.
        self.agent_list = list()
        # For rebalance, below will be None, as we don't actually
        # care about the name of a new host whose agent has just
        # come up. For agent down, it will be the name of the host
        # going down.
        self.working_host = None
        # Number of ticks to wait after seeing work to begin work.
        self.hold_off = 3
        # Current number of ticks waiting to begin work.
        self.current_hold_off_count = 0
        # Queues that maintain host names of hosts coming up and going down.
        self.host_up_queue = list()
        self.host_down_queue = list()

    def reinit(self):
        self.num_l3agents = 0
        self.l3agent_idx = 0
        del self.l3agents[:]
        self.router_ids_per_agent = {}
        self.router_ids_per_agent_cant_schedule = {}
        self.networks_per_router = {}
        del self.num_routers_on_agents[:]

    def add_agent(self, agent_id):
        self.router_ids_per_agent[agent_id] = list()
        self.num_l3agents += 1

    def get_current_l3agent(self):
        agent_id = self.l3agents[self.l3agent_idx]['id']
        host_name = self.l3agents[self.l3agent_idx]['host']
        return agent_id, host_name

    def update_current_l3agent(self, key, value):
        self.l3agents[self.l3agent_idx][key] = value

    def add_router_to_agent(self, agent_id, router_id):
        self.router_ids_per_agent[agent_id].append(router_id)

    def agent_routers_done(self):
        agent_id = self.l3agents[self.l3agent_idx]['id']
        _L3Rebalance.num_routers_on_agents.append(
            len(_L3Rebalance.router_ids_per_agent[agent_id]))
        self.l3agent_idx += 1
        return self.l3agent_idx == self.num_l3agents

    def add_network_to_router(self, router_to_resched, network_id):
        self.networks_per_router[router_to_resched].append(network_id)

    def router_ports_done(self):
        self.router_idx += 1
        DLOG.debug("router_idx = %s, l3agent_idx = %s, num_routers= %s" %
                   (self.router_idx, self.l3agent_idx, self.num_routers))

        if self.router_idx >= self.num_routers:
            # We have router port info for all routers on this agent,
            # move on to the next one.
            self.router_idx = 0
            self.l3agent_idx += 1
            if (((self.working_host is not None) and (self.l3agent_idx == 1)) or
                    (self.l3agent_idx == self.num_l3agents)):
                # We have router port info for all routers on all agents
                # that we care about. Get the Physical Network info for these.
                return True

        DLOG.debug("self.networks_per_router = %s" % self.networks_per_router)
        return False

    def get_current_working_router(self):
        agent_routers = self.router_ids_per_agent[self.l3agents[self.l3agent_idx]['id']]
        self.num_routers = len(agent_routers)
        if self.num_routers > 0:
            working_router = agent_routers[self.router_idx]
            self.networks_per_router[working_router] = list()
            return working_router
        else:
            return None

    def get_current_working_network(self):
        agent_routers = self.router_ids_per_agent[self.l3agents[self.l3agent_idx]['id']]
        self.num_routers = len(agent_routers)
        if self.num_routers > 0:
            working_router = agent_routers[self.router_idx]
            working_network = self.networks_per_router[working_router][self.net_idx]
            return working_network
        else:
            return None

    def current_working_network_advance_agent(self):
        self.l3agent_idx += 1

    def get_host_id_of_current_l3agent(self):
        return self.l3agents[self.l3agent_idx]['host_uuid']

    def update_network(self, physical_network):
        """
        Overwrite the network_id of the stored network with the input
        physical_network.
        Returns True if there are no more networks to gather, False otherwise.
        """
        agent_routers = self.router_ids_per_agent[self.l3agents[self.l3agent_idx]['id']]
        working_router = agent_routers[self.router_idx]
        # Overwrite the network id with that of the physical network.
        self.networks_per_router[working_router][self.net_idx] = physical_network
        self.net_idx += 1

        if self.net_idx == len(self.networks_per_router[working_router]):
            self.net_idx = 0
            self.router_idx += 1
            if self.router_idx >= len(agent_routers):
                self.router_idx = 0
                self.l3agent_idx += 1
                if (self.l3agent_idx >= self.num_l3agents) or self.get_working_host():
                    return True

                # Iterate until we find an agent with routers, or until we've run out of agents.
                while (len(self.router_ids_per_agent[self.l3agents[self.l3agent_idx]['id']]) == 0):
                    self.l3agent_idx += 1
                    if (self.l3agent_idx >= self.num_l3agents):
                        return True

        return False

    def update_datanetworks(self, datanetwork_name):
        if not self.l3agents[self.l3agent_idx].get('datanets', False):
            self.l3agents[self.l3agent_idx]['datanets'] = list()
        self.l3agents[self.l3agent_idx]['datanets'].append(datanetwork_name)

    def datanetworks_done(self):
        self.l3agent_idx += 1
        if self.l3agent_idx == self.num_l3agents:
            return True
        else:
            return False

    def get_next_router_to_move(self):
        # If there are any routers on the down agent, then
        # return the next router and its networks.
        agent_id = self.get_down_agent_id()
        if len(self.router_ids_per_agent[agent_id]) > 0:
            router_to_move = self.router_ids_per_agent[agent_id][0]
            router_to_move_physical_networks = self.networks_per_router[router_to_move]
            return router_to_move, router_to_move_physical_networks
        else:
            return None, None

    def find_router_with_physical_networks(self,
                                           agent_idx,
                                           physical_networks):
        agent_routers = self.router_ids_per_agent[self.l3agents[agent_idx]['id']]

        all_networks_found = False
        router_id = None
        for router_id in agent_routers:

            router_networks = self.networks_per_router[router_id]
            DLOG.debug("router_networks = %s, physical_networks = %s" % (router_networks, physical_networks))
            all_networks_found = True
            for network in router_networks:
                if network not in physical_networks:
                    all_networks_found = False
                    break

            if all_networks_found:
                break

        if all_networks_found:
            return router_id
        else:
            # We couldn't find a router with networks matching the requirements.
            return None

    def populate_l3agents(self, result_data):
        for agent in result_data:
            if agent['agent_type'] == AGENT_TYPE.L3:
                agent_info_dict = dict()
                agent_info_dict['host'] = agent['host']
                agent_info_dict['id'] = agent['id']
                # For simplicity and ease of access, place the down host
                # (if applicable) first in the list.
                if agent['host'] == self.get_working_host():
                    self.l3agents.insert(0, agent_info_dict)
                elif agent['alive'] and agent['admin_state_up']:
                    self.l3agents.append(agent_info_dict)
                self.add_agent(agent['id'])

        return len(self.l3agents)

    def get_down_agent_id(self):
        return self.l3agents[0]['id']

    def get_agent_id_from_index(self, agent_index):
        return self.l3agents[agent_index]['id']

    def get_num_routers_on_agents(self):
        return self.num_routers_on_agents

    def get_host_physical_networks(self, agent_index):
        return self.l3agents[agent_index]['datanets']

    def move_agent_router(self, router_to_move, source_agent, target_agent):
        target_agent_id = _L3Rebalance.get_agent_id_from_index(target_agent)
        source_agent_id = _L3Rebalance.get_agent_id_from_index(source_agent)

        _L3Rebalance.num_routers_on_agents[target_agent] += 1
        _L3Rebalance.num_routers_on_agents[source_agent] -= 1

        self.router_ids_per_agent[source_agent_id].remove(router_to_move)
        self.router_ids_per_agent[target_agent_id].append(router_to_move)

    def move_agent_router_to_cant_schedule(self, router_to_move, agent_index):
        source_agent_id = _L3Rebalance.get_agent_id_from_index(agent_index)
        _L3Rebalance.num_routers_on_agents[agent_index] -= 1

        self.router_ids_per_agent[source_agent_id].remove(router_to_move)
        if self.router_ids_per_agent_cant_schedule.get(source_agent_id, None) is None:
            self.router_ids_per_agent_cant_schedule[source_agent_id] = list()

        self.router_ids_per_agent_cant_schedule[source_agent_id].append(router_to_move)

    def get_working_host(self):
        # working_host will be None if we are doing a rebalance
        # due to a new l3 agent becoming available.
        return self.working_host

    def set_working_host(self, host_name=None):
        # working_host will be None if we are doing a rebalance
        # due to a new l3 agent becoming available.
        self.working_host = host_name

    def routers_are_balanced(self):

        possible_agent_targets = range(0, len(self.num_routers_on_agents))

        # Find the agent with the least amount of routers.
        agent_with_least_routers = min(possible_agent_targets,
                                       key=self.num_routers_on_agents.__getitem__)
        min_routers = self.num_routers_on_agents[agent_with_least_routers]

        agent_with_most_routers = max(possible_agent_targets,
                                      key=self.num_routers_on_agents.__getitem__)
        max_routers = self.num_routers_on_agents[agent_with_most_routers]

        if ((max_routers - min_routers) <= _L3Rebalance.router_diff_threshold):
            DLOG.debug("max:%s - min:%s <= DIFF, balanced" % (max_routers, min_routers))
            return True

        return False
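    # Example: with router_diff_threshold = 3, agent loads of [66, 66, 65, 66]
    # count as balanced (66 - 65 <= 3), while [96, 66, 65, 66] do not (96 - 65 > 3).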
    def no_routers_on_down_host(self):
        return self.num_routers_on_agents[0] == 0

    def remove_agent(self, agent_with_most_routers):
        _L3Rebalance.num_routers_on_agents.remove(agent_with_most_routers)
        _L3Rebalance.l3agents.remove(agent_with_most_routers)

    def set_state(self, state):
        # Set up state for next tick.
        self.state = state
        self.router_idx = 0
        self.l3agent_idx = 0
        self.net_idx = 0
        self.state_machine_in_progress = False
        if ((state == L3_REBALANCE_STATE.RESCHEDULE_DOWN_AGENT) or
                (state == L3_REBALANCE_STATE.RESCHEDULE_NEW_AGENT)):
            self.create_agent_list()

        elif state == L3_REBALANCE_STATE.DONE:
            self.debug_dump()
        elif state == L3_REBALANCE_STATE.HOLD_OFF:
            self.current_hold_off_count = 0

    def get_state(self):
        return self.state

    def add_rebalance_work(self, host_name, host_is_going_down):
        if host_is_going_down:
            self.host_down_queue.append(host_name)
        else:
            self.host_up_queue.append(host_name)

    def create_agent_list(self):
        del self.agent_list[:]
        for index, agent in enumerate(self.l3agents):
            agent_list_entry = (index, self.num_routers_on_agents[index])
            self.agent_list.append(agent_list_entry)

    def get_min_agent_list_data(self):
        agent_with_least_routers_entry = min(self.agent_list, key=lambda t: t[1])
        return agent_with_least_routers_entry[0], agent_with_least_routers_entry[1]

    def get_max_agent_list_data(self):
        agent_with_most_routers_entry = max(self.agent_list, key=lambda t: t[1])
        return agent_with_most_routers_entry[0], agent_with_most_routers_entry[1]

    def get_agent_list_scheduling_info(self):
        possible_agent_targets = list()
        num_routers_on_agents = list()
        for entry in self.agent_list:
            possible_agent_targets.append(entry[0])
            num_routers_on_agents.append(entry[1])

        return num_routers_on_agents, possible_agent_targets

    def agent_list_remove(self, agent_list_tuple):
        self.agent_list.remove(agent_list_tuple)

    def agent_list_increment(self, agent_index):
        for idx, val in enumerate(self.agent_list):
            if val[0] == agent_index:
                self.agent_list[idx] = (val[0], val[1] + 1)
                break

    def agent_list_decrement(self, agent_index):
        for idx, val in enumerate(self.agent_list):
            if val[0] == agent_index:
                self.agent_list[idx] = (val[0], val[1] - 1)
                break

    def hold_off_is_done(self):
        self.current_hold_off_count += 1
        return self.current_hold_off_count >= self.hold_off

    def debug_dump(self):
        DLOG.debug("_L3Rebalance.l3agents = %s" % _L3Rebalance.l3agents)
        DLOG.debug("_L3Rebalance.router_ids_per_agent= %s" % _L3Rebalance.router_ids_per_agent)
        DLOG.debug("_L3Rebalance.networks_per_router= %s" % _L3Rebalance.networks_per_router)
        DLOG.debug("_L3Rebalance.state_machine_in_progress= %s" % _L3Rebalance.state_machine_in_progress)
        DLOG.debug("_L3Rebalance.l3agent_idx= %s" % _L3Rebalance.l3agent_idx)
        DLOG.debug("_L3Rebalance.num_l3agents= %s" % _L3Rebalance.num_l3agents)
        DLOG.debug("_L3Rebalance.router_idx= %s" % _L3Rebalance.router_idx)
        DLOG.debug("_L3Rebalance.num_routers_on_agents= %s" % _L3Rebalance.num_routers_on_agents)
_L3Rebalance = L3AgentRebalance()


def add_rebalance_work(host_name, host_is_going_down):
    """
    API for external use to launch a rebalance operation.
    host_is_going_down is a boolean indicating whether the host is
    coming up (rebalance routers, moving some to the newly available host)
    or going down (move routers off this host, distributing them amongst the rest).
    """
    global _L3Rebalance

    _L3Rebalance.add_rebalance_work(host_name, host_is_going_down)
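# Illustrative usage only (the actual callers are the host FSM task works in
# this change):
#
#     from nfv_vim import network_rebalance
#     network_rebalance.add_rebalance_work('compute-0', host_is_going_down=True)
#     network_rebalance.add_rebalance_work('compute-1', host_is_going_down=False)
#
# The request is only queued here; the _nr_timer audit below picks it up after
# the hold-off period and drives the rebalance state machine.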
@coroutine
def _add_router_to_agent_callback():
    """
    Add router to agent callback
    """
    global _L3Rebalance

    response = (yield)

    _L3Rebalance.state_machine_in_progress = False
    DLOG.debug("_add_router_to_agent_callback, response = %s" % response)
    if not response['completed']:
        # Nothing we can really do except log this and resume our state machine.
        DLOG.warn("Unable to add router to l3 agent, response = %s" % response)


@coroutine
def _remove_router_from_agent_callback(to_agent_id, router_id):
    """
    Remove router from agent callback
    """
    global _L3Rebalance

    response = (yield)

    DLOG.debug("_remove_router_from_agent_callback, response = %s" % response)
    if response['completed']:
        # After successfully detaching the router from the agent, attach
        # it to the target agent.
        nfvi.nfvi_add_router_to_agent(to_agent_id, router_id, _add_router_to_agent_callback())
    else:
        # Couldn't detach the router, no sense trying to attach.
        # Just resume the state machine.
        _L3Rebalance.state_machine_in_progress = False
        DLOG.warn("Unable to remove router from l3 agent, response = %s" % response)


@coroutine
def _get_datanetworks_callback(host_id):
    """
    Get data networks callback
    """
    global _L3Rebalance

    response = (yield)

    _L3Rebalance.state_machine_in_progress = False
    DLOG.debug("_get_datanetworks, response = %s" % response)
    if response['completed']:
        result_data = response.get('result-data', None)
        for data_net in result_data:
            _L3Rebalance.update_datanetworks(data_net['datanetwork_name'])

        if _L3Rebalance.datanetworks_done():
            # Make the choice of which state to enter here.
            if _L3Rebalance.get_working_host() is not None:
                _L3Rebalance.set_state(L3_REBALANCE_STATE.RESCHEDULE_DOWN_AGENT)
            else:
                _L3Rebalance.set_state(L3_REBALANCE_STATE.RESCHEDULE_NEW_AGENT)
    else:
        DLOG.error("Unable to retrieve data networks for host: %s" % host_id)
        # TODO(KSMITH) Is this error recoverable? For now, abort.
        _L3Rebalance.set_state(L3_REBALANCE_STATE.DONE)


def _get_host_data_networks():
    """
    Get the physical networks supported by a host.
    """

    host_id = _L3Rebalance.get_host_id_of_current_l3agent()
    nfvi.nfvi_get_datanetworks(host_id, _get_datanetworks_callback(host_id))
@coroutine
def _get_physical_network_callback(network_id):
    """
    Get Physical Network callback
    """
    global _L3Rebalance

    response = (yield)

    _L3Rebalance.state_machine_in_progress = False
    DLOG.debug("_get_physical_network_callback, response = %s" % response)
    if response['completed']:
        result_data = response.get('result-data', None)
        if _L3Rebalance.update_network(result_data['provider:physical_network']):
            _L3Rebalance.set_state(L3_REBALANCE_STATE.GET_HOST_PHYSICAL_NETWORKS)
    else:
        DLOG.error("Unable to get physical network for network: %s" % network_id)
        # TODO(KSMITH) Is this error recoverable? For now, abort.
        _L3Rebalance.set_state(L3_REBALANCE_STATE.DONE)


def _get_physical_networks():
    """
    Get the physical network corresponding to a network.
    """

    network_id = _L3Rebalance.get_current_working_network()
    DLOG.debug("Current working network: %s" % network_id)
    if network_id is not None:
        nfvi.nfvi_get_physical_network(network_id, _get_physical_network_callback(network_id))
    else:
        # We get here if there are no routers on this agent.
        # Stay in the same state, but advance to the next agent.
        _L3Rebalance.current_working_network_advance_agent()
        _L3Rebalance.state_machine_in_progress = False


@coroutine
def _get_router_ports_callback(router):
    """
    Get Router Ports callback. Save the network_id for each port attached
    to the router.
    """
    global _L3Rebalance

    response = (yield)

    _L3Rebalance.state_machine_in_progress = False
    DLOG.debug("_get_router_ports_callback, response = %s" % response)
    if response['completed']:
        result_data = response.get('result-data', None)
        for port in result_data['ports']:
            network_id = port['network_id']
            _L3Rebalance.add_network_to_router(router, network_id)

        if _L3Rebalance.router_ports_done():
            # We're done getting routers for this agent.
            _L3Rebalance.set_state(L3_REBALANCE_STATE.GET_PHYSICAL_NETWORK_FROM_NETWORKS)

        DLOG.debug("_L3Rebalance.networks_per_router = %s" % _L3Rebalance.networks_per_router)
    else:
        DLOG.error("Unable to get ports for router: %s" % router)
        # TODO(KSMITH) Is this error recoverable? For now, abort.
        _L3Rebalance.set_state(L3_REBALANCE_STATE.DONE)


def _get_router_port_networks():
    """
    For each router, look at all the ports and find the
    underlying physical network. Even though pagination is supported,
    do not worry about it as the assumption is that there will be a
    relatively small number of ports on the router.
    """
    global _L3Rebalance

    router = _L3Rebalance.get_current_working_router()

    if router is not None:
        nfvi.nfvi_get_router_ports(router, _get_router_ports_callback(router))
    elif _L3Rebalance.router_ports_done():
        # We're done getting router port networks,
        # advance to the next state.
        _L3Rebalance.set_state(L3_REBALANCE_STATE.GET_PHYSICAL_NETWORK_FROM_NETWORKS)
    else:
        # We get here if there are no routers on this agent.
        # Stay in the same state, but advance to the next agent.
        _L3Rebalance.state_machine_in_progress = False
@coroutine
def _get_agent_routers_callback(agent_id):
    """
    Get Agent Routers callback
    """
    global _L3Rebalance

    response = (yield)

    _L3Rebalance.state_machine_in_progress = False
    DLOG.debug("_get_agent_routers_callback, response = %s" % response)
    if response['completed']:

        result_data = response.get('result-data', None)
        for router in result_data:
            _L3Rebalance.add_router_to_agent(agent_id, router['id'])

        DLOG.debug("_L3Rebalance.l3agent_idx = %s, _L3Rebalance.num_l3agents = %s" %
                   (_L3Rebalance.l3agent_idx, _L3Rebalance.num_l3agents))

        if _L3Rebalance.agent_routers_done():

            _L3Rebalance.set_state(L3_REBALANCE_STATE.GET_ROUTER_PORT_NETWORKS)

            # Do this check here to save us from going through the rest
            # of the state machine.
            if _L3Rebalance.get_working_host() is None:
                if _L3Rebalance.routers_are_balanced():
                    _L3Rebalance.set_state(L3_REBALANCE_STATE.DONE)
                    return

            if _L3Rebalance.get_working_host() is not None:
                if _L3Rebalance.no_routers_on_down_host():
                    # Check to see if there are no routers on the
                    # down host in the first place.
                    _L3Rebalance.set_state(L3_REBALANCE_STATE.DONE)

    else:
        DLOG.error("Could not get routers on agent: %s" % agent_id)
        _L3Rebalance.set_state(L3_REBALANCE_STATE.DONE)


def _get_routers_on_agents():
    """
    Get Routers hosted by an L3 Agent
    Note paging is not supported by the l3-agent api.
    """

    from nfv_vim import tables
    global _L3Rebalance

    # Agent of interest is first in the list.
    # In the case of an agent going down, this will be important.

    agent_id, host_name = _L3Rebalance.get_current_l3agent()

    host_table = tables.tables_get_host_table()
    host = host_table.get(host_name, None)

    if host is not None:
        _L3Rebalance.update_current_l3agent('host_uuid', host.uuid)
    else:
        DLOG.error("Cannot find rebalance host: %s" % host_name)
        _L3Rebalance.set_state(L3_REBALANCE_STATE.DONE)

    nfvi.nfvi_get_agent_routers(agent_id, _get_agent_routers_callback(agent_id))
@coroutine
def _get_network_agents_callback():
    """
    Get Network Agents callback
    """
    global _L3Rebalance

    response = (yield)

    _L3Rebalance.state_machine_in_progress = False
    DLOG.debug("_get_network_agents_callback, response = %s" % response)
    if response['completed']:
        result_data = response.get('result-data', None)

        num_agents = _L3Rebalance.populate_l3agents(result_data)

        if num_agents < 2:
            # No sense doing anything if there are fewer than 2 agents.
            DLOG.debug("Less than 2 l3agents, no rebalancing required")
            _L3Rebalance.set_state(L3_REBALANCE_STATE.DONE)
        else:
            _L3Rebalance.set_state(L3_REBALANCE_STATE.GET_ROUTERS_HOSTED_ON_AGENT)

    else:
        DLOG.error("Failed to get network agents, aborting l3 agent rebalance")
        _L3Rebalance.set_state(L3_REBALANCE_STATE.DONE)


def _get_network_agents():
    """
    Get Network Agents
    Note paging is not supported for getting network agents.
    """
    global _L3Rebalance

    nfvi.nfvi_get_network_agents(_get_network_agents_callback())
def _reschedule_down_agent():
    """
    Reschedule down agent
    """

    # For each router on the down agent, schedule it to the host with the
    # least amount of routers that also hosts the required physical networks.

    global _L3Rebalance

    found_router_to_move = False
    router_to_move = ''

    num_routers_on_agents, possible_agent_targets = \
        _L3Rebalance.get_agent_list_scheduling_info()

    # Remove the agent going down from consideration.
    possible_agent_targets.pop(0)
    num_routers_on_agents.pop(0)

    while not found_router_to_move:

        router_to_move, router_to_move_physical_networks = \
            _L3Rebalance.get_next_router_to_move()

        if router_to_move is None:
            # we're done...
            break

        agent_with_least_routers = 0

        while len(possible_agent_targets) > 0:

            min_routers = min(num_routers_on_agents)

            agent_with_least_routers_index = num_routers_on_agents.index(min_routers)
            agent_with_least_routers = possible_agent_targets[agent_with_least_routers_index]

            host_physical_networks = _L3Rebalance.get_host_physical_networks(agent_with_least_routers)

            # Does the host of this agent have the needed physical networks?
            target_good = True
            for network in router_to_move_physical_networks:
                if network not in host_physical_networks:
                    target_good = False
                    break

            if not target_good:
                # Check next agent/host.
                possible_agent_targets.pop(agent_with_least_routers_index)
                num_routers_on_agents.pop(agent_with_least_routers_index)
            else:
                # This target agent/host is good.
                break

        if len(possible_agent_targets) == 0:
            _L3Rebalance.move_agent_router_to_cant_schedule(router_to_move, 0)
            DLOG.debug("Unable to reschedule router, no valid target found")
            found_router_to_move = False
        else:
            found_router_to_move = True
            _L3Rebalance.move_agent_router(router_to_move, 0, agent_with_least_routers)

            _L3Rebalance.agent_list_increment(agent_with_least_routers)
            _L3Rebalance.agent_list_decrement(0)

            source_agent_id = _L3Rebalance.get_agent_id_from_index(0)
            target_agent_id = _L3Rebalance.get_agent_id_from_index(agent_with_least_routers)
            DLOG.debug("Rescheduling router:%s to agent: %s" % (router_to_move, target_agent_id))
            nfvi.nfvi_remove_router_from_agent(source_agent_id,
                                               router_to_move,
                                               _remove_router_from_agent_callback(
                                                   target_agent_id,
                                                   router_to_move))

    if not found_router_to_move:
        _L3Rebalance.set_state(L3_REBALANCE_STATE.DONE)
def _reschedule_new_agent():
    """
    Reschedule for a new agent coming up.
    Try to achieve a balance of routers hosted by the L3 agents.
    """
    global _L3Rebalance

    agent_with_least_routers, min_routers = _L3Rebalance.get_min_agent_list_data()
    agent_with_most_routers, max_routers = _L3Rebalance.get_max_agent_list_data()

    if (max_routers - min_routers) <= _L3Rebalance.router_diff_threshold:
        DLOG.debug("Threshold exit")
        _L3Rebalance.set_state(L3_REBALANCE_STATE.DONE)
        return

    num_routers_on_agents = list()
    possible_agent_targets = list()
    num_routers_on_agents, possible_agent_targets = \
        _L3Rebalance.get_agent_list_scheduling_info()

    # Remove our current max router agent from consideration.
    agent_with_most_routers_index = possible_agent_targets.index(agent_with_most_routers)
    possible_agent_targets.pop(agent_with_most_routers_index)
    num_routers_on_agents.pop(agent_with_most_routers_index)

    while (True):

        min_routers = min(num_routers_on_agents)

        agent_with_least_routers_index = num_routers_on_agents.index(min_routers)
        agent_with_least_routers = possible_agent_targets[agent_with_least_routers_index]

        host_physical_networks = _L3Rebalance.get_host_physical_networks(
            agent_with_least_routers)

        # Find a router on the agent with the most routers that has ports
        # on the physical networks of the agent with the least routers.
        router_to_move = _L3Rebalance.find_router_with_physical_networks(
            agent_with_most_routers,
            host_physical_networks)

        if router_to_move is None:
            # Couldn't find a match, eliminate the current least router agent
            # as a candidate.
            DLOG.debug("Could not find a router to move to agent %s" % agent_with_least_routers)
            agent_with_least_routers_index = possible_agent_targets.index(agent_with_least_routers)
            possible_agent_targets.pop(agent_with_least_routers_index)
            num_routers_on_agents.pop(agent_with_least_routers_index)

            if len(possible_agent_targets) == 0:
                # No more agents left to try, we can't move any routers off
                # the current max router agent. Remove it from consideration.
                DLOG.debug("No more agents to try for max router agent")

                _L3Rebalance.agent_list_remove((agent_with_most_routers, max_routers))
                # Keep the same state so we will come back; clear the below flag
                # as no callback will do it for us.
                _L3Rebalance.state_machine_in_progress = False
                return

        else:
            # Before we move this router: it is possible that, due to incompatible
            # networks, we are now looking at an agent that doesn't meet our
            # threshold requirements. If that is the case, do not move the router;
            # we are done trying to move routers off this agent.
            if (max_routers - min_routers) <= _L3Rebalance.router_diff_threshold:
                DLOG.debug("No more agents to try for max router agent "
                           "and threshold not met, cannot balance.")
                _L3Rebalance.agent_list_remove((agent_with_most_routers, max_routers))
                # Clear the below flag as no callback will do it for us.
                _L3Rebalance.state_machine_in_progress = False
                return

            _L3Rebalance.move_agent_router(router_to_move,
                                           agent_with_most_routers,
                                           agent_with_least_routers)

            _L3Rebalance.agent_list_increment(agent_with_least_routers)
            _L3Rebalance.agent_list_decrement(agent_with_most_routers)

            source_agent_id = _L3Rebalance.get_agent_id_from_index(agent_with_most_routers)
            target_agent_id = _L3Rebalance.get_agent_id_from_index(agent_with_least_routers)

            DLOG.debug("Rescheduling router:%s from agent: %s to agent: %s" %
                       (router_to_move, source_agent_id, target_agent_id))
            nfvi.nfvi_remove_router_from_agent(source_agent_id,
                                               router_to_move,
                                               _remove_router_from_agent_callback(
                                                   target_agent_id,
                                                   router_to_move))

            return

    _L3Rebalance.set_state(L3_REBALANCE_STATE.DONE)
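# A simplified, standalone sketch of the greedy loop above, ignoring the
# physical network matching and the one-move-per-tick pacing (illustration
# only, not used by this module):
#
#     def greedy_rebalance(counts, threshold=1):
#         while max(counts) - min(counts) > threshold:
#             src = counts.index(max(counts))
#             dst = counts.index(min(counts))
#             counts[src] -= 1   # take one router off the busiest agent
#             counts[dst] += 1   # and hand it to the least busy agent
#         return counts
#
#     greedy_rebalance([9, 1, 2])   # -> [4, 4, 4]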
@coroutine
def _nr_timer():
    """
    Network Rebalance timer
    """
    global _L3Rebalance

    while True:
        (yield)

        if not _L3Rebalance.state_machine_in_progress:

            _L3Rebalance.state_machine_in_progress = True

            my_state = _L3Rebalance.get_state()
            DLOG.debug("Network Rebalance State %s" % my_state)
            if my_state == L3_REBALANCE_STATE.GET_NETWORK_AGENTS:

                _L3Rebalance.reinit()
                _get_network_agents()

            elif my_state == L3_REBALANCE_STATE.GET_ROUTERS_HOSTED_ON_AGENT:

                _get_routers_on_agents()

            elif my_state == L3_REBALANCE_STATE.GET_ROUTER_PORT_NETWORKS:

                _get_router_port_networks()

            elif my_state == L3_REBALANCE_STATE.GET_PHYSICAL_NETWORK_FROM_NETWORKS:

                _get_physical_networks()

            elif my_state == L3_REBALANCE_STATE.GET_HOST_PHYSICAL_NETWORKS:

                _get_host_data_networks()

            elif my_state == L3_REBALANCE_STATE.RESCHEDULE_DOWN_AGENT:

                _reschedule_down_agent()

            elif my_state == L3_REBALANCE_STATE.RESCHEDULE_NEW_AGENT:

                _reschedule_new_agent()

            elif my_state == L3_REBALANCE_STATE.DONE:

                _L3Rebalance.state_machine_in_progress = False

                # Check for work...
                if ((len(_L3Rebalance.host_up_queue) > 0) or
                        (len(_L3Rebalance.host_down_queue) > 0)):
                    _L3Rebalance.set_state(L3_REBALANCE_STATE.HOLD_OFF)

            elif my_state == L3_REBALANCE_STATE.HOLD_OFF:

                _L3Rebalance.state_machine_in_progress = False
                if _L3Rebalance.hold_off_is_done():
                    if len(_L3Rebalance.host_down_queue) > 0:
                        # A reschedule for every down host is required.
                        # Do the down hosts rescheduling before handling
                        # the up hosts, as if both are pending, we don't
                        # want to move routers to agents that are about to
                        # go down.
                        down_host = _L3Rebalance.host_down_queue.pop(0)
                        _L3Rebalance.set_working_host(down_host)
                        DLOG.info("Triggering L3 Agent reschedule for "
                                  "disabled l3 agent host: %s" %
                                  down_host)
                    elif len(_L3Rebalance.host_up_queue) > 0:
                        # Even if multiple hosts come up, we only need to
                        # reschedule once.
                        _L3Rebalance.set_working_host(None)
                        DLOG.info("Triggering L3 Agent reschedule for "
                                  "enabled l3 agent host(s): %s" %
                                  _L3Rebalance.host_up_queue)
                        del _L3Rebalance.host_up_queue[:]

                    _L3Rebalance.set_state(L3_REBALANCE_STATE.GET_NETWORK_AGENTS)

            else:
                DLOG.error("Unknown state: %s, resetting" % my_state)
                _L3Rebalance.set_state(L3_REBALANCE_STATE.DONE)


def nr_initialize():
    """
    Initialize Network Rebalance handling
    """
    global _L3Rebalance

    _L3Rebalance.set_state(L3_REBALANCE_STATE.DONE)

    if config.section_exists('l3agent-rebalance'):
        section = config.CONF['l3agent-rebalance']
        _nr_timer_interval = int(section.get('timer_interval', 10))
        _L3Rebalance.router_diff_threshold = int(section.get('router_diff_threshold', 3))
        _L3Rebalance.hold_off = int(section.get('hold_off', 3))
    else:
        _nr_timer_interval = 10
        _L3Rebalance.router_diff_threshold = 3
        _L3Rebalance.hold_off = 3

    timers.timers_create_timer('nr', 1, _nr_timer_interval, _nr_timer)


def nr_finalize():
    """
    Finalize Network Rebalance handling
    """
    pass
@@ -91,6 +91,7 @@ from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_disable_container_host
from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_enable_container_host_services  # noqa: F401
from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_get_alarm_history  # noqa: F401
from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_get_alarms  # noqa: F401
from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_get_datanetworks  # noqa: F401
from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_get_host  # noqa: F401
from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_get_hosts  # noqa: F401
from nfv_vim.nfvi._nfvi_infrastructure_module import nfvi_get_logs  # noqa: F401

@@ -124,6 +125,7 @@ from nfv_vim.nfvi._nfvi_module import nfvi_finalize  # noqa: F401
from nfv_vim.nfvi._nfvi_module import nfvi_initialize  # noqa: F401
from nfv_vim.nfvi._nfvi_module import nfvi_reinitialize  # noqa: F401

from nfv_vim.nfvi._nfvi_network_module import nfvi_add_router_to_agent  # noqa: F401
from nfv_vim.nfvi._nfvi_network_module import nfvi_create_network  # noqa: F401
from nfv_vim.nfvi._nfvi_network_module import nfvi_create_network_host_services  # noqa: F401
from nfv_vim.nfvi._nfvi_network_module import nfvi_create_subnet  # noqa: F401

@@ -131,13 +133,18 @@ from nfv_vim.nfvi._nfvi_network_module import nfvi_delete_network  # noqa: F401
from nfv_vim.nfvi._nfvi_network_module import nfvi_delete_network_host_services  # noqa: F401
from nfv_vim.nfvi._nfvi_network_module import nfvi_delete_subnet  # noqa: F401
from nfv_vim.nfvi._nfvi_network_module import nfvi_enable_network_host_services  # noqa: F401
from nfv_vim.nfvi._nfvi_network_module import nfvi_get_agent_routers  # noqa: F401
from nfv_vim.nfvi._nfvi_network_module import nfvi_get_network  # noqa: F401
from nfv_vim.nfvi._nfvi_network_module import nfvi_get_network_agents  # noqa: F401
from nfv_vim.nfvi._nfvi_network_module import nfvi_get_networks  # noqa: F401
from nfv_vim.nfvi._nfvi_network_module import nfvi_get_physical_network  # noqa: F401
from nfv_vim.nfvi._nfvi_network_module import nfvi_get_router_ports  # noqa: F401
from nfv_vim.nfvi._nfvi_network_module import nfvi_get_subnet  # noqa: F401
from nfv_vim.nfvi._nfvi_network_module import nfvi_get_subnets  # noqa: F401
from nfv_vim.nfvi._nfvi_network_module import nfvi_network_plugin_disabled  # noqa: F401
from nfv_vim.nfvi._nfvi_network_module import nfvi_notify_network_host_disabled  # noqa: F401
from nfv_vim.nfvi._nfvi_network_module import nfvi_query_network_host_services  # noqa: F401
from nfv_vim.nfvi._nfvi_network_module import nfvi_remove_router_from_agent  # noqa: F401
from nfv_vim.nfvi._nfvi_network_module import nfvi_update_network  # noqa: F401
from nfv_vim.nfvi._nfvi_network_module import nfvi_update_subnet  # noqa: F401
@@ -12,6 +12,16 @@ DLOG = debug.debug_get_logger('nfv_vim.nfvi.nfvi_infrastructure_module')
_infrastructure_plugin = None


def nfvi_get_datanetworks(host_uuid, callback):
    """
    Get host data network information
    """
    cmd_id = _infrastructure_plugin.invoke_plugin('get_datanetworks',
                                                  host_uuid,
                                                  callback=callback)
    return cmd_id


def nfvi_get_system_info(callback):
    """
    Get information about the system
@@ -141,6 +141,60 @@ def nfvi_enable_network_host_services(host_uuid, host_name, host_personality,
     return cmd_id


+def nfvi_get_network_agents(callback):
+    """
+    Get network agents of all hosts
+    """
+    cmd_id = _network_plugin.invoke_plugin('get_network_agents',
+                                           callback=callback)
+    return cmd_id
+
+
+def nfvi_get_agent_routers(agent_id, callback):
+    """
+    Get routers hosted on a l3 agent
+    """
+    cmd_id = _network_plugin.invoke_plugin('get_agent_routers',
+                                           agent_id, callback=callback)
+    return cmd_id
+
+
+def nfvi_get_router_ports(router_id, callback):
+    """
+    Get router port information
+    """
+    cmd_id = _network_plugin.invoke_plugin('get_router_ports',
+                                           router_id, callback=callback)
+    return cmd_id
+
+
+def nfvi_add_router_to_agent(agent_id, router_id, callback):
+    """
+    Add a router to an L3 agent
+    """
+    cmd_id = _network_plugin.invoke_plugin('add_router_to_agent',
+                                           agent_id, router_id,
+                                           callback=callback)
+    return cmd_id
+
+
+def nfvi_remove_router_from_agent(agent_id, router_id, callback):
+    """
+    Remove a router from an L3 Agent
+    """
+    cmd_id = _network_plugin.invoke_plugin('remove_router_from_agent',
+                                           agent_id, router_id,
+                                           callback=callback)
+    return cmd_id
+
+
+def nfvi_get_physical_network(network_id, callback):
+    """
+    Get physical network of a network
+    """
+    cmd_id = _network_plugin.invoke_plugin('get_physical_network',
+                                           network_id, callback=callback)
+    return cmd_id
+
+
 def nfvi_delete_network_host_services(host_uuid, host_name, host_personality,
                                       callback):
     """
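For orientation, a purely illustrative sketch of how rebalance logic might chain these wrappers when an L3 agent's host goes down. This is not the network_rebalance implementation: callback plumbing is simplified to plain functions, and the shape of 'result-data' (a list of dicts with an 'id' key) is an assumption.

    from nfv_vim import nfvi

    def _evacuate_agent(down_agent_id, target_agent_id):
        """Move every router off a failed agent onto a healthy one."""
        def _routers_cb(response):
            if not response.get('completed'):
                return
            for router in response.get('result-data') or []:
                router_id = router.get('id')

                def _removed_cb(resp, rid=router_id):
                    # once detached, reattach the router to the target agent
                    if resp.get('completed'):
                        nfvi.nfvi_add_router_to_agent(target_agent_id, rid,
                                                      lambda r: None)

                nfvi.nfvi_remove_router_from_agent(down_agent_id, router_id,
                                                   _removed_cb)

        nfvi.nfvi_get_agent_routers(down_agent_id, _routers_cb)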
@@ -40,6 +40,13 @@ class NFVIInfrastructureAPI(object):
         """
         pass

+    @abc.abstractmethod
+    def get_datanetworks(self, future, host_uuid, callback):
+        """
+        Get data networks on a host from the plugin
+        """
+        pass
+
     @abc.abstractmethod
     def get_system_info(self, future, callback):
         """
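The effect of declaring the new method abstract is that any infrastructure plugin missing it can no longer be instantiated. A generic illustration of that abc contract (Python 3 syntax, made-up class names, not code from this change):

    import abc

    class PluginBase(abc.ABC):
        @abc.abstractmethod
        def get_datanetworks(self, future, host_uuid, callback):
            pass

    class IncompletePlugin(PluginBase):
        pass

    # IncompletePlugin() raises:
    #   TypeError: Can't instantiate abstract class IncompletePlugin ...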
@@ -130,6 +130,48 @@ class NFVINetworkAPI(object):
         """
         pass

+    @abc.abstractmethod
+    def get_network_agents(self, future, callback):
+        """
+        Get network agent information using the plugin
+        """
+        pass
+
+    @abc.abstractmethod
+    def get_agent_routers(self, future, agent_id, callback):
+        """
+        Get network routers on a hosting agent using the plugin
+        """
+        pass
+
+    @abc.abstractmethod
+    def get_router_ports(self, future, router_id, callback):
+        """
+        Get router ports using the plugin
+        """
+        pass
+
+    @abc.abstractmethod
+    def add_router_to_agent(self, future, agent_id, router_id, callback):
+        """
+        Add a router to an agent using the plugin
+        """
+        pass
+
+    @abc.abstractmethod
+    def remove_router_from_agent(self, future, agent_id, router_id, callback):
+        """
+        Remove router from an agent using the plugin
+        """
+        pass
+
+    @abc.abstractmethod
+    def get_physical_network(self, future, network_id, callback):
+        """
+        Get physical network of a network using the plugin
+        """
+        pass
+
     @abc.abstractmethod
     def delete_host_services(self, future, host_uuid, host_name,
                              host_personality, callback):
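A hypothetical in-memory stand-in for unit tests of code that drives these six calls. It deliberately does not inherit from NFVINetworkAPI (which has many more abstract methods); it only mirrors the signatures added above, and the callback protocol (send/close, 'completed'/'result-data' keys) is an assumption for illustration.

    class FakeNetworkPlugin(object):
        def __init__(self):
            self._routers_by_agent = {'agent-1': ['router-a'], 'agent-2': []}

        @staticmethod
        def _reply(callback, data):
            # assumed callback protocol, illustration only
            callback.send({'completed': True, 'reason': '',
                           'result-data': data})
            callback.close()

        def get_network_agents(self, future, callback):
            self._reply(callback, sorted(self._routers_by_agent.keys()))

        def get_agent_routers(self, future, agent_id, callback):
            self._reply(callback, self._routers_by_agent.get(agent_id, []))

        def get_router_ports(self, future, router_id, callback):
            self._reply(callback, [])

        def add_router_to_agent(self, future, agent_id, router_id, callback):
            self._routers_by_agent.setdefault(agent_id, []).append(router_id)
            self._reply(callback, router_id)

        def remove_router_from_agent(self, future, agent_id, router_id,
                                     callback):
            self._routers_by_agent.get(agent_id, []).remove(router_id)
            self._reply(callback, router_id)

        def get_physical_network(self, future, network_id, callback):
            self._reply(callback, 'physnet0')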
@@ -23,6 +23,7 @@ from nfv_vim import database
 from nfv_vim import directors
 from nfv_vim import dor
 from nfv_vim import events
+from nfv_vim import network_rebalance
 from nfv_vim import nfvi
 from nfv_vim import tables


@@ -85,6 +86,7 @@ def process_initialize():
     events.events_initialize()
     audits.audits_initialize()
     dor.dor_initialize()
+    network_rebalance.nr_initialize()
     return init_complete


@@ -107,6 +109,7 @@ def process_finalize():
     Virtual Infrastructure Manager - Finalize
     """
     dor.dor_finalize()
+    network_rebalance.nr_finalize()
     audits.audits_finalize()
     events.events_finalize()
     directors.directors_finalize()
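The nfv_vim.network_rebalance module itself is not part of these hunks; as a rough sketch of the initialize/finalize symmetry the two hook calls above expect (names and state below are purely hypothetical, not the actual module):

    # Hypothetical sketch only -- not the actual network_rebalance module.
    _nr_state = None

    def nr_initialize():
        """Set up whatever module-level state the L3 rebalance logic needs."""
        global _nr_state
        _nr_state = {'enabled': True, 'queued_work': []}

    def nr_finalize():
        """Tear that state down again when the VIM process shuts down."""
        global _nr_state
        _nr_state = None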