Add subnet update operation support
1. What is the problem? Tricircle does not currently support the subnet update operation. 2. What is the solution? Implement the related functions to support updating subnets. 3. What features need to be added to Tricircle? No new features. Change-Id: I840caef4176f879ceedae98f6d2c22964ebf32d6
This commit is contained in:
parent
c4824356d1
commit
654fd620ec
@ -0,0 +1,21 @@
|
|||||||
|
---
features:
  - |
    Network

    * Update networks

    * qos-policy not supported

  - |
    Subnet

    * Update subnets

issues:
  - |
    Update network or subnet may not lead to the expected result if an
    instance is being booted at the same time. You can redo the update
    operation later to make it execute correctly.
|
@ -82,7 +82,7 @@ JT_ROUTER_SETUP = 'router_setup'
|
|||||||
JT_PORT_DELETE = 'port_delete'
|
JT_PORT_DELETE = 'port_delete'
|
||||||
JT_SEG_RULE_SETUP = 'seg_rule_setup'
|
JT_SEG_RULE_SETUP = 'seg_rule_setup'
|
||||||
JT_NETWORK_UPDATE = 'update_network'
|
JT_NETWORK_UPDATE = 'update_network'
|
||||||
|
JT_SUBNET_UPDATE = 'subnet_update'
|
||||||
# network type
|
# network type
|
||||||
NT_LOCAL = 'local'
|
NT_LOCAL = 'local'
|
||||||
NT_VLAN = 'vlan'
|
NT_VLAN = 'vlan'
|
||||||
|
@ -102,3 +102,9 @@ class XJobAPI(object):
|
|||||||
self.client.prepare(exchange='openstack').cast(
|
self.client.prepare(exchange='openstack').cast(
|
||||||
ctxt, 'update_network',
|
ctxt, 'update_network',
|
||||||
payload={constants.JT_NETWORK_UPDATE: combine_id})
|
payload={constants.JT_NETWORK_UPDATE: combine_id})
|
||||||
|
|
||||||
|
def update_subnet(self, ctxt, subnet_id, pod_id):
    """Trigger an asynchronous job to update bottom subnet(s).

    :param ctxt: tricircle context
    :param subnet_id: id of the top subnet whose bottom copies need update
    :param pod_id: id of the target bottom pod, or POD_NOT_SPECIFIED
    :return: None
    """
    # the job resource id encodes both the target pod and the subnet
    resource_id = '%s#%s' % (pod_id, subnet_id)
    rpc_client = self.client.prepare(exchange='openstack')
    rpc_client.cast(ctxt, 'update_subnet',
                    payload={constants.JT_SUBNET_UPDATE: resource_id})
|
||||||
|
@ -441,8 +441,39 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
|||||||
super(TricirclePlugin, self).delete_subnet(context, subnet_id)
|
super(TricirclePlugin, self).delete_subnet(context, subnet_id)
|
||||||
|
|
||||||
def update_subnet(self, context, subnet_id, subnet):
    """Update the top subnet and schedule bottom subnet updates.

    The top subnet is updated synchronously; bottom subnets (if any have
    been created) are updated by an asynchronous job dispatched via RPC.

    :param context: neutron context
    :param subnet_id: top subnet id
    :param subnet: updated subnet body
    :return: updated subnet
    """
    with context.session.begin():
        request_data = subnet[attributes.SUBNET]
        t_ctx = t_context.get_context_from_neutron_context(context)
        # apply the update to the top subnet first
        result = super(TricirclePlugin, self).update_subnet(
            context, subnet_id, subnet)
        # If dhcp is being enabled, make sure the top dhcp port exists.
        # The pre-created top dhcp port is kept even when "enable_dhcp"
        # is later switched from True back to False.
        if request_data.get('enable_dhcp', False):
            updated_subnet = super(TricirclePlugin, self).get_subnet(
                context, subnet_id)
            self.helper.prepare_top_dhcp_port(t_ctx, context,
                                              t_ctx.project_id,
                                              updated_subnet['network_id'],
                                              subnet_id)
        # schedule updates for bottom subnets, if any exist
        mappings = db_api.get_bottom_mappings_by_top_id(
            t_ctx, subnet_id, t_constants.RT_SUBNET)
        if mappings:
            self.xjob_handler.update_subnet(t_ctx, subnet_id,
                                            t_constants.POD_NOT_SPECIFIED)
        return result
|
||||||
|
|
||||||
def create_port(self, context, port):
|
def create_port(self, context, port):
|
||||||
port_body = port['port']
|
port_body = port['port']
|
||||||
|
@ -227,14 +227,12 @@ class NetworkHelper(object):
|
|||||||
return body
|
return body
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def get_create_subnet_body(project_id, t_subnet, b_net_id, gateway_ip):
|
def get_bottom_subnet_pools(t_subnet, gateway_ip):
|
||||||
"""Get request body to create bottom subnet
|
"""Get bottom subnet allocation pools
|
||||||
|
|
||||||
:param project_id: project id
|
:param t_subnet: top subnet
|
||||||
:param t_subnet: top subnet dict
|
:param gateway_ip: bottom subnet gateway ip
|
||||||
:param b_net_id: bottom network id
|
:return: bottom subnet allocation pools
|
||||||
:param gateway_ip: bottom gateway ip
|
|
||||||
:return: request body to create bottom subnet
|
|
||||||
"""
|
"""
|
||||||
pools = t_subnet['allocation_pools']
|
pools = t_subnet['allocation_pools']
|
||||||
t_gateway_ip = t_subnet['gateway_ip']
|
t_gateway_ip = t_subnet['gateway_ip']
|
||||||
@ -263,6 +261,24 @@ class NetworkHelper(object):
|
|||||||
new_pools.append(
|
new_pools.append(
|
||||||
{'start': ip_range[i + 1].format(),
|
{'start': ip_range[i + 1].format(),
|
||||||
'end': ip_range[ip_num - 1].format()})
|
'end': ip_range[ip_num - 1].format()})
|
||||||
|
break
|
||||||
|
if not ip_found:
|
||||||
|
new_pools.extend(pools)
|
||||||
|
if not ip_merged:
|
||||||
|
new_pools.insert(0, {'start': t_gateway_ip, 'end': t_gateway_ip})
|
||||||
|
return new_pools
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def get_create_subnet_body(project_id, t_subnet, b_net_id, gateway_ip):
|
||||||
|
"""Get request body to create bottom subnet
|
||||||
|
|
||||||
|
:param project_id: project id
|
||||||
|
:param t_subnet: top subnet dict
|
||||||
|
:param b_net_id: bottom network id
|
||||||
|
:param gateway_ip: bottom gateway ip
|
||||||
|
:return: request body to create bottom subnet
|
||||||
|
"""
|
||||||
|
new_pools = NetworkHelper.get_bottom_subnet_pools(t_subnet, gateway_ip)
|
||||||
body = {
|
body = {
|
||||||
'subnet': {
|
'subnet': {
|
||||||
'network_id': b_net_id,
|
'network_id': b_net_id,
|
||||||
@ -287,7 +303,7 @@ class NetworkHelper(object):
|
|||||||
:param subnet_map: dict with top subnet id as key and bottom subnet
|
:param subnet_map: dict with top subnet id as key and bottom subnet
|
||||||
id as value
|
id as value
|
||||||
:param b_net_id: bottom network id
|
:param b_net_id: bottom network id
|
||||||
:param security_group_ids: list of bottom security group id
|
:param b_security_group_ids: list of bottom security group id
|
||||||
:return: request body to create bottom port
|
:return: request body to create bottom port
|
||||||
"""
|
"""
|
||||||
b_fixed_ips = []
|
b_fixed_ips = []
|
||||||
|
@ -210,7 +210,7 @@ class TricirclePlugin(plugin.Ml2Plugin):
|
|||||||
|
|
||||||
def get_network(self, context, _id, fields=None):
|
def get_network(self, context, _id, fields=None):
|
||||||
try:
|
try:
|
||||||
b_network = self.core_plugin.get_network(context, _id, fields)
|
b_network = self.core_plugin.get_network(context, _id)
|
||||||
subnet_ids = self._ensure_subnet(context, b_network, False)
|
subnet_ids = self._ensure_subnet(context, b_network, False)
|
||||||
except q_exceptions.NotFound:
|
except q_exceptions.NotFound:
|
||||||
t_ctx = t_context.get_context_from_neutron_context(context)
|
t_ctx = t_context.get_context_from_neutron_context(context)
|
||||||
@ -301,7 +301,7 @@ class TricirclePlugin(plugin.Ml2Plugin):
|
|||||||
def get_subnet(self, context, _id, fields=None):
|
def get_subnet(self, context, _id, fields=None):
|
||||||
t_ctx = t_context.get_context_from_neutron_context(context)
|
t_ctx = t_context.get_context_from_neutron_context(context)
|
||||||
try:
|
try:
|
||||||
b_subnet = self.core_plugin.get_subnet(context, _id, fields)
|
b_subnet = self.core_plugin.get_subnet(context, _id)
|
||||||
except q_exceptions.NotFound:
|
except q_exceptions.NotFound:
|
||||||
if self._skip_non_api_query(t_ctx):
|
if self._skip_non_api_query(t_ctx):
|
||||||
raise q_exceptions.SubnetNotFound(subnet_id=_id)
|
raise q_exceptions.SubnetNotFound(subnet_id=_id)
|
||||||
@ -354,6 +354,32 @@ class TricirclePlugin(plugin.Ml2Plugin):
|
|||||||
b_subnets.append(self._fields(b_subnet, fields))
|
b_subnets.append(self._fields(b_subnet, fields))
|
||||||
return b_subnets
|
return b_subnets
|
||||||
|
|
||||||
|
def update_subnet(self, context, _id, subnet):
    """update bottom subnet

    Can not directly use ML2 plugin's update_subnet function,
    because it will call local plugin's get_subnet in a transaction,
    the local plugin's get_subnet will create a dhcp port when subnet's
    enable_dhcp attribute is changed from False to True, but neutron
    doesn't allow calling create_port in a transaction and will raise an
    exception.

    :param context: neutron context
    :param _id: subnet_id
    :param subnet: update body
    :return: updated subnet
    """
    t_ctx = t_context.get_context_from_neutron_context(context)
    b_subnet = self.core_plugin.get_subnet(context, _id)
    origin_enable_dhcp = b_subnet['enable_dhcp']
    # "enable_dhcp" may be absent from the request body when the user
    # updates only other attributes (e.g. name), so use .get to avoid
    # a KeyError in that case
    req_enable_dhcp = subnet['subnet'].get('enable_dhcp', False)
    # when the request enables dhcp and dhcp was originally disabled,
    # ensure the subnet dhcp port is created before the core plugin
    # handles the update
    if req_enable_dhcp and not origin_enable_dhcp:
        self._ensure_subnet_dhcp_port(t_ctx, context, b_subnet)
    res = self.core_plugin.update_subnet(context, _id, subnet)
    return res
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _is_special_port(port):
|
def _is_special_port(port):
|
||||||
return port.get('device_owner') in (
|
return port.get('device_owner') in (
|
||||||
|
@ -49,6 +49,7 @@ from neutron.ipam import driver
|
|||||||
from neutron.ipam import exceptions as ipam_exc
|
from neutron.ipam import exceptions as ipam_exc
|
||||||
from neutron.ipam import requests
|
from neutron.ipam import requests
|
||||||
import neutron.ipam.utils as ipam_utils
|
import neutron.ipam.utils as ipam_utils
|
||||||
|
|
||||||
from neutron import manager
|
from neutron import manager
|
||||||
import neutronclient.common.exceptions as q_exceptions
|
import neutronclient.common.exceptions as q_exceptions
|
||||||
|
|
||||||
@ -232,7 +233,7 @@ class FakePool(driver.Pool):
|
|||||||
return FakeIpamSubnet(subnet_request)
|
return FakeIpamSubnet(subnet_request)
|
||||||
|
|
||||||
raise ipam_exc.InvalidSubnetRequest(
|
raise ipam_exc.InvalidSubnetRequest(
|
||||||
reason=_("updated subnet id no not found"))
|
reason=_("updated subnet id not found"))
|
||||||
|
|
||||||
def remove_subnet(self, subnet_id):
|
def remove_subnet(self, subnet_id):
|
||||||
pass
|
pass
|
||||||
@ -473,7 +474,20 @@ class FakeClient(object):
|
|||||||
return
|
return
|
||||||
|
|
||||||
def update_subnets(self, ctx, subnet_id, body):
    """Update a fake bottom subnet held in the pod's in-memory store.

    Raises InvalidSubnetRequest when no stored subnet matches subnet_id.
    """
    subnet_data = body[neutron_attributes.SUBNET]
    # pick the in-memory store of the pod this client talks to
    if self.region_name == 'pod_1':
        store = BOTTOM1_SUBNETS
    else:
        store = BOTTOM2_SUBNETS

    for b_subnet in store:
        if b_subnet['id'] != subnet_id:
            continue
        # copy every requested attribute onto the stored subnet
        for key, value in subnet_data.items():
            b_subnet[key] = value
        return

    raise ipam_exc.InvalidSubnetRequest(
        reason=_("updated subnet id not found"))
|
||||||
|
|
||||||
def create_ports(self, ctx, body):
|
def create_ports(self, ctx, body):
|
||||||
return self.create_resources('port', ctx, body)
|
return self.create_resources('port', ctx, body)
|
||||||
@ -988,6 +1002,11 @@ class FakeBaseRPCAPI(object):
|
|||||||
self.xmanager.update_network(
|
self.xmanager.update_network(
|
||||||
ctxt, payload={constants.JT_NETWORK_UPDATE: combine_id})
|
ctxt, payload={constants.JT_NETWORK_UPDATE: combine_id})
|
||||||
|
|
||||||
|
def update_subnet(self, ctxt, subnet_id, pod_id):
    """Fake RPC entry point: run the subnet update job synchronously."""
    job_resource = '%s#%s' % (pod_id, subnet_id)
    payload = {constants.JT_SUBNET_UPDATE: job_resource}
    self.xmanager.update_subnet(ctxt, payload=payload)
|
||||||
|
|
||||||
|
|
||||||
class FakeRPCAPI(FakeBaseRPCAPI):
|
class FakeRPCAPI(FakeBaseRPCAPI):
|
||||||
def __init__(self, fake_plugin):
|
def __init__(self, fake_plugin):
|
||||||
@ -1719,7 +1738,10 @@ class PluginTest(unittest.TestCase,
|
|||||||
'gateway_ip': '10.0.%d.1' % index,
|
'gateway_ip': '10.0.%d.1' % index,
|
||||||
'ipv6_address_mode': '',
|
'ipv6_address_mode': '',
|
||||||
'ipv6_ra_mode': '',
|
'ipv6_ra_mode': '',
|
||||||
'tenant_id': tenant_id
|
'tenant_id': tenant_id,
|
||||||
|
'description': 'description',
|
||||||
|
'host_routes': [],
|
||||||
|
'dns_nameservers': []
|
||||||
}
|
}
|
||||||
TOP_NETS.append(DotDict(t_net))
|
TOP_NETS.append(DotDict(t_net))
|
||||||
TOP_SUBNETS.append(DotDict(t_subnet))
|
TOP_SUBNETS.append(DotDict(t_subnet))
|
||||||
@ -1740,10 +1762,13 @@ class PluginTest(unittest.TestCase,
|
|||||||
'cidr': '10.0.%d.0/24' % index,
|
'cidr': '10.0.%d.0/24' % index,
|
||||||
'allocation_pools': [],
|
'allocation_pools': [],
|
||||||
'enable_dhcp': enable_dhcp,
|
'enable_dhcp': enable_dhcp,
|
||||||
'gateway_ip': '10.0.%d.1' % index,
|
'gateway_ip': '10.0.%d.25' % index,
|
||||||
'ipv6_address_mode': '',
|
'ipv6_address_mode': '',
|
||||||
'ipv6_ra_mode': '',
|
'ipv6_ra_mode': '',
|
||||||
'tenant_id': tenant_id
|
'tenant_id': tenant_id,
|
||||||
|
'description': 'description',
|
||||||
|
'host_routes': [],
|
||||||
|
'dns_nameservers': []
|
||||||
}
|
}
|
||||||
if region_name == 'pod_1':
|
if region_name == 'pod_1':
|
||||||
BOTTOM1_NETS.append(DotDict(b_net))
|
BOTTOM1_NETS.append(DotDict(b_net))
|
||||||
@ -1828,6 +1853,125 @@ class PluginTest(unittest.TestCase,
|
|||||||
# check pre-created ports are all deleted
|
# check pre-created ports are all deleted
|
||||||
self.assertEqual(port_num - pre_created_port_num, len(TOP_PORTS))
|
self.assertEqual(port_num - pre_created_port_num, len(TOP_PORTS))
|
||||||
|
|
||||||
|
@patch.object(directory, 'get_plugin', new=fake_get_plugin)
@patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
@patch.object(context, 'get_context_from_neutron_context')
def test_update_subnet(self, mock_context):
    """Updating a top subnet propagates the update to its bottom copy."""
    tenant_id = TEST_TENANT_ID
    self._basic_pod_route_setup()
    neutron_context = FakeNeutronContext()
    t_ctx = context.get_db_context()
    _, t_subnet_id, _, b_subnet_id = self._prepare_network_test(
        tenant_id, t_ctx, 'pod_1', 1)

    fake_plugin = FakePlugin()
    fake_client = FakeClient('pod_1')
    mock_context.return_value = t_ctx
    update_req = {
        'subnet':
            {'name': 'new_name',
             'description': 'new_description',
             'allocation_pools': [{"start": "10.0.1.10",
                                   "end": "10.0.1.254"}],
             'gateway_ip': '10.0.1.2',
             'host_routes': [{"nexthop": "10.1.0.1",
                              "destination": "10.1.0.0/24"},
                             {"nexthop": "10.2.0.1",
                              "destination": "10.2.0.0/24"}],
             'dns_nameservers': ['114.114.114.114', '8.8.8.8']}
    }
    # keep a pristine copy: the plugin may mutate the request body
    expected = copy.deepcopy(update_req)
    fake_plugin.update_subnet(neutron_context, t_subnet_id, update_req)

    # the top subnet takes every requested attribute verbatim
    top_subnet = fake_plugin.get_subnet(neutron_context, t_subnet_id)
    self.assertEqual(top_subnet['name'], expected['subnet']['name'])
    self.assertEqual(top_subnet['description'],
                     expected['subnet']['description'])
    self.assertEqual(top_subnet['allocation_pools'],
                     expected['subnet']['allocation_pools'])
    six.assertCountEqual(self, top_subnet['host_routes'],
                         expected['subnet']['host_routes'])
    six.assertCountEqual(self, top_subnet['dns_nameservers'],
                         expected['subnet']['dns_nameservers'])
    self.assertEqual(top_subnet['gateway_ip'],
                     expected['subnet']['gateway_ip'])

    bottom_subnet = fake_client.get_subnets(t_ctx, b_subnet_id)
    # name is set to top resource id, which is used by lock_handle to
    # retrieve bottom/local resources that have been created but not
    # registered in the resource routing table, so it's not allowed
    # to be updated
    self.assertEqual(bottom_subnet['name'], b_subnet_id)
    self.assertEqual(bottom_subnet['description'],
                     expected['subnet']['description'])
    # the bottom gateway ip is carved out of the requested pools
    bottom_allocation_pools = [{'start': '10.0.1.2', 'end': '10.0.1.2'},
                               {'start': '10.0.1.10', 'end': '10.0.1.24'},
                               {'start': '10.0.1.26', 'end': '10.0.1.254'}]
    self.assertEqual(bottom_subnet['allocation_pools'],
                     bottom_allocation_pools)
    six.assertCountEqual(self,
                         bottom_subnet['host_routes'],
                         expected['subnet']['host_routes'])
    six.assertCountEqual(self,
                         bottom_subnet['dns_nameservers'],
                         expected['subnet']['dns_nameservers'])
    # gateway ip stays at its original value because it is reserved
    # by the top pod, so it's not allowed to be updated
    self.assertEqual(bottom_subnet['gateway_ip'], '10.0.1.25')
|
||||||
|
|
||||||
|
@patch.object(directory, 'get_plugin', new=fake_get_plugin)
@patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
@patch.object(ipam_pluggable_backend.IpamPluggableBackend,
              '_allocate_ips_for_port', new=fake_allocate_ips_for_port)
@patch.object(context, 'get_context_from_neutron_context')
def test_update_subnet_enable_disable_dhcp(self, mock_context):
    """Toggling enable_dhcp creates one dhcp port and then keeps it."""

    tenant_id = TEST_TENANT_ID
    self._basic_pod_route_setup()
    neutron_context = FakeNeutronContext()
    t_ctx = context.get_db_context()
    _, t_subnet_id, _, b_subnet_id = self._prepare_network_test(
        tenant_id, t_ctx, 'pod_1', 1, enable_dhcp=False)

    fake_plugin = FakePlugin()
    fake_client = FakeClient('pod_1')
    mock_context.return_value = t_ctx

    # dhcp starts disabled, so no dhcp port exists anywhere yet
    self.assertEqual(0, len(TOP_PORTS))
    self.assertEqual(0, len(BOTTOM1_PORTS))

    enable_req = {
        'subnet':
            {'enable_dhcp': 'True'}
    }
    expected = copy.deepcopy(enable_req)
    # from disable dhcp to enable dhcp, create a new dhcp port
    fake_plugin.update_subnet(neutron_context, t_subnet_id, enable_req)
    top_subnet = fake_plugin.get_subnet(neutron_context, t_subnet_id)
    self.assertEqual(top_subnet['enable_dhcp'],
                     expected['subnet']['enable_dhcp'])
    self.assertEqual(1, len(TOP_PORTS))

    bottom_subnet = fake_client.get_subnets(t_ctx, b_subnet_id)
    self.assertEqual(bottom_subnet['enable_dhcp'],
                     expected['subnet']['enable_dhcp'])

    disable_req = {
        'subnet':
            {'enable_dhcp': 'False'}
    }
    expected = copy.deepcopy(disable_req)
    # from enable dhcp to disable dhcp, the previously created dhcp
    # port is reserved rather than deleted
    fake_plugin.update_subnet(neutron_context, t_subnet_id, disable_req)
    top_subnet = fake_plugin.get_subnet(neutron_context, t_subnet_id)
    self.assertEqual(top_subnet['enable_dhcp'],
                     expected['subnet']['enable_dhcp'])
    self.assertEqual(1, len(TOP_PORTS))

    bottom_subnet = fake_client.get_subnets(t_ctx, b_subnet_id)
    self.assertEqual(bottom_subnet['enable_dhcp'],
                     expected['subnet']['enable_dhcp'])
|
||||||
|
|
||||||
@patch.object(directory, 'get_plugin', new=fake_get_plugin)
|
@patch.object(directory, 'get_plugin', new=fake_get_plugin)
|
||||||
@patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
|
@patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
|
||||||
@patch.object(_utils, 'filter_non_model_columns',
|
@patch.object(_utils, 'filter_non_model_columns',
|
||||||
|
@ -37,32 +37,42 @@ class HelperTest(unittest.TestCase):
|
|||||||
'ip_version': 4,
|
'ip_version': 4,
|
||||||
'cidr': '10.0.1.0/24',
|
'cidr': '10.0.1.0/24',
|
||||||
'gateway_ip': '10.0.1.1',
|
'gateway_ip': '10.0.1.1',
|
||||||
'allocation_pools': [{'start': '10.0.1.2', 'end': '10.0.1.254'}],
|
'allocation_pools': [{'start': '10.0.1.10', 'end': '10.0.1.254'}],
|
||||||
'enable_dhcp': True,
|
'enable_dhcp': True,
|
||||||
'tenant_id': project_id
|
'tenant_id': project_id
|
||||||
}
|
}
|
||||||
body = self.helper.get_create_subnet_body(project_id, t_subnet,
|
body = self.helper.get_create_subnet_body(project_id, t_subnet,
|
||||||
b_net_id, '10.0.1.2')
|
b_net_id, '10.0.1.10')
|
||||||
six.assertCountEqual(self,
|
six.assertCountEqual(self,
|
||||||
[{'start': '10.0.1.1', 'end': '10.0.1.1'},
|
[{'start': '10.0.1.1', 'end': '10.0.1.1'},
|
||||||
{'start': '10.0.1.3', 'end': '10.0.1.254'}],
|
{'start': '10.0.1.11', 'end': '10.0.1.254'}],
|
||||||
body['subnet']['allocation_pools'])
|
body['subnet']['allocation_pools'])
|
||||||
self.assertEqual('10.0.1.2', body['subnet']['gateway_ip'])
|
self.assertEqual('10.0.1.10', body['subnet']['gateway_ip'])
|
||||||
|
|
||||||
body = self.helper.get_create_subnet_body(project_id, t_subnet,
|
body = self.helper.get_create_subnet_body(project_id, t_subnet,
|
||||||
b_net_id, '10.0.1.254')
|
b_net_id, '10.0.1.254')
|
||||||
six.assertCountEqual(self,
|
six.assertCountEqual(self,
|
||||||
[{'start': '10.0.1.1', 'end': '10.0.1.253'}],
|
[{'start': '10.0.1.1', 'end': '10.0.1.1'},
|
||||||
|
{'start': '10.0.1.10', 'end': '10.0.1.253'}],
|
||||||
body['subnet']['allocation_pools'])
|
body['subnet']['allocation_pools'])
|
||||||
self.assertEqual('10.0.1.254', body['subnet']['gateway_ip'])
|
self.assertEqual('10.0.1.254', body['subnet']['gateway_ip'])
|
||||||
|
|
||||||
|
body = self.helper.get_create_subnet_body(project_id, t_subnet,
|
||||||
|
b_net_id, '10.0.1.8')
|
||||||
|
six.assertCountEqual(self,
|
||||||
|
[{'start': '10.0.1.1', 'end': '10.0.1.1'},
|
||||||
|
{'start': '10.0.1.10', 'end': '10.0.1.254'}],
|
||||||
|
body['subnet']['allocation_pools'])
|
||||||
|
self.assertEqual('10.0.1.8', body['subnet']['gateway_ip'])
|
||||||
|
|
||||||
t_subnet['allocation_pools'] = [
|
t_subnet['allocation_pools'] = [
|
||||||
{'start': '10.0.1.2', 'end': '10.0.1.10'},
|
{'start': '10.0.1.2', 'end': '10.0.1.10'},
|
||||||
{'start': '10.0.1.20', 'end': '10.0.1.254'}]
|
{'start': '10.0.1.20', 'end': '10.0.1.254'}]
|
||||||
body = self.helper.get_create_subnet_body(project_id, t_subnet,
|
body = self.helper.get_create_subnet_body(project_id, t_subnet,
|
||||||
b_net_id, '10.0.1.5')
|
b_net_id, '10.0.1.5')
|
||||||
six.assertCountEqual(self, [{'start': '10.0.1.1', 'end': '10.0.1.4'},
|
six.assertCountEqual(self,
|
||||||
{'start': '10.0.1.6', 'end': '10.0.1.10'},
|
[{'start': '10.0.1.1', 'end': '10.0.1.4'},
|
||||||
{'start': '10.0.1.20', 'end': '10.0.1.254'}],
|
{'start': '10.0.1.6', 'end': '10.0.1.10'},
|
||||||
|
{'start': '10.0.1.20', 'end': '10.0.1.254'}],
|
||||||
body['subnet']['allocation_pools'])
|
body['subnet']['allocation_pools'])
|
||||||
self.assertEqual('10.0.1.5', body['subnet']['gateway_ip'])
|
self.assertEqual('10.0.1.5', body['subnet']['gateway_ip'])
|
||||||
|
@ -150,7 +150,8 @@ class XManager(PeriodicTasks):
|
|||||||
constants.JT_ROUTER_SETUP: self.setup_bottom_router,
|
constants.JT_ROUTER_SETUP: self.setup_bottom_router,
|
||||||
constants.JT_PORT_DELETE: self.delete_server_port,
|
constants.JT_PORT_DELETE: self.delete_server_port,
|
||||||
constants.JT_SEG_RULE_SETUP: self.configure_security_group_rules,
|
constants.JT_SEG_RULE_SETUP: self.configure_security_group_rules,
|
||||||
constants.JT_NETWORK_UPDATE: self.update_network}
|
constants.JT_NETWORK_UPDATE: self.update_network,
|
||||||
|
constants.JT_SUBNET_UPDATE: self.update_subnet}
|
||||||
self.helper = helper.NetworkHelper()
|
self.helper = helper.NetworkHelper()
|
||||||
self.xjob_handler = xrpcapi.XJobAPI()
|
self.xjob_handler = xrpcapi.XJobAPI()
|
||||||
super(XManager, self).__init__()
|
super(XManager, self).__init__()
|
||||||
@ -849,3 +850,64 @@ class XManager(PeriodicTasks):
|
|||||||
LOG.error(_LE('network: %(net_id)s not found,'
|
LOG.error(_LE('network: %(net_id)s not found,'
|
||||||
'pod name: %(name)s'),
|
'pod name: %(name)s'),
|
||||||
{'net_id': b_network_id, 'name': b_region_name})
|
{'net_id': b_network_id, 'name': b_region_name})
|
||||||
|
|
||||||
|
@_job_handle(constants.JT_SUBNET_UPDATE)
def update_subnet(self, ctx, payload):
    """update bottom subnet

    if bottom pod id equal to POD_NOT_SPECIFIED, dispatch jobs for every
    mapped bottom pod via RPC, otherwise update subnet in the specified
    pod.

    :param ctx: tricircle context
    :param payload: dict whose key is JT_SUBNET_UPDATE and value
        is "bottom_pod_id#top_subnet_id" (pod id first — this is the
        order XJobAPI.update_subnet packs the combined id in)
    :return: None
    """
    (b_pod_id, t_subnet_id) = payload[
        constants.JT_SUBNET_UPDATE].split('#')
    if b_pod_id == constants.POD_NOT_SPECIFIED:
        # fan out: dispatch one job per bottom pod hosting this subnet
        mappings = db_api.get_bottom_mappings_by_top_id(
            ctx, t_subnet_id, constants.RT_SUBNET)
        b_pods = [mapping[0] for mapping in mappings]
        for b_pod in b_pods:
            self.xjob_handler.update_subnet(ctx, t_subnet_id,
                                            b_pod['pod_id'])
        return

    t_client = self._get_client()
    t_subnet = t_client.get_subnets(ctx, t_subnet_id)
    if not t_subnet:
        # top subnet is gone, nothing to synchronize
        return
    b_pod = db_api.get_pod(ctx, b_pod_id)
    b_region_name = b_pod['region_name']
    b_subnet_id = db_api.get_bottom_id_by_top_id_region_name(
        ctx, t_subnet_id, b_region_name, constants.RT_SUBNET)
    b_client = self._get_client(region_name=b_region_name)
    b_subnet = b_client.get_subnets(ctx, b_subnet_id)
    b_gateway_ip = b_subnet['gateway_ip']

    # we need to remove the bottom subnet gateway ip from the top subnet
    # allocation pools
    b_allocation_pools = helper.NetworkHelper.get_bottom_subnet_pools(
        t_subnet, b_gateway_ip)

    # bottom gateway_ip doesn't need to be updated, because it is reserved
    # by top pod.
    # name is not allowed to be updated, because it is used by
    # lock_handle to retrieve bottom/local resources that have been
    # created but not registered in the resource routing table
    body = {
        'subnet':
            {'description': t_subnet['description'],
             'enable_dhcp': t_subnet['enable_dhcp'],
             'allocation_pools': b_allocation_pools,
             'host_routes': t_subnet['host_routes'],
             'dns_nameservers': t_subnet['dns_nameservers']}
    }
    try:
        b_client.update_subnets(ctx, b_subnet_id, body)
    except q_cli_exceptions.NotFound:
        LOG.error(_LE('subnet: %(subnet_id)s not found, '
                      'pod name: %(name)s'),
                  {'subnet_id': b_subnet_id, 'name': b_region_name})
|
||||||
|
Loading…
x
Reference in New Issue
Block a user