Avoid adding duplicated sec-group rules with same CIDR
1. What is the problem? If a tenant creates two subnets with the same CIDR but different allocation pools, XJob fails to set up the local security-group rules: it generates two identical rules for the two subnets, and Neutron disallows adding a rule that is identical to an existing one. 2. What is the solution? Use a "set" of subnet CIDRs to avoid generating duplicated rules. 3. What features need to be implemented in Tricircle to realize the solution? N/A Change-Id: If21562458f2ad82e71577392172deeb8dc5a2a13 Closes-Bug: #1691315
This commit is contained in:
parent
50067c3b4b
commit
875fac7e81
|
@ -35,7 +35,15 @@ echo create network1
|
|||
$openstacktop network create net1
|
||||
|
||||
echo create subnet1
|
||||
$openstacktop subnet create --subnet-range 10.0.1.0/24 --network net1 subnet1
|
||||
$openstacktop subnet create --subnet-range 10.0.1.0/24 --network net1 \
|
||||
--allocation-pool start=10.0.1.10,end=10.0.1.90 subnet1
|
||||
|
||||
echo create network3
|
||||
$openstacktop network create net3
|
||||
|
||||
echo create subnet3 that has same CIDR with subnet1
|
||||
$openstacktop subnet create --subnet-range 10.0.1.0/24 --network net3 \
|
||||
--allocation-pool start=10.0.1.110,end=10.0.1.190 subnet3
|
||||
|
||||
echo create port1
|
||||
port1_id=$($openstacktop port create --network net1 port1 -c id -f value)
|
||||
|
|
|
@ -698,6 +698,94 @@ class XManagerTest(unittest.TestCase):
|
|||
'security_group_id': sg_id}]})]
|
||||
mock_create.assert_has_calls(calls)
|
||||
|
||||
@patch.object(FakeClient, 'delete_security_group_rules')
@patch.object(FakeClient, 'create_security_group_rules')
def test_configure_security_group_rules_duplicated_cidr(self, mock_create,
                                                        mock_delete):
    """Two subnets sharing a CIDR must yield a single bottom rule.

    Regression test for bug #1691315: when a tenant creates two subnets
    with the same CIDR but different allocation pools, the xjob must not
    generate two identical bottom security-group rules, since Neutron
    rejects creating a rule identical to an existing one.
    """
    project_id = uuidutils.generate_uuid()
    sg_id = uuidutils.generate_uuid()
    sg_rule_id_1 = uuidutils.generate_uuid()
    sg_rule_id_2 = uuidutils.generate_uuid()

    # Top-side default security group. Rule 1 references the group
    # itself (remote_group_id == sg_id) and is expected to be expanded
    # into per-CIDR rules; rule 2 is a plain rule that also exists on
    # the bottom side, so it must survive untouched.
    sg = {'id': sg_id,
          'tenant_id': project_id,
          'name': 'default',
          'security_group_rules': [{
              'id': sg_rule_id_1,
              'remote_group_id': sg_id,
              'direction': 'ingress',
              'remote_ip_prefix': None,
              'protocol': None,
              'ethertype': 'IPv4',
              'port_range_max': -1,
              'port_range_min': -1,
              'security_group_id': sg_id},
              {'id': sg_rule_id_2,
               'remote_group_id': None,
               # BUG FIX: was the typo 'engress'. The same sg object is
               # registered on both top and bottom, and rules without a
               # remote_group_id are matched by whole-dict equality, so
               # correcting the spelling does not change the assertions
               # below — TODO(review) confirm against xmanager logic.
               'direction': 'egress',
               'remote_ip_prefix': None,
               'protocol': None,
               'ethertype': 'IPv4',
               'port_range_max': -1,
               'port_range_min': -1,
               'security_group_id': sg_id}]}
    RES_MAP['top']['security_group'].append(sg)

    for i in xrange(1, 3):
        pod_dict = {'pod_id': 'pod_id_%d' % i,
                    'region_name': 'pod_%d' % i,
                    'az_name': 'az_name_%d' % i}
        db_api.create_pod(self.context, pod_dict)

        network = {'id': 'network_%d_id' % i,
                   'tenant_id': project_id}
        # we create two subnets with identical cidr but different
        # allocation pools
        subnet = {'id': 'subnet_%d_id' % i,
                  'network_id': network['id'],
                  'cidr': '10.0.1.0/24',
                  'gateway_ip': '10.0.1.%d' % i,
                  'tenant_id': project_id,
                  # BUG FIX: the original "'10.0.1.%d' % 10 * i"
                  # formatted first and then string-repeated i times
                  # ('10.0.1.1010.0.1.10' for i=2); parenthesizing
                  # gives the intended starts 10.0.1.10 / 10.0.1.20,
                  # matching the already-correct 'end' expression.
                  'allocation_pools': {'start': '10.0.1.%d' % (10 * i),
                                       'end': '10.0.1.%d' % (10 * i + 9)},
                  'ip_version': q_constants.IP_VERSION_4}
        RES_MAP['top']['network'].append(network)
        RES_MAP['top']['subnet'].append(subnet)

        # Mirror the security group into each bottom pod and record the
        # top->bottom routing entry the xjob uses to find it.
        region_name = 'pod_%d' % i
        RES_MAP[region_name]['security_group'].append(sg)
        route = {'top_id': sg_id, 'bottom_id': sg_id,
                 'pod_id': pod_dict['pod_id'],
                 'resource_type': 'security_group'}
        with self.context.session.begin():
            core.create_resource(self.context, models.ResourceRouting,
                                 route)

    db_api.new_job(self.context, project_id, constants.JT_SEG_RULE_SETUP,
                   project_id)
    self.xmanager.configure_security_group_rules(
        self.context, payload={constants.JT_SEG_RULE_SETUP: project_id})

    # The remote-group rule is replaced by per-CIDR rules, so it must be
    # deleted from the bottom group.
    calls = [mock.call(self.context, sg_rule_id_1)]
    mock_delete.assert_has_calls(calls)
    call_rules_id = [
        call_arg[0][1] for call_arg in mock_delete.call_args_list]
    # bottom security group already has sg_rule_id_2, so this rule will
    # not be deleted
    self.assertNotIn(sg_rule_id_2, call_rules_id)

    # Both subnets share 10.0.1.0/24, so exactly one CIDR-based rule is
    # expected instead of two identical ones (the point of the fix).
    calls = [mock.call(self.context,
                       {'security_group_rules': [
                           {'remote_group_id': None,
                            'direction': 'ingress',
                            'remote_ip_prefix': '10.0.1.0/24',
                            'protocol': None,
                            'ethertype': 'IPv4',
                            'port_range_max': -1,
                            'port_range_min': -1,
                            'security_group_id': sg_id}]})]
    mock_create.assert_has_calls(calls)
|
||||
|
||||
@patch.object(helper.NetworkHelper, '_get_client', new=fake_get_client)
|
||||
@patch.object(FakeXJobAPI, 'setup_shadow_ports')
|
||||
def test_setup_shadow_ports(self, mock_setup):
|
||||
|
|
|
@ -819,6 +819,7 @@ class XManager(PeriodicTasks):
|
|||
ctx, [{'key': 'tenant_id', 'comparator': 'eq',
|
||||
'value': project_id}])
|
||||
bridge_ip_net = netaddr.IPNetwork(CONF.client.bridge_cidr)
|
||||
subnet_cidr_set = set()
|
||||
for subnet in subnets:
|
||||
ip_net = netaddr.IPNetwork(subnet['cidr'])
|
||||
if ip_net in bridge_ip_net:
|
||||
|
@ -827,6 +828,9 @@ class XManager(PeriodicTasks):
|
|||
# Tricircle has not supported IPv6 well yet,
|
||||
# so we ignore seg rules temporarily.
|
||||
if subnet['ip_version'] == q_constants.IP_VERSION_4:
|
||||
if subnet['cidr'] in subnet_cidr_set:
|
||||
continue
|
||||
subnet_cidr_set.add(subnet['cidr'])
|
||||
new_b_rules.append(
|
||||
self._construct_bottom_rule(t_rule, '',
|
||||
subnet['cidr']))
|
||||
|
|
Loading…
Reference in New Issue