[PRD-1308] network range intersection check NovaNet
1. Check intersection between networks' address spaces
2. Check intersection inside Public and Floating address spaces
3. Check intersection of untagged networks on one interface

Change-Id: Ib477811259bf856416a1b6378d5ab6713c4719a2
parent 86a0f13dfc
commit 1267d1e3e3
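Taken together, this change moves the network validation logic out of CheckNetworksTask and into a new NetworkCheck class. A minimal sketch of how the task layer is expected to drive it after this commit, condensed from the calls visible further down in the diff (NetworkCheck(task, data), check_configuration(), check_interface_mapping()); the surrounding task plumbing is simplified here and is not part of the commit itself:

# Sketch only, based on the new CheckNetworksTask.execute shown below.
from nailgun.network.checker import NetworkCheck

def execute(task, data, check_admin_untagged=False):
    checker = NetworkCheck(task, data)
    # CIDR / IP range / VLAN intersection checks (Nova or Neutron provider)
    checker.check_configuration()
    if check_admin_untagged:
        # untagged networks sharing one physical interface, admin/private mapping
        checker.check_interface_mapping()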
@@ -237,6 +237,15 @@ class Cluster(Base):
            netmanager.assign_admin_ips(
                node.id, len(node.meta.get('interfaces', [])))

    @property
    def network_manager(self):
        if self.net_provider == 'neutron':
            from nailgun.network.neutron import NeutronManager
            return NeutronManager
        else:
            from nailgun.network.manager import NetworkManager
            return NetworkManager


class AttributesGenerators(object):
    @classmethod
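Note that the new network_manager property returns a manager class rather than an instance, so call sites instantiate it themselves, as NetworkCheck.__init__ does in the new checker module below. A short illustration (cluster stands for any Cluster instance):

# Illustration only: selecting and instantiating the manager via the property.
net_man = cluster.network_manager()          # NeutronManager() or NetworkManager()
admin_ng = net_man.get_admin_network_group()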
@@ -19,7 +19,7 @@ from nailgun.network.manager import NetworkManager
from nailgun.network.neutron import NeutronManager


class NovaNetworkConfigurationSerializer(BasicSerializer):
class NetworkConfigurationSerializer(BasicSerializer):

    fields = ('id', 'cluster_id', 'name', 'cidr', 'netmask',
              'gateway', 'vlan_start', 'network_size', 'amount')
@@ -34,6 +34,9 @@ class NovaNetworkConfigurationSerializer(BasicSerializer):
        data_dict.setdefault("gateway", "")
        return data_dict


class NovaNetworkConfigurationSerializer(NetworkConfigurationSerializer):

    @classmethod
    def serialize_for_cluster(cls, cluster):
        result = {}
@@ -67,20 +70,7 @@ class NovaNetworkConfigurationSerializer(BasicSerializer):
        return result


class NeutronNetworkConfigurationSerializer(BasicSerializer):

    fields = ('id', 'cluster_id', 'name', 'cidr', 'netmask', 'gateway',
              'vlan_start', 'network_size', 'amount')

    @classmethod
    def serialize_network_group(cls, instance, fields=None):
        data_dict = BasicSerializer.serialize(instance, fields=cls.fields)
        data_dict["ip_ranges"] = [
            [ir.first, ir.last] for ir in instance.ip_ranges
        ]
        data_dict.setdefault("netmask", "")
        data_dict.setdefault("gateway", "")
        return data_dict
class NeutronNetworkConfigurationSerializer(NetworkConfigurationSerializer):

    @classmethod
    def serialize_for_cluster(cls, cluster):
nailgun/nailgun/network/checker.py (new file, 531 lines)
@@ -0,0 +1,531 @@
# -*- coding: utf-8 -*-

# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


from itertools import combinations
from itertools import product

import netaddr

from nailgun.api.models import NetworkGroup
from nailgun.api.serializers.network_configuration \
    import NetworkConfigurationSerializer
from nailgun.db import db
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.task.helpers import TaskHelper


class NetworkCheck(object):

    def __init__(self, task, data):
        """Collect Network Groups data
        """
        self.cluster = task.cluster
        self.task = task
        self.data = data
        self.net_man = self.cluster.network_manager()
        self.net_provider = self.cluster.net_provider
        admin_ng = self.net_man.get_admin_network_group()
        fields = NetworkGroup.__mapper__.columns.keys()
        net = NetworkConfigurationSerializer.serialize_network_group(admin_ng,
                                                                     fields)
        # change Admin name for UI
        net.update(name='admin (PXE)')
        self.networks = [net]
        for ng in self.cluster.network_groups:
            net = NetworkConfigurationSerializer.serialize_network_group(
                ng,
                fields)
            self.networks.append(net)
        # merge with data['networks']
        if 'networks' in data:
            for data_net in data['networks']:
                for net in self.networks:
                    if data_net['id'] == net['id']:
                        net.update(data_net)
                        break
                else:
                    raise errors.NetworkCheckError(
                        u"Invalid network ID: {0}".format(data_net['id']),
                        add_client=False)

        self.result = []
        self.err_msgs = []

    def expose_error_messages(self):
        TaskHelper.expose_network_check_error_messages(
            self.task,
            self.result,
            self.err_msgs)

    def check_untagged_intersection(self):
        # check if there are untagged networks on the same interface
        untagged_nets = set([n['name'] for n in self.networks
                             if n['vlan_start'] is None])
        # check only if we have 2 or more untagged networks
        pub_flt = set(['public', 'floating'])
        if len(untagged_nets) >= 2 and untagged_nets != pub_flt:
            logger.info(
                "Untagged networks found, "
                "checking intersection between them...")
            interfaces = []
            for node in self.cluster.nodes:
                for iface in node.interfaces:
                    interfaces.append(iface)
            found_intersection = []

            for iface in interfaces:
                # network name is changed for Admin on UI
                nets = [[ng['name'] for ng in self.networks
                         if n.id == ng['id']][0]
                        for n in iface.assigned_networks]
                crossed_nets = set(nets) & untagged_nets
                if len(crossed_nets) > 1 and crossed_nets != pub_flt:
                    err_net_names = ['"{0}"'.format(i)
                                     for i in crossed_nets]
                    found_intersection.append(
                        [iface.node.name, err_net_names])

            if found_intersection:
                nodes_with_errors = [
                    u'Node "{0}": {1}'.format(
                        int_node,
                        ", ".join(int_nets)
                    ) for int_node, int_nets in found_intersection]
                self.err_msgs.append(
                    u"Some untagged networks are assigned to the same "
                    u"physical interface. You should assign them to "
                    u"different physical interfaces:\n{0}".format(
                        "\n".join(nodes_with_errors)))
                self.result.append({"id": [],
                                    "range_errors": [],
                                    "errors": ["untagged"]})
            self.expose_error_messages()

    def check_net_addr_spaces_intersection(self, pub_cidr):
        # check intersection of networks address spaces
        # for all networks
        def addr_space(ng, ng_pair):
            if ng['name'] == 'floating':
                return [netaddr.IPRange(v[0], v[1])
                        for v in ng['ip_ranges']]
            elif ng['name'] == 'public':
                if ng_pair['name'] == 'floating':
                    return [netaddr.IPRange(v[0], v[1])
                            for v in ng['ip_ranges']]
                else:
                    return [pub_cidr]
            else:
                return [netaddr.IPNetwork(ng['cidr'])]

        for ngs in combinations(self.networks, 2):
            for addrs in product(addr_space(ngs[0], ngs[1]),
                                 addr_space(ngs[1], ngs[0])):
                if self.net_man.is_range_intersection(addrs[0], addrs[1]):
                    self.err_msgs.append(
                        u"Address space intersection between "
                        "networks: {0}.".format(
                            ", ".join([ngs[0]['name'], ngs[1]['name']])
                        )
                    )
                    self.result.append({
                        "id": [int(ngs[0]["id"]), int(ngs[1]["id"])],
                        "range_errors": [str(addrs[0]), str(addrs[1])],
                        "errors": ["cidr"]
                    })
        self.expose_error_messages()

    def check_public_floating_ranges_intersection(self):
        # 1. Check intersection of networks address spaces inside
        #    Public and Floating network
        # 2. Check that Public Gateway is in Public CIDR
        # 3. Check that Public IP ranges are in Public CIDR
        ng = [ng for ng in self.networks
              if ng['name'] == 'public'][0]
        pub_gw = netaddr.IPAddress(ng['gateway'])
        try:
            pub_cidr = netaddr.IPNetwork(
                ng['ip_ranges'][0][0] + '/' + ng['netmask'])
        except (netaddr.AddrFormatError, KeyError):
            self.err_msgs.append(
                u"Invalid netmask for public network")
            self.result.append({"id": int(ng["id"]),
                                "range_errors": [],
                                "errors": ["netmask"]})
            self.expose_error_messages()
        # Check that Public Gateway is in Public CIDR
        if pub_gw not in pub_cidr:
            self.err_msgs.append(
                u"Public gateway is not in public CIDR."
            )
            self.result.append({"id": int(ng["id"]),
                                "range_errors": [],
                                "errors": ["gateway"]})
        # Check intersection of networks address spaces inside
        # Public and Floating network
        for ng in self.networks:
            if ng['name'] in ['public', 'floating']:
                nets = [netaddr.IPRange(v[0], v[1])
                        for v in ng['ip_ranges']]
                for npair in combinations(nets, 2):
                    if self.net_man.is_range_intersection(npair[0], npair[1]):
                        self.err_msgs.append(
                            u"Address space intersection between ranges "
                            "of {0} network.".format(ng['name'])
                        )
                        self.result.append({"id": int(ng["id"]),
                                            "range_errors": [],
                                            "errors": ["range"]})
                    if pub_gw in npair[0] or pub_gw in npair[1]:
                        self.err_msgs.append(
                            u"Address intersection between "
                            u"public gateway and IP range "
                            u"of {0} network.".format(ng['name'])
                        )
                        self.result.append({"id": int(ng["id"]),
                                            "range_errors": [],
                                            "errors": ["gateway"]})
                # Check that Public IP ranges are in Public CIDR
                if ng['name'] == 'public':
                    for net in nets:
                        if net not in pub_cidr:
                            self.err_msgs.append(
                                u"Public ranges are not in one CIDR."
                            )
                            self.result.append({"id": int(ng["id"]),
                                                "range_errors": [],
                                                "errors": ["range"]})
        self.expose_error_messages()
        return pub_cidr

    def neutron_check_config(self):

        # check: networks VLAN IDs should not be in
        # Neutron L2 private VLAN ID range (VLAN segmentation only)
        tagged_nets = dict((n["name"], n["vlan_start"]) for n in filter(
            lambda n: (n["vlan_start"] is not None), self.networks))

        if tagged_nets:
            if self.cluster.net_segment_type == 'vlan':
                if 'neutron_parameters' in self.data:
                    l2cfg = self.data['neutron_parameters']['L2']
                else:
                    l2cfg = self.cluster.neutron_config.L2
                for net, net_conf in l2cfg['phys_nets'].iteritems():
                    vrange = net_conf['vlan_range']
                    if vrange:
                        break
                else:
                    err_msg = u"Wrong VLAN range.\n"
                    raise errors.NetworkCheckError(err_msg, add_client=False)

                net_intersect = [name for name, vlan in tagged_nets.iteritems()
                                 if vrange[0] <= vlan <= vrange[1]]
                if net_intersect:
                    nets_with_errors = ", ". \
                        join(net_intersect)
                    err_msg = u"Networks VLAN tags are in " \
                              "ID range defined for Neutron L2. " \
                              "You should assign VLAN tags that are " \
                              "not in Neutron L2 VLAN ID range:\n{0}". \
                              format(nets_with_errors)
                    raise errors.NetworkCheckError(err_msg, add_client=False)

            # check: networks VLAN IDs should not intersect
            net_intersect = [name for name, vlan in tagged_nets.iteritems()
                             if tagged_nets.values().count(vlan) >= 2]
            if net_intersect:
                nets_with_errors = ", ". \
                    join(net_intersect)
                err_msg = u"Some networks use the same VLAN tags. " \
                          "You should assign different VLAN tag " \
                          "to every network:\n{0}". \
                          format(nets_with_errors)
                raise errors.NetworkCheckError(err_msg, add_client=False)

        # check intersection of address ranges
        # between admin networks and all other networks
        admin_ng = self.net_man.get_admin_network_group()
        admin_range = netaddr.IPNetwork(admin_ng.cidr)
        for ng in self.networks[1:]:
            net_errors = []
            sub_ranges = []
            ng_db = db().query(NetworkGroup).get(ng['id'])
            if not ng_db:
                net_errors.append("id")
                self.err_msgs.append(
                    "Invalid network ID: {0}".format(ng['id']))
            else:
                if ng.get('cidr'):
                    fnet = netaddr.IPNetwork(ng['cidr'])
                    if self.net_man.is_range_intersection(fnet, admin_range):
                        net_errors.append("cidr")
                        self.err_msgs.append(
                            u"Intersection with admin "
                            "network(s) '{0}' found".format(
                                admin_ng.cidr
                            )
                        )
                    # ng['amount'] is always equal 1 for Neutron
                    if fnet.size < ng['network_size']:  # * ng['amount']:
                        net_errors.append("cidr")
                        self.err_msgs.append(
                            u"CIDR size for network '{0}' "
                            "is less than required".format(
                                ng.get('name') or ng_db.name or ng_db.id
                            )
                        )
                # Check for intersection with Admin network
                if 'ip_ranges' in ng:
                    for k, v in enumerate(ng['ip_ranges']):
                        ip_range = netaddr.IPRange(v[0], v[1])
                        if self.net_man.is_range_intersection(admin_range,
                                                              ip_range):
                            net_errors.append("cidr")
                            self.err_msgs.append(
                                u"IP range {0} - {1} in {2} network intersects"
                                " with admin range of {3}".format(
                                    v[0], v[1],
                                    ng.get('name') or ng_db.name or ng_db.id,
                                    admin_ng.cidr
                                )
                            )
                            sub_ranges.append(k)
            if net_errors:
                self.result.append({
                    "id": int(ng["id"]),
                    "range_errors": sub_ranges,
                    "errors": net_errors
                })
        self.expose_error_messages()

        # check intersection of address ranges
        # between networks except admin network
        ng_names = dict((ng['id'], ng['name']) for ng in self.networks)
        ngs = list(self.networks)
        for ng1 in self.networks:
            net_errors = []
            ngs.remove(ng1)
            for ng2 in ngs:
                if ng1.get('cidr') and ng2.get('cidr'):
                    cidr1 = netaddr.IPNetwork(ng1['cidr'])
                    cidr2 = netaddr.IPNetwork(ng2['cidr'])
                    if self.net_man.is_cidr_intersection(cidr1, cidr2):
                        net_errors.append("cidr")
                        self.err_msgs.append(
                            u"Intersection between network address "
                            "spaces found:\n{0}".format(
                                ", ".join([ng_names[ng1['id']],
                                           ng_names[ng2['id']]])
                            )
                        )
            if net_errors:
                self.result.append({
                    "id": int(ng1["id"]),
                    "errors": net_errors
                })
        self.expose_error_messages()

        # check Public gateway, Floating Start and Stop IPs
        # belong to Public CIDR
        if 'neutron_parameters' in self.data:
            pre_net = self.data['neutron_parameters']['predefined_networks']
        else:
            pre_net = self.cluster.neutron_config.predefined_networks
        public = [n for n in self.networks if n['name'] == 'public'][0]
        net_errors = []
        fl_range = pre_net['net04_ext']['L3']['floating']
        if public.get('cidr') and public.get('gateway'):
            cidr = netaddr.IPNetwork(public['cidr'])
            if netaddr.IPAddress(public['gateway']) not in cidr:
                net_errors.append("gateway")
                self.err_msgs.append(
                    u"Public gateway {0} is not in Public "
                    "address space {1}.".format(
                        public['gateway'], public['cidr']
                    )
                )
            if netaddr.IPRange(fl_range[0], fl_range[1]) not in cidr:
                net_errors.append("float_range")
                self.err_msgs.append(
                    u"Floating address range {0}:{1} is not in Public "
                    "address space {2}.".format(
                        netaddr.IPAddress(fl_range[0]),
                        netaddr.IPAddress(fl_range[1]),
                        public['cidr']
                    )
                )
        else:
            net_errors.append("format")
            self.err_msgs.append(
                u"Public gateway or CIDR specification is invalid."
            )
        self.result = {"id": int(public["id"]), "errors": net_errors}
        self.expose_error_messages()

        # check internal Gateway is in Internal CIDR
        internal = pre_net['net04']['L3']
        if internal.get('cidr') and internal.get('gateway'):
            cidr = netaddr.IPNetwork(internal['cidr'])
            if netaddr.IPAddress(internal['gateway']) not in cidr:
                net_errors.append("gateway")
                self.err_msgs.append(
                    u"Internal gateway {0} is not in Internal "
                    "address space {1}.".format(
                        internal['gateway'], internal['cidr']
                    )
                )
            if self.net_man.is_range_intersection(
                    netaddr.IPRange(fl_range[0], fl_range[1]),
                    cidr):
                net_errors.append("cidr")
                self.err_msgs.append(
                    u"Intersection between Internal CIDR and Floating range."
                )
        else:
            net_errors.append("format")
            self.err_msgs.append(
                u"Internal gateway or CIDR specification is invalid."
            )
        self.result = {"name": "internal", "errors": net_errors}
        self.expose_error_messages()

    def neutron_check_interface_mapping(self):

        # check if there any networks
        # on the same interface as admin network (main)
        admin_interfaces = map(lambda node: node.admin_interface,
                               self.cluster.nodes)
        found_intersection = []

        all_roles = set([n["id"] for n in self.networks
                         if n != self.networks[0]])
        for iface in admin_interfaces:
            nets = dict(
                (n.id, n.name)
                for n in iface.assigned_networks)

            err_nets = set(nets.keys()) & all_roles
            if err_nets:
                err_net_names = [
                    '"{0}"'.format(nets[i]) for i in err_nets]
                found_intersection.append(
                    [iface.node.name, err_net_names])

        if found_intersection:
            nodes_with_errors = [
                u'Node "{0}": {1}'.format(
                    name,
                    ", ".join(_networks)
                ) for name, _networks in found_intersection]
            err_msg = u"Some networks are " \
                      "assigned to the same physical interface as " \
                      "admin (PXE) network. You should move them to " \
                      "another physical interfaces:\n{0}". \
                      format("\n".join(nodes_with_errors))
            raise errors.NetworkCheckError(err_msg, add_client=False)

        # check if there any networks
        # on the same interface as private network (for vlan)
        if self.cluster.net_segment_type == 'vlan':
            private_interfaces = []
            # there should be shorter method to do this !
            for node in self.cluster.nodes:
                for iface in node.interfaces:
                    for anet in iface.assigned_networks:
                        if anet.name == 'private':
                            private_interfaces.append(iface)
            found_intersection = []

            all_roles = set(n["id"] for n in self.networks
                            if n["name"] != 'private')
            for iface in private_interfaces:
                nets = dict(
                    (n.id, n.name)
                    for n in iface.assigned_networks)

                err_nets = set(nets.keys()) & all_roles
                if err_nets:
                    err_net_names = [
                        '"{0}"'.format(nets[i]) for i in err_nets]
                    found_intersection.append(
                        [iface.node.name, err_net_names])

            if found_intersection:
                nodes_with_errors = [
                    u'Node "{0}": {1}'.format(
                        name,
                        ", ".join(_networks)
                    ) for name, _networks in found_intersection]
                err_msg = u"Some networks are " \
                          "assigned to the same physical interface as " \
                          "private network. You should move them to " \
                          "another physical interfaces:\n{0}". \
                          format("\n".join(nodes_with_errors))
                raise errors.NetworkCheckError(err_msg, add_client=False)

        # check untagged networks intersection
        untagged_nets = set(
            n["id"] for n in filter(
                lambda n: (n["vlan_start"] is None), self.networks))
        if untagged_nets:
            logger.info(
                "Untagged networks found, "
                "checking intersection between them...")
            interfaces = []
            for node in self.cluster.nodes:
                for iface in node.interfaces:
                    interfaces.append(iface)
            found_intersection = []

            for iface in interfaces:
                nets = dict(
                    (n.id, n.name)
                    for n in iface.assigned_networks)

                crossed_nets = set(nets.keys()) & untagged_nets
                if len(crossed_nets) > 1:
                    err_net_names = [
                        '"{0}"'.format(nets[i]) for i in crossed_nets]
                    found_intersection.append(
                        [iface.node.name, err_net_names])

            if found_intersection:
                nodes_with_errors = [
                    u'Node "{0}": {1}'.format(
                        name,
                        ", ".join(_networks)
                    ) for name, _networks in found_intersection]
                err_msg = u"Some untagged networks are " \
                          "assigned to the same physical interface. " \
                          "You should assign them to " \
                          "different physical interfaces:\n{0}". \
                          format("\n".join(nodes_with_errors))
                raise errors.NetworkCheckError(err_msg, add_client=False)

    def check_configuration(self):
        if self.net_provider == 'neutron':
            self.neutron_check_config()
        else:
            pub_cidr = self.check_public_floating_ranges_intersection()
            self.check_net_addr_spaces_intersection(pub_cidr)

    def check_interface_mapping(self):
        if self.net_provider == 'neutron':
            self.neutron_check_interface_mapping()
        else:
            self.check_untagged_intersection()
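The checks above lean on NetworkManager.is_range_intersection and is_cidr_intersection, which this diff does not show. A minimal sketch of what such helpers could look like with netaddr, assuming both arguments are netaddr.IPNetwork or netaddr.IPRange objects; the actual NetworkManager implementation may differ:

# Illustrative sketch only, not the NetworkManager code from this change.
import netaddr

def is_range_intersection(range1, range2):
    # IPNetwork and IPRange both expose .first/.last as integers, so two
    # ranges overlap when neither one ends before the other starts.
    return range1.first <= range2.last and range2.first <= range1.last

def is_cidr_intersection(cidr1, cidr2):
    # Two CIDRs either nest or are disjoint, so containment either way
    # is enough to detect overlap.
    return cidr1 in cidr2 or cidr2 in cidr1

For example, is_range_intersection(netaddr.IPNetwork('10.20.0.0/24'), netaddr.IPRange('10.20.0.128', '10.20.0.255')) would return True.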
@@ -20,6 +20,7 @@ import shutil
from nailgun.api.models import IPAddr
from nailgun.api.models import Task
from nailgun.db import db
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.network.manager import NetworkManager
from nailgun.settings import settings
@@ -300,3 +301,12 @@ class TaskHelper(object):
            status="error",
            progress=100,
            msg=str(message))

    @staticmethod
    def expose_network_check_error_messages(task, result, err_messages):
        if err_messages:
            task.result = result
            db().add(task)
            db().commit()
            full_err_msg = u"\n".join(err_messages)
            raise errors.NetworkCheckError(full_err_msg, add_client=False)
@@ -18,6 +18,7 @@ import shlex
import subprocess

import netaddr

from sqlalchemy import func
from sqlalchemy.orm import ColumnProperty
from sqlalchemy.orm import joinedload
@@ -25,15 +26,13 @@ from sqlalchemy.orm import object_mapper

from nailgun.api.models import CapacityLog
from nailgun.api.models import Cluster
from nailgun.api.models import NetworkGroup
from nailgun.api.models import Node
from nailgun.api.models import RedHatAccount
from nailgun.api.models import Release
from nailgun.db import db
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.network.manager import NetworkManager
from nailgun.network.neutron import NeutronManager
from nailgun.network.checker import NetworkCheck
from nailgun.orchestrator import deployment_serializers
from nailgun.orchestrator import provisioning_serializers
import nailgun.rpc as rpc
@ -401,466 +400,11 @@ class CheckNetworksTask(object):
|
||||
|
||||
@classmethod
|
||||
def execute(cls, task, data, check_admin_untagged=False):
|
||||
if task.cluster.net_provider == 'neutron':
|
||||
cls.neutron_check_config(task, data)
|
||||
if check_admin_untagged:
|
||||
cls.neutron_check_interface_mapping(task, data)
|
||||
elif task.cluster.net_provider == 'nova_network':
|
||||
cls.nova_net_check(task, data, check_admin_untagged)
|
||||
|
||||
@classmethod
|
||||
def nova_net_check(cls, task, data, check_admin_untagged):
|
||||
# If not set in data then fetch from db
|
||||
if 'net_manager' in data:
|
||||
netmanager = data['net_manager']
|
||||
else:
|
||||
netmanager = task.cluster.net_manager
|
||||
|
||||
if 'networks' in data:
|
||||
networks = data['networks']
|
||||
else:
|
||||
networks = map(lambda x: x.__dict__, task.cluster.network_groups)
|
||||
|
||||
result = []
|
||||
err_msgs = []
|
||||
|
||||
# checking if there are untagged
|
||||
# networks on the same interface
|
||||
# (main) as admin network
|
||||
checker = NetworkCheck(task, data)
|
||||
checker.check_configuration()
|
||||
if check_admin_untagged:
|
||||
untagged_nets = set(
|
||||
n["id"] for n in filter(
|
||||
lambda n: (n["vlan_start"] is None), networks))
|
||||
if untagged_nets:
|
||||
logger.info(
|
||||
"Untagged networks found, "
|
||||
"checking admin network intersection...")
|
||||
admin_interfaces = map(lambda node: node.admin_interface,
|
||||
task.cluster.nodes)
|
||||
found_intersection = []
|
||||
|
||||
for iface in admin_interfaces:
|
||||
nets = dict(
|
||||
(n.id, n.name)
|
||||
for n in iface.assigned_networks)
|
||||
|
||||
err_nets = set(nets.keys()) & untagged_nets
|
||||
if err_nets:
|
||||
err_net_names = [
|
||||
'"{0}"'.format(nets[i]) for i in err_nets]
|
||||
found_intersection.append(
|
||||
[iface.node.name, err_net_names])
|
||||
|
||||
if found_intersection:
|
||||
nodes_with_errors = [
|
||||
u'Node "{0}": {1}'.format(
|
||||
name,
|
||||
", ".join(_networks)
|
||||
) for name, _networks in found_intersection]
|
||||
err_msg = u"Some untagged networks are " \
|
||||
"assigned to the same physical interface as " \
|
||||
"admin (PXE) network. You can whether turn " \
|
||||
"on tagging for these OpenStack " \
|
||||
"networks or move them to another physical " \
|
||||
"interface:\n{0}".format("\n".join(
|
||||
nodes_with_errors))
|
||||
raise errors.NetworkCheckError(err_msg, add_client=False)
|
||||
|
||||
net_man = NetworkManager()
|
||||
admin_ng = net_man.get_admin_network_group()
|
||||
admin_range = netaddr.IPNetwork(admin_ng.cidr)
|
||||
for ng in networks:
|
||||
net_errors = []
|
||||
sub_ranges = []
|
||||
ng_db = db().query(NetworkGroup).get(ng['id'])
|
||||
if not ng_db:
|
||||
net_errors.append("id")
|
||||
err_msgs.append(u"Invalid network ID: {0}".format(ng['id']))
|
||||
else:
|
||||
if ng.get('cidr'):
|
||||
fnet = netaddr.IPNetwork(ng['cidr'])
|
||||
if net_man.is_range_intersection(fnet, admin_range):
|
||||
net_errors.append("cidr")
|
||||
err_msgs.append(
|
||||
u"Intersection with admin "
|
||||
"network(s) '{0}' found".format(
|
||||
admin_ng.cidr
|
||||
)
|
||||
)
|
||||
if fnet.size < ng['network_size'] * ng['amount']:
|
||||
net_errors.append("cidr")
|
||||
err_msgs.append(
|
||||
u"CIDR size for network '{0}' "
|
||||
"is less than required".format(
|
||||
ng.get('name') or ng_db.name or ng_db.id
|
||||
)
|
||||
)
|
||||
# Check for intersection with Admin network
|
||||
if 'ip_ranges' in ng:
|
||||
for k, v in enumerate(ng['ip_ranges']):
|
||||
ip_range = netaddr.IPRange(v[0], v[1])
|
||||
if net_man.is_range_intersection(admin_range,
|
||||
ip_range):
|
||||
net_errors.append("cidr")
|
||||
err_msgs.append(
|
||||
u"IP range {0} - {1} in {2} network intersects"
|
||||
" with admin range of {3}".format(
|
||||
v[0], v[1],
|
||||
ng.get('name') or ng_db.name or ng_db.id,
|
||||
admin_ng.cidr
|
||||
)
|
||||
)
|
||||
sub_ranges.append(k)
|
||||
|
||||
if ng.get('amount') > 1 and netmanager == 'FlatDHCPManager':
|
||||
net_errors.append("amount")
|
||||
err_msgs.append(
|
||||
u"Network amount for '{0}' is more than 1 "
|
||||
"while using FlatDHCP manager.".format(
|
||||
ng.get('name') or ng_db.name or ng_db.id
|
||||
)
|
||||
)
|
||||
if net_errors:
|
||||
result.append({
|
||||
"id": int(ng["id"]),
|
||||
"range_errors": sub_ranges,
|
||||
"errors": net_errors
|
||||
})
|
||||
if err_msgs:
|
||||
task.result = result
|
||||
db().add(task)
|
||||
db().commit()
|
||||
full_err_msg = "\n".join(err_msgs)
|
||||
raise errors.NetworkCheckError(full_err_msg, add_client=False)
|
||||
|
||||
@classmethod
|
||||
def neutron_check_config(cls, task, data):
|
||||
|
||||
if 'networks' in data:
|
||||
networks = data['networks']
|
||||
else:
|
||||
networks = map(lambda x: x.__dict__, task.cluster.network_groups)
|
||||
|
||||
result = []
|
||||
|
||||
# check: networks VLAN IDs should not be in
|
||||
# Neutron L2 private VLAN ID range (VLAN segmentation only)
|
||||
tagged_nets = dict((n["name"], n["vlan_start"]) for n in filter(
|
||||
lambda n: (n["vlan_start"] is not None), networks))
|
||||
|
||||
if tagged_nets:
|
||||
if task.cluster.net_segment_type == 'vlan':
|
||||
if 'neutron_parameters' in data:
|
||||
l2cfg = data['neutron_parameters']['L2']
|
||||
else:
|
||||
l2cfg = task.cluster.neutron_config.L2
|
||||
for net, net_conf in l2cfg['phys_nets'].iteritems():
|
||||
vrange = net_conf['vlan_range']
|
||||
if vrange:
|
||||
break
|
||||
else:
|
||||
err_msg = u"Wrong VLAN range.\n"
|
||||
raise errors.NetworkCheckError(err_msg, add_client=False)
|
||||
|
||||
net_intersect = [name for name, vlan in tagged_nets.iteritems()
|
||||
if vrange[0] <= vlan <= vrange[1]]
|
||||
if net_intersect:
|
||||
nets_with_errors = ", ".\
|
||||
join(net_intersect)
|
||||
err_msg = u"Networks VLAN tags are in " \
|
||||
"ID range defined for Neutron L2. " \
|
||||
"You should assign VLAN tags that are " \
|
||||
"not in Neutron L2 VLAN ID range:\n{0}". \
|
||||
format(nets_with_errors)
|
||||
raise errors.NetworkCheckError(err_msg, add_client=False)
|
||||
|
||||
# check: networks VLAN IDs should not intersect
|
||||
net_intersect = [name for name, vlan in tagged_nets.iteritems()
|
||||
if tagged_nets.values().count(vlan) >= 2]
|
||||
if net_intersect:
|
||||
nets_with_errors = ", ". \
|
||||
join(net_intersect)
|
||||
err_msg = u"Some networks use the same VLAN tags. " \
|
||||
"You should assign different VLAN tag " \
|
||||
"to every network:\n{0}". \
|
||||
format(nets_with_errors)
|
||||
raise errors.NetworkCheckError(err_msg, add_client=False)
|
||||
|
||||
def expose_error_messages():
|
||||
if err_msgs:
|
||||
task.result = result
|
||||
db().add(task)
|
||||
db().commit()
|
||||
full_err_msg = "\n".join(err_msgs)
|
||||
raise errors.NetworkCheckError(full_err_msg, add_client=False)
|
||||
|
||||
# check intersection of address ranges
|
||||
# between admin networks and all other networks
|
||||
net_man = NeutronManager()
|
||||
admin_ng = net_man.get_admin_network_group()
|
||||
admin_range = netaddr.IPNetwork(admin_ng.cidr)
|
||||
err_msgs = []
|
||||
for ng in networks:
|
||||
net_errors = []
|
||||
sub_ranges = []
|
||||
ng_db = db().query(NetworkGroup).get(ng['id'])
|
||||
if not ng_db:
|
||||
net_errors.append("id")
|
||||
err_msgs.append("Invalid network ID: {0}".format(ng['id']))
|
||||
else:
|
||||
if ng.get('cidr'):
|
||||
fnet = netaddr.IPNetwork(ng['cidr'])
|
||||
if net_man.is_range_intersection(fnet, admin_range):
|
||||
net_errors.append("cidr")
|
||||
err_msgs.append(
|
||||
u"Intersection with admin "
|
||||
"network(s) '{0}' found".format(
|
||||
admin_ng.cidr
|
||||
)
|
||||
)
|
||||
# ng['amount'] is always equal 1 for Neutron
|
||||
if fnet.size < ng['network_size']: # * ng['amount']:
|
||||
net_errors.append("cidr")
|
||||
err_msgs.append(
|
||||
u"CIDR size for network '{0}' "
|
||||
"is less than required".format(
|
||||
ng.get('name') or ng_db.name or ng_db.id
|
||||
)
|
||||
)
|
||||
# Check for intersection with Admin network
|
||||
if 'ip_ranges' in ng:
|
||||
for k, v in enumerate(ng['ip_ranges']):
|
||||
ip_range = netaddr.IPRange(v[0], v[1])
|
||||
if net_man.is_range_intersection(admin_range,
|
||||
ip_range):
|
||||
net_errors.append("cidr")
|
||||
err_msgs.append(
|
||||
u"IP range {0} - {1} in {2} network intersects"
|
||||
" with admin range of {3}".format(
|
||||
v[0], v[1],
|
||||
ng.get('name') or ng_db.name or ng_db.id,
|
||||
admin_ng.cidr
|
||||
)
|
||||
)
|
||||
sub_ranges.append(k)
|
||||
if net_errors:
|
||||
result.append({
|
||||
"id": int(ng["id"]),
|
||||
"range_errors": sub_ranges,
|
||||
"errors": net_errors
|
||||
})
|
||||
expose_error_messages()
|
||||
|
||||
# check intersection of address ranges
|
||||
# between networks except admin network
|
||||
ng_names = dict((ng['id'], (ng.get('name')) or
|
||||
db().query(NetworkGroup).get(ng['id']).name)
|
||||
for ng in networks)
|
||||
ngs = list(networks)
|
||||
for ng1 in networks:
|
||||
net_errors = []
|
||||
ngs.remove(ng1)
|
||||
for ng2 in ngs:
|
||||
if ng1.get('cidr') and ng2.get('cidr'):
|
||||
cidr1 = netaddr.IPNetwork(ng1['cidr'])
|
||||
cidr2 = netaddr.IPNetwork(ng2['cidr'])
|
||||
if net_man.is_cidr_intersection(cidr1, cidr2):
|
||||
net_errors.append("cidr")
|
||||
err_msgs.append(
|
||||
u"Intersection between network address "
|
||||
"spaces found:\n{0}".format(
|
||||
", ".join([ng_names[ng1['id']],
|
||||
ng_names[ng2['id']]])
|
||||
)
|
||||
)
|
||||
if net_errors:
|
||||
result.append({
|
||||
"id": int(ng1["id"]),
|
||||
"errors": net_errors
|
||||
})
|
||||
expose_error_messages()
|
||||
|
||||
# check Public gateway, Floating Start and Stop IPs
|
||||
# belong to Public CIDR
|
||||
if 'neutron_parameters' in data:
|
||||
pre_net = data['neutron_parameters']['predefined_networks']
|
||||
else:
|
||||
pre_net = task.cluster.neutron_config.predefined_networks
|
||||
public = [n for n in networks if n['name'] == 'public'][0]
|
||||
net_errors = []
|
||||
fl_range = pre_net['net04_ext']['L3']['floating']
|
||||
if public.get('cidr') and public.get('gateway'):
|
||||
cidr = netaddr.IPNetwork(public['cidr'])
|
||||
if netaddr.IPAddress(public['gateway']) not in cidr:
|
||||
net_errors.append("gateway")
|
||||
err_msgs.append(
|
||||
u"Public gateway {0} is not in Public "
|
||||
"address space {1}.".format(
|
||||
public['gateway'], public['cidr']
|
||||
)
|
||||
)
|
||||
if netaddr.IPRange(fl_range[0], fl_range[1]) not in cidr:
|
||||
net_errors.append("float_range")
|
||||
err_msgs.append(
|
||||
u"Floating address range {0}:{1} is not in Public "
|
||||
"address space {2}.".format(
|
||||
netaddr.IPAddress(fl_range[0]),
|
||||
netaddr.IPAddress(fl_range[1]),
|
||||
public['cidr']
|
||||
)
|
||||
)
|
||||
else:
|
||||
net_errors.append("format")
|
||||
err_msgs.append(
|
||||
u"Public gateway or CIDR specification is invalid."
|
||||
)
|
||||
result = {"id": int(public["id"]), "errors": net_errors}
|
||||
expose_error_messages()
|
||||
|
||||
# check internal Gateway is in Internal CIDR
|
||||
internal = pre_net['net04']['L3']
|
||||
if internal.get('cidr') and internal.get('gateway'):
|
||||
cidr = netaddr.IPNetwork(internal['cidr'])
|
||||
if netaddr.IPAddress(internal['gateway']) not in cidr:
|
||||
net_errors.append("gateway")
|
||||
err_msgs.append(
|
||||
u"Internal gateway {0} is not in Internal "
|
||||
"address space {1}.".format(
|
||||
internal['gateway'], internal['cidr']
|
||||
)
|
||||
)
|
||||
if net_man.is_range_intersection(
|
||||
netaddr.IPRange(fl_range[0], fl_range[1]),
|
||||
cidr):
|
||||
net_errors.append("cidr")
|
||||
err_msgs.append(
|
||||
u"Intersection between Internal CIDR and Floating range."
|
||||
)
|
||||
else:
|
||||
net_errors.append("format")
|
||||
err_msgs.append(
|
||||
u"Internal gateway or CIDR specification is invalid."
|
||||
)
|
||||
result = {"name": "internal", "errors": net_errors}
|
||||
expose_error_messages()
|
||||
|
||||
@classmethod
|
||||
def neutron_check_interface_mapping(cls, task, data):
|
||||
|
||||
if 'networks' in data:
|
||||
networks = data['networks']
|
||||
else:
|
||||
networks = map(lambda x: x.__dict__, task.cluster.network_groups)
|
||||
|
||||
# check if there any networks
|
||||
# on the same interface as admin network (main)
|
||||
admin_interfaces = map(lambda node: node.admin_interface,
|
||||
task.cluster.nodes)
|
||||
found_intersection = []
|
||||
|
||||
all_roles = set(n["id"] for n in networks)
|
||||
for iface in admin_interfaces:
|
||||
nets = dict(
|
||||
(n.id, n.name)
|
||||
for n in iface.assigned_networks)
|
||||
|
||||
err_nets = set(nets.keys()) & all_roles
|
||||
if err_nets:
|
||||
err_net_names = [
|
||||
'"{0}"'.format(nets[i]) for i in err_nets]
|
||||
found_intersection.append(
|
||||
[iface.node.name, err_net_names])
|
||||
|
||||
if found_intersection:
|
||||
nodes_with_errors = [
|
||||
u'Node "{0}": {1}'.format(
|
||||
name,
|
||||
", ".join(_networks)
|
||||
) for name, _networks in found_intersection]
|
||||
err_msg = u"Some networks are " \
|
||||
"assigned to the same physical interface as " \
|
||||
"admin (PXE) network. You should move them to " \
|
||||
"another physical interfaces:\n{0}". \
|
||||
format("\n".join(nodes_with_errors))
|
||||
raise errors.NetworkCheckError(err_msg, add_client=False)
|
||||
|
||||
# check if there any networks
|
||||
# on the same interface as private network (for vlan)
|
||||
if task.cluster.net_segment_type == 'vlan':
|
||||
private_interfaces = []
|
||||
# there should be shorter method to do this !
|
||||
for node in task.cluster.nodes:
|
||||
for iface in node.interfaces:
|
||||
for anet in iface.assigned_networks:
|
||||
if anet.name == 'private':
|
||||
private_interfaces.append(iface)
|
||||
found_intersection = []
|
||||
|
||||
all_roles = set(n["id"] for n in networks
|
||||
if n["name"] != 'private')
|
||||
for iface in private_interfaces:
|
||||
nets = dict(
|
||||
(n.id, n.name)
|
||||
for n in iface.assigned_networks)
|
||||
|
||||
err_nets = set(nets.keys()) & all_roles
|
||||
if err_nets:
|
||||
err_net_names = [
|
||||
'"{0}"'.format(nets[i]) for i in err_nets]
|
||||
found_intersection.append(
|
||||
[iface.node.name, err_net_names])
|
||||
|
||||
if found_intersection:
|
||||
nodes_with_errors = [
|
||||
u'Node "{0}": {1}'.format(
|
||||
name,
|
||||
", ".join(_networks)
|
||||
) for name, _networks in found_intersection]
|
||||
err_msg = u"Some networks are " \
|
||||
"assigned to the same physical interface as " \
|
||||
"private network. You should move them to " \
|
||||
"another physical interfaces:\n{0}". \
|
||||
format("\n".join(nodes_with_errors))
|
||||
raise errors.NetworkCheckError(err_msg, add_client=False)
|
||||
|
||||
# check untagged networks intersection
|
||||
untagged_nets = set(
|
||||
n["id"] for n in filter(
|
||||
lambda n: (n["vlan_start"] is None), networks))
|
||||
if untagged_nets:
|
||||
logger.info(
|
||||
"Untagged networks found, "
|
||||
"checking intersection between them...")
|
||||
interfaces = []
|
||||
for node in task.cluster.nodes:
|
||||
for iface in node.interfaces:
|
||||
interfaces.append(iface)
|
||||
found_intersection = []
|
||||
|
||||
for iface in interfaces:
|
||||
nets = dict(
|
||||
(n.id, n.name)
|
||||
for n in iface.assigned_networks)
|
||||
|
||||
crossed_nets = set(nets.keys()) & untagged_nets
|
||||
if len(crossed_nets) > 1:
|
||||
err_net_names = [
|
||||
'"{0}"'.format(nets[i]) for i in crossed_nets]
|
||||
found_intersection.append(
|
||||
[iface.node.name, err_net_names])
|
||||
|
||||
if found_intersection:
|
||||
nodes_with_errors = [
|
||||
u'Node "{0}": {1}'.format(
|
||||
name,
|
||||
", ".join(_networks)
|
||||
) for name, _networks in found_intersection]
|
||||
err_msg = u"Some untagged networks are " \
|
||||
"assigned to the same physical interface. " \
|
||||
"You should assign them to " \
|
||||
"different physical interfaces:\n{0}". \
|
||||
format("\n".join(nodes_with_errors))
|
||||
raise errors.NetworkCheckError(err_msg, add_client=False)
|
||||
checker.check_interface_mapping()
|
||||
|
||||
|
||||
class CheckBeforeDeploymentTask(object):
|
||||
|
@ -421,7 +421,7 @@ class Environment(object):
|
||||
"172.16.0.0/24",
|
||||
"172.16.1.0/24",
|
||||
"192.168.0.0/24",
|
||||
"192.168.0.0/24",
|
||||
"192.168.1.0/24",
|
||||
"10.0.0.0/24"
|
||||
)
|
||||
nets = {'networks': [{
|
||||
@ -441,80 +441,6 @@ class Environment(object):
|
||||
|
||||
return nets
|
||||
|
||||
def generate_ui_neutron_networks(self, cluster_id):
|
||||
start_id = self.db.query(NetworkGroup.id).order_by(
|
||||
NetworkGroup.id
|
||||
).first()
|
||||
start_id = 0 if not start_id else start_id[-1] + 1
|
||||
net_names = (
|
||||
"public",
|
||||
"management",
|
||||
"storage"
|
||||
)
|
||||
net_cidrs = (
|
||||
"172.16.1.0/24",
|
||||
"192.168.0.0/24",
|
||||
"192.168.1.0/24"
|
||||
)
|
||||
nets = {'networks': [{
|
||||
"network_size": 256,
|
||||
"name": nd[0],
|
||||
"amount": 1,
|
||||
"cluster_id": cluster_id,
|
||||
"vlan_start": 100 + i,
|
||||
"cidr": nd[1],
|
||||
"id": start_id + i
|
||||
} for i, nd in enumerate(zip(net_names, net_cidrs))]}
|
||||
|
||||
public = filter(
|
||||
lambda net: net['name'] == 'public',
|
||||
nets['networks'])[0]
|
||||
public['netmask'] = '255.255.255.0'
|
||||
public['gateway'] = '172.16.1.1'
|
||||
|
||||
nets['neutron_parameters'] = {
|
||||
"segmentation_type": "vlan",
|
||||
"predefined_networks": {
|
||||
"net04_ext": {
|
||||
"L3": {
|
||||
"nameservers": [],
|
||||
"cidr": "172.16.1.0/24",
|
||||
"gateway": None,
|
||||
"floating": ["172.16.1.131", "172.16.1.254"],
|
||||
"public": True
|
||||
}
|
||||
},
|
||||
"net04": {
|
||||
"L3": {
|
||||
"nameservers": [
|
||||
"8.8.4.4",
|
||||
"8.8.8.8"
|
||||
],
|
||||
"cidr": "192.168.111.0/24",
|
||||
"gateway": "192.168.111.1",
|
||||
"floating": [],
|
||||
"public": False
|
||||
}
|
||||
}
|
||||
},
|
||||
"L2": {
|
||||
"phys_nets": {
|
||||
"physnet2": {
|
||||
"bridge": "br-prv",
|
||||
"vlan_range": []
|
||||
},
|
||||
"physnet1": {
|
||||
"bridge": "br-ex",
|
||||
"vlan_range": [1000, 2999]
|
||||
}
|
||||
},
|
||||
"base_mac": "fa:16:3e:00:00:00",
|
||||
"segmentation_type": "vlan"
|
||||
}
|
||||
}
|
||||
|
||||
return nets
|
||||
|
||||
def get_default_roles(self):
|
||||
return ['controller', 'compute', 'cinder', 'ceph-osd']
|
||||
|
||||
@ -771,6 +697,61 @@ class Environment(object):
|
||||
raise TimeoutError(error_message)
|
||||
time.sleep(0.1)
|
||||
|
||||
def _api_get(self, method, instance_id, expect_errors=False):
|
||||
return self.app.get(
|
||||
reverse(method,
|
||||
kwargs=instance_id),
|
||||
headers=self.default_headers,
|
||||
expect_errors=expect_errors)
|
||||
|
||||
def _api_put(self, method, instance_id, data, expect_errors=False):
|
||||
return self.app.put(
|
||||
reverse(method,
|
||||
kwargs=instance_id),
|
||||
json.dumps(data),
|
||||
headers=self.default_headers,
|
||||
expect_errors=expect_errors)
|
||||
|
||||
def nova_networks_get(self, cluster_id, expect_errors=False):
|
||||
return self._api_get('NovaNetworkConfigurationHandler',
|
||||
{'cluster_id': cluster_id},
|
||||
expect_errors)
|
||||
|
||||
def nova_networks_put(self, cluster_id, networks, expect_errors=False):
|
||||
return self._api_put('NovaNetworkConfigurationHandler',
|
||||
{'cluster_id': cluster_id},
|
||||
networks,
|
||||
expect_errors)
|
||||
|
||||
def neutron_networks_get(self, cluster_id, expect_errors=False):
|
||||
return self._api_get('NeutronNetworkConfigurationHandler',
|
||||
{'cluster_id': cluster_id},
|
||||
expect_errors)
|
||||
|
||||
def neutron_networks_put(self, cluster_id, networks, expect_errors=False):
|
||||
return self._api_put('NeutronNetworkConfigurationHandler',
|
||||
{'cluster_id': cluster_id},
|
||||
networks,
|
||||
expect_errors)
|
||||
|
||||
def cluster_changes_put(self, cluster_id, expect_errors=False):
|
||||
return self._api_put('ClusterChangesHandler',
|
||||
{'cluster_id': cluster_id},
|
||||
[],
|
||||
expect_errors)
|
||||
|
||||
def node_nics_get(self, node_id, expect_errors=False):
|
||||
return self._api_get('NodeNICsHandler',
|
||||
{'node_id': node_id},
|
||||
expect_errors)
|
||||
|
||||
def node_collection_nics_put(self, node_id, interfaces,
|
||||
expect_errors=False):
|
||||
return self._api_put('NodeCollectionNICsHandler',
|
||||
{'node_id': node_id},
|
||||
interfaces,
|
||||
expect_errors)
|
||||
|
||||
|
||||
class BaseTestCase(TestCase):
|
||||
|
||||
|
@ -716,6 +716,7 @@ class TestHandlers(BaseIntegrationTest):
|
||||
net_data = {
|
||||
"networks": [{
|
||||
'id': public_network.id,
|
||||
'gateway': '240.0.1.1',
|
||||
'ip_ranges': [[
|
||||
'240.0.1.2',
|
||||
'240.0.1.3']]}]}
|
||||
@ -866,18 +867,13 @@ class TestHandlers(BaseIntegrationTest):
|
||||
cluster_id = self.env.clusters[0].id
|
||||
node_db = self.env.nodes[0]
|
||||
|
||||
nets = self.env.generate_ui_networks(cluster_id)
|
||||
resp = self.env.nova_networks_get(cluster_id)
|
||||
nets = json.loads(resp.body)
|
||||
for net in nets["networks"]:
|
||||
if net["name"] in ["public", "floating"]:
|
||||
net["vlan_start"] = None
|
||||
|
||||
resp = self.app.put(
|
||||
reverse('NovaNetworkConfigurationHandler', kwargs={
|
||||
'cluster_id': cluster_id
|
||||
}),
|
||||
json.dumps(nets),
|
||||
headers=self.default_headers
|
||||
)
|
||||
self.env.nova_networks_put(cluster_id, nets)
|
||||
|
||||
main_iface_db = node_db.admin_interface
|
||||
|
||||
@ -900,10 +896,8 @@ class TestHandlers(BaseIntegrationTest):
|
||||
|
||||
ifaces = json.loads(resp.body)
|
||||
|
||||
wrong_nets = filter(
|
||||
lambda nic: (nic["name"] in ["public", "floating"]),
|
||||
ifaces[0]["assigned_networks"]
|
||||
)
|
||||
wrong_nets = [nic for nic in ifaces[0]["assigned_networks"]
|
||||
if nic["name"] in ["public", "floating"]]
|
||||
|
||||
map(
|
||||
ifaces[0]["assigned_networks"].remove,
|
||||
|
@ -204,22 +204,6 @@ class TestHandlers(BaseIntegrationTest):
|
||||
]
|
||||
self.assertItemsEqual(expected, obtained)
|
||||
|
||||
def test_network_validation_on_cluster_creation(self):
|
||||
cluster = self.env.create_cluster(api=True)
|
||||
nets = self.env.generate_ui_networks(cluster["id"])
|
||||
nets['networks'][-1]["network_size"] = 16
|
||||
nets['networks'][-1]["amount"] = 3
|
||||
resp = self.app.put(
|
||||
reverse('NovaNetworkConfigurationHandler',
|
||||
kwargs={'cluster_id': cluster['id']}),
|
||||
json.dumps(nets),
|
||||
headers=self.default_headers,
|
||||
expect_errors=True
|
||||
)
|
||||
self.assertEquals(202, resp.status)
|
||||
task = json.loads(resp.body)
|
||||
self.assertEquals(task['status'], 'error')
|
||||
|
||||
@patch('nailgun.rpc.cast')
|
||||
def test_verify_networks(self, mocked_rpc):
|
||||
cluster = self.env.create_cluster(api=True)
|
||||
|
@ -102,13 +102,10 @@ class TestNetworkConfigurationHandlerMultinodeMode(BaseIntegrationTest):
|
||||
)
|
||||
|
||||
def test_do_not_update_net_manager_if_validation_is_failed(self):
|
||||
self.db.query(NetworkGroup).filter(
|
||||
not_(NetworkGroup.name == "fuelweb_admin")
|
||||
).first()
|
||||
new_net_manager = {'net_manager': 'VlanManager',
|
||||
'networks': [{'id': 500, 'vlan_start': 500}]}
|
||||
self.put(self.cluster.id, new_net_manager, expect_errors=True)
|
||||
|
||||
self.put(self.cluster.id, new_net_manager, expect_errors=True)
|
||||
self.db.refresh(self.cluster)
|
||||
self.assertNotEquals(
|
||||
self.cluster.net_manager,
|
||||
|
@ -15,58 +15,51 @@
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
from netaddr import IPAddress
|
||||
from netaddr import IPNetwork
|
||||
|
||||
from nailgun.api.models import NetworkGroup
|
||||
from nailgun.test.base import BaseIntegrationTest
|
||||
from nailgun.test.base import reverse
|
||||
|
||||
|
||||
class TestNovaHandlers(BaseIntegrationTest):
|
||||
|
||||
def update_networks(self, cluster_id, networks, expect_errors=False):
|
||||
return self.app.put(
|
||||
reverse('NovaNetworkConfigurationHandler',
|
||||
kwargs={'cluster_id': cluster_id}),
|
||||
json.dumps(networks),
|
||||
headers=self.default_headers,
|
||||
expect_errors=expect_errors)
|
||||
|
||||
def test_network_checking(self):
|
||||
def setUp(self):
|
||||
super(TestNovaHandlers, self).setUp()
|
||||
self.env.create(
|
||||
cluster_kwargs={},
|
||||
nodes_kwargs=[
|
||||
{"pending_addition": True},
|
||||
]
|
||||
)
|
||||
cluster = self.env.clusters[0]
|
||||
self.cluster = self.env.clusters[0]
|
||||
resp = self.env.nova_networks_get(self.cluster.id)
|
||||
self.nets = json.loads(resp.body)
|
||||
|
||||
nets = self.env.generate_ui_networks(
|
||||
cluster.id
|
||||
)
|
||||
resp = self.update_networks(cluster.id, nets)
|
||||
def find_net_by_name(self, name):
|
||||
for net in self.nets['networks']:
|
||||
if net['name'] == name:
|
||||
return net
|
||||
|
||||
def test_network_checking(self):
|
||||
resp = self.env.nova_networks_put(self.cluster.id, self.nets)
|
||||
self.assertEquals(resp.status, 202)
|
||||
task = json.loads(resp.body)
|
||||
self.assertEquals(task['status'], 'ready')
|
||||
self.assertEquals(task['progress'], 100)
|
||||
self.assertEquals(task['name'], 'check_networks')
|
||||
|
||||
ngs_created = self.db.query(NetworkGroup).filter(
|
||||
NetworkGroup.name.in_([n['name'] for n in nets['networks']])
|
||||
NetworkGroup.name.in_([n['name'] for n in self.nets['networks']])
|
||||
).all()
|
||||
self.assertEquals(len(ngs_created), len(nets['networks']))
|
||||
self.assertEquals(len(ngs_created), len(self.nets['networks']))
|
||||
|
||||
def test_network_checking_fails_if_admin_intersection(self):
|
||||
self.env.create(
|
||||
cluster_kwargs={},
|
||||
nodes_kwargs=[
|
||||
{"pending_addition": True},
|
||||
]
|
||||
)
|
||||
cluster = self.env.clusters[0]
|
||||
nets = self.env.generate_ui_networks(cluster.id)
|
||||
admin_ng = self.env.network_manager.get_admin_network_group()
|
||||
nets['networks'][-1]["cidr"] = admin_ng.cidr
|
||||
resp = self.update_networks(cluster.id, nets, expect_errors=True)
|
||||
self.find_net_by_name('fixed')["cidr"] = admin_ng.cidr
|
||||
|
||||
resp = self.env.nova_networks_put(self.cluster.id, self.nets,
|
||||
expect_errors=True)
|
||||
self.assertEquals(resp.status, 202)
|
||||
task = json.loads(resp.body)
|
||||
self.assertEquals(task['status'], 'error')
|
||||
@ -74,30 +67,19 @@ class TestNovaHandlers(BaseIntegrationTest):
|
||||
self.assertEquals(task['name'], 'check_networks')
|
||||
self.assertEquals(
|
||||
task['message'],
|
||||
"Intersection with admin "
|
||||
"network(s) '{0}' found".format(
|
||||
admin_ng.cidr
|
||||
)
|
||||
"Address space intersection between networks: "
|
||||
"admin (PXE), fixed."
|
||||
)
|
||||
|
||||
def test_network_checking_fails_if_admin_intersection_ip_range(self):
|
||||
self.env.create(
|
||||
cluster_kwargs={},
|
||||
nodes_kwargs=[
|
||||
{"pending_addition": True},
|
||||
]
|
||||
)
|
||||
cluster = self.env.clusters[0]
|
||||
nets = self.env.generate_ui_networks(cluster.id)
|
||||
admin_ng = self.env.network_manager.get_admin_network_group()
|
||||
base = IPNetwork(admin_ng.cidr)
|
||||
base.prefixlen += 1
|
||||
start_range = str(base[0])
|
||||
end_range = str(base[-1])
|
||||
nets['networks'][1]['ip_ranges'] = [
|
||||
[start_range, end_range]
|
||||
cidr = IPNetwork(admin_ng.cidr)
|
||||
self.find_net_by_name('floating')['ip_ranges'] = [
|
||||
[str(IPAddress(cidr.first + 2)), str(IPAddress(cidr.last))]
|
||||
]
|
||||
resp = self.update_networks(cluster.id, nets, expect_errors=True)
|
||||
|
||||
resp = self.env.nova_networks_put(self.cluster.id, self.nets,
|
||||
expect_errors=True)
|
||||
self.assertEquals(resp.status, 202)
|
||||
task = json.loads(resp.body)
|
||||
self.assertEquals(task['status'], 'error')
|
||||
@ -105,59 +87,21 @@ class TestNovaHandlers(BaseIntegrationTest):
|
||||
self.assertEquals(task['name'], 'check_networks')
|
||||
self.assertEquals(
|
||||
task['message'],
|
||||
"IP range {0} - {1} in {2} network intersects with admin "
|
||||
"range of {3}".format(
|
||||
start_range, end_range,
|
||||
nets['networks'][1]['name'],
|
||||
admin_ng.cidr
|
||||
)
|
||||
"Address space intersection between networks: "
|
||||
"admin (PXE), floating."
|
||||
)
|
||||
|
||||
def test_network_checking_fails_if_amount_flatdhcp(self):
|
||||
self.env.create(
|
||||
cluster_kwargs={},
|
||||
nodes_kwargs=[
|
||||
{"pending_addition": True},
|
||||
]
|
||||
)
|
||||
cluster = self.env.clusters[0]
|
||||
|
||||
nets = self.env.generate_ui_networks(
|
||||
cluster.id
|
||||
)
|
||||
nets['networks'][-1]["amount"] = 2
|
||||
nets['networks'][-1]["cidr"] = "10.0.0.0/23"
|
||||
resp = self.update_networks(cluster.id, nets, expect_errors=True)
|
||||
self.assertEquals(resp.status, 202)
|
||||
task = json.loads(resp.body)
|
||||
self.assertEquals(task['status'], 'error')
|
||||
self.assertEquals(task['progress'], 100)
|
||||
self.assertEquals(task['name'], 'check_networks')
|
||||
self.assertEquals(
|
||||
task['message'],
|
||||
"Network amount for '{0}' is more than 1 "
|
||||
"while using FlatDHCP manager.".format(
|
||||
nets['networks'][-1]["name"]))
|
||||
|
||||
def test_fails_if_netmask_for_public_network_not_set_or_not_valid(self):
|
||||
self.env.create(
|
||||
cluster_kwargs={},
|
||||
nodes_kwargs=[
|
||||
{"pending_addition": True}])
|
||||
cluster = self.env.clusters[0]
|
||||
net_without_netmask = self.find_net_by_name('public')
|
||||
net_with_invalid_netmask = self.find_net_by_name('public')
|
||||
|
||||
net_without_netmask = self.env.generate_ui_networks(
|
||||
cluster.id)
|
||||
|
||||
net_with_invalid_netmask = self.env.generate_ui_networks(
|
||||
cluster.id)
|
||||
|
||||
del net_without_netmask['networks'][1]['netmask']
|
||||
net_with_invalid_netmask['networks'][1]['netmask'] = '255.255.255.2'
|
||||
|
||||
for nets in [net_without_netmask, net_with_invalid_netmask]:
|
||||
resp = self.update_networks(cluster.id, nets, expect_errors=True)
|
||||
net_without_netmask['netmask'] = None
|
||||
net_with_invalid_netmask['netmask'] = '255.255.255.2'
|
||||
|
||||
for net in [net_without_netmask, net_with_invalid_netmask]:
|
||||
resp = self.env.nova_networks_put(self.cluster.id,
|
||||
{'networks': [net]},
|
||||
expect_errors=True)
|
||||
self.assertEquals(resp.status, 202)
|
||||
task = json.loads(resp.body)
|
||||
self.assertEquals(task['status'], 'error')
|
||||
@ -166,6 +110,41 @@ class TestNovaHandlers(BaseIntegrationTest):
|
||||
self.assertEquals(
|
||||
task['message'], 'Invalid netmask for public network')
|
||||
|
||||
def test_network_checking_fails_if_networks_cidr_intersection(self):
|
||||
self.find_net_by_name('management')["cidr"] = \
|
||||
self.find_net_by_name('storage')["cidr"]
|
||||
|
||||
resp = self.env.nova_networks_put(self.cluster.id, self.nets,
|
||||
expect_errors=True)
|
||||
self.assertEquals(resp.status, 202)
|
||||
task = json.loads(resp.body)
|
||||
self.assertEquals(task['status'], 'error')
|
||||
self.assertEquals(task['progress'], 100)
|
||||
self.assertEquals(task['name'], 'check_networks')
|
||||
self.assertEquals(
|
||||
task['message'],
|
||||
"Address space intersection between "
|
||||
"networks: management, storage."
|
||||
)
|
||||
|
||||
def test_network_checking_fails_if_untagged_intersection(self):
|
||||
self.find_net_by_name('public')["vlan_start"] = None
|
||||
self.find_net_by_name('management')["vlan_start"] = None
|
||||
self.env.nova_networks_put(self.cluster.id, self.nets)
|
||||
|
||||
resp = self.env.cluster_changes_put(self.cluster.id)
|
||||
self.assertEquals(resp.status, 200)
|
||||
task = json.loads(resp.body)
|
||||
self.assertEquals(task['status'], 'error')
|
||||
self.assertEquals(task['progress'], 100)
|
||||
self.assertEquals(task['name'], 'deploy')
|
||||
self.assertEquals(
|
||||
task['message'],
|
||||
'Some untagged networks are assigned to the same physical '
|
||||
'interface. You should assign them to different physical '
|
||||
'interfaces:\nNode "None": "management", "public"'
|
||||
)
|
||||
|
||||
|
||||
class TestNeutronHandlersGre(BaseIntegrationTest):
|
||||
|
||||
@ -198,18 +177,11 @@ class TestNeutronHandlersGre(BaseIntegrationTest):
|
||||
]
|
||||
)
|
||||
self.cluster = self.env.clusters[0]
|
||||
self.nets = self.env.generate_ui_neutron_networks(self.cluster.id)
|
||||
|
||||
def update_networks(self, cluster_id, networks, expect_errors=False):
|
||||
return self.app.put(
|
||||
reverse('NeutronNetworkConfigurationHandler',
|
||||
kwargs={'cluster_id': cluster_id}),
|
||||
json.dumps(networks),
|
||||
headers=self.default_headers,
|
||||
expect_errors=expect_errors)
|
||||
resp = self.env.neutron_networks_get(self.cluster.id)
|
||||
self.nets = json.loads(resp.body)
|
||||
|
||||
def test_network_checking(self):
|
||||
resp = self.update_networks(self.cluster.id, self.nets)
|
||||
resp = self.env.neutron_networks_put(self.cluster.id, self.nets)
|
||||
self.assertEquals(resp.status, 202)
|
||||
task = json.loads(resp.body)
|
||||
self.assertEquals(task['status'], 'ready')
|
||||
@ -222,30 +194,17 @@ class TestNeutronHandlersGre(BaseIntegrationTest):
|
||||
|
||||
    def test_network_checking_fails_if_network_is_at_admin_iface(self):
        node_db = self.env.nodes[0]
        resp = self.app.get(
            reverse('NodeNICsHandler', kwargs={
                'node_id': node_db.id
            }),
            headers=self.default_headers
        )
        resp = self.env.node_nics_get(node_db.id)

        ifaces = json.loads(resp.body)
        ifaces[1]["assigned_networks"], ifaces[0]["assigned_networks"] = \
            ifaces[0]["assigned_networks"], ifaces[1]["assigned_networks"]
        self.app.put(
            reverse('NodeCollectionNICsHandler', kwargs={
                'node_id': node_db.id
            }),
            json.dumps([{"interfaces": ifaces, "id": node_db.id}]),
            headers=self.default_headers
        )

        resp = self.app.put(
            reverse(
                'ClusterChangesHandler',
                kwargs={'cluster_id': self.cluster.id}),
            headers=self.default_headers
        )
        self.env.node_collection_nics_put(
            node_db.id,
            [{"interfaces": ifaces, "id": node_db.id}])

        resp = self.env.cluster_changes_put(self.cluster.id)
        self.assertEquals(resp.status, 200)
        task = json.loads(resp.body)
        self.assertEquals(task['status'], 'error')
@@ -262,10 +221,10 @@ class TestNeutronHandlersGre(BaseIntegrationTest):

    def test_network_checking_fails_if_admin_intersection(self):
        admin_ng = self.env.network_manager.get_admin_network_group()
        self.nets['networks'][-1]["cidr"] = admin_ng.cidr
        self.nets['networks'][2]["cidr"] = admin_ng.cidr

        resp = self.update_networks(self.cluster.id, self.nets,
                                    expect_errors=True)
        resp = self.env.neutron_networks_put(self.cluster.id, self.nets,
                                             expect_errors=True)
        self.assertEquals(resp.status, 202)
        task = json.loads(resp.body)
        self.assertEquals(task['status'], 'error')
@@ -273,52 +232,16 @@ class TestNeutronHandlersGre(BaseIntegrationTest):
        self.assertEquals(task['name'], 'check_networks')
        self.assertEquals(
            task['message'],
            "Intersection with admin "
            "network(s) '{0}' found".format(
                admin_ng.cidr
            )
        )

    def test_network_checking_fails_if_admin_intersection_ip_range(self):
        admin_ng = self.env.network_manager.get_admin_network_group()
        base = IPNetwork(admin_ng.cidr)
        base.prefixlen += 1
        start_range = str(base[0])
        end_range = str(base[-1])
        self.nets['networks'][1]['ip_ranges'] = [
            [start_range, end_range]
        ]

        resp = self.update_networks(
            self.cluster.id, self.nets, expect_errors=True)
        self.assertEquals(resp.status, 202)
        task = json.loads(resp.body)
        self.assertEquals(task['status'], 'error')
        self.assertEquals(task['progress'], 100)
        self.assertEquals(task['name'], 'check_networks')
        self.assertEquals(
            task['message'],
            "IP range {0} - {1} in {2} network intersects with admin "
            "range of {3}".format(
                start_range, end_range,
                self.nets['networks'][1]['name'],
                admin_ng.cidr
            )
            "Intersection with admin network(s) '10.20.0.0/24' found"
        )

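The admin-intersection tests expect any user network whose CIDR or IP ranges touch the admin (PXE) network to be rejected. A sketch of that comparison with netaddr IP sets follows; the function name and the data layout are assumptions, only the message text comes from the test.

# Illustrative sketch: overlap between a user network and the admin (PXE)
# network, by CIDR and by explicit IP ranges; not the Nailgun checker itself.
import netaddr


def intersects_with_admin(admin_cidr, network):
    admin = netaddr.IPSet([netaddr.IPNetwork(admin_cidr)])
    candidate = netaddr.IPSet()
    if network.get('cidr'):
        candidate.add(netaddr.IPNetwork(network['cidr']))
    for first, last in network.get('ip_ranges', []):
        candidate |= netaddr.IPSet(netaddr.IPRange(first, last).cidrs())
    return bool(admin & candidate)


# The failing case from the test: a network configured with the admin CIDR.
assert intersects_with_admin('10.20.0.0/24', {'cidr': '10.20.0.0/24'})
# A range that sits outside the admin network does not intersect.
assert not intersects_with_admin('10.20.0.0/24',
                                 {'ip_ranges': [['10.21.0.10', '10.21.0.20']]})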
    def test_network_checking_fails_if_untagged_intersection(self):
        for n in self.nets['networks']:
            n['vlan_start'] = None

        self.update_networks(self.cluster.id, self.nets)

        resp = self.app.put(
            reverse(
                'ClusterChangesHandler',
                kwargs={'cluster_id': self.cluster.id}),
            headers=self.default_headers
        )
        self.env.neutron_networks_put(self.cluster.id, self.nets)

        resp = self.env.cluster_changes_put(self.cluster.id)
        self.assertEquals(resp.status, 200)
        task = json.loads(resp.body)
        self.assertEquals(task['status'], 'error')
@@ -338,8 +261,8 @@ class TestNeutronHandlersGre(BaseIntegrationTest):
            if n['name'] == 'public':
                n['gateway'] = '172.16.10.1'

        resp = self.update_networks(self.cluster.id, self.nets,
                                    expect_errors=True)
        resp = self.env.neutron_networks_put(self.cluster.id, self.nets,
                                             expect_errors=True)
        self.assertEquals(resp.status, 202)
        task = json.loads(resp.body)
        self.assertEquals(task['status'], 'error')
@@ -348,7 +271,7 @@ class TestNeutronHandlersGre(BaseIntegrationTest):
        self.assertEquals(
            task['message'],
            "Public gateway 172.16.10.1 is not in "
            "Public address space 172.16.1.0/24."
            "Public address space 172.16.0.0/24."
        )

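The gateway test boils down to a membership check of the gateway address in the public CIDR. A minimal sketch, with an assumed helper name; the message format is taken from the test above.

# Illustrative sketch of the membership test behind the
# "Public gateway ... is not in Public address space ..." error.
import netaddr


def gateway_error(public_cidr, gateway):
    if netaddr.IPAddress(gateway) not in netaddr.IPNetwork(public_cidr):
        return ("Public gateway {0} is not in "
                "Public address space {1}.".format(gateway, public_cidr))
    return None


assert gateway_error('172.16.0.0/24', '172.16.10.1') is not None
assert gateway_error('172.16.0.0/24', '172.16.0.1') is None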
    def test_network_checking_fails_if_public_float_range_not_in_cidr(self):
@@ -357,8 +280,8 @@ class TestNeutronHandlersGre(BaseIntegrationTest):
                n['cidr'] = '172.16.10.0/24'
                n['gateway'] = '172.16.10.1'

        resp = self.update_networks(self.cluster.id, self.nets,
                                    expect_errors=True)
        resp = self.env.neutron_networks_put(self.cluster.id, self.nets,
                                             expect_errors=True)
        self.assertEquals(resp.status, 202)
        task = json.loads(resp.body)
        self.assertEquals(task['status'], 'error')
@@ -366,7 +289,7 @@ class TestNeutronHandlersGre(BaseIntegrationTest):
        self.assertEquals(task['name'], 'check_networks')
        self.assertEquals(
            task['message'],
            "Floating address range 172.16.1.131:172.16.1.254 is not in "
            "Floating address range 172.16.0.130:172.16.0.254 is not in "
            "Public address space 172.16.10.0/24."
        )

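Similarly, the floating-range test expects the whole range to be a subset of the public network. A sketch with netaddr IP sets; the helper name is an assumption, the addresses and message wording come from the test.

# Illustrative sketch: verify that a floating IP range fits entirely inside
# the public CIDR; not the actual Nailgun implementation.
import netaddr


def floating_range_error(public_cidr, first, last):
    public = netaddr.IPSet([netaddr.IPNetwork(public_cidr)])
    floating = netaddr.IPSet(netaddr.IPRange(first, last).cidrs())
    if not floating.issubset(public):
        return ("Floating address range {0}:{1} is not in "
                "Public address space {2}.".format(first, last, public_cidr))
    return None


assert floating_range_error('172.16.10.0/24', '172.16.0.130', '172.16.0.254')
assert floating_range_error('172.16.0.0/24', '172.16.0.130',
                            '172.16.0.254') is None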
@@ -375,8 +298,8 @@ class TestNeutronHandlersGre(BaseIntegrationTest):
            if n['name'] == 'management':
                n['cidr'] = '192.168.1.0/24'

        resp = self.update_networks(self.cluster.id, self.nets,
                                    expect_errors=True)
        resp = self.env.neutron_networks_put(self.cluster.id, self.nets,
                                             expect_errors=True)
        self.assertEquals(resp.status, 202)
        task = json.loads(resp.body)
        self.assertEquals(task['status'], 'error')
@@ -392,8 +315,8 @@ class TestNeutronHandlersGre(BaseIntegrationTest):
        int = self.nets['neutron_parameters']['predefined_networks']['net04']
        int['L3']['gateway'] = '172.16.10.1'

        resp = self.update_networks(self.cluster.id, self.nets,
                                    expect_errors=True)
        resp = self.env.neutron_networks_put(self.cluster.id, self.nets,
                                             expect_errors=True)
        self.assertEquals(resp.status, 202)
        task = json.loads(resp.body)
        self.assertEquals(task['status'], 'error')
@@ -407,11 +330,11 @@ class TestNeutronHandlersGre(BaseIntegrationTest):

    def test_network_checking_fails_if_internal_w_floating_intersection(self):
        int = self.nets['neutron_parameters']['predefined_networks']['net04']
        int['L3']['cidr'] = '172.16.1.128/26'
        int['L3']['gateway'] = '172.16.1.129'
        int['L3']['cidr'] = '172.16.0.128/26'
        int['L3']['gateway'] = '172.16.0.129'

        resp = self.update_networks(self.cluster.id, self.nets,
                                    expect_errors=True)
        resp = self.env.neutron_networks_put(self.cluster.id, self.nets,
                                             expect_errors=True)
        self.assertEquals(resp.status, 202)
        task = json.loads(resp.body)
        self.assertEquals(task['status'], 'error')
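Here the internal (net04) CIDR is moved so that it overlaps the floating range, which must be reported. A sketch of such an overlap test; only the addresses come from the test, the helper name is hypothetical.

# Illustrative sketch: overlap between the internal network CIDR and the
# floating IP range; the check itself is an assumption, not Nailgun code.
import netaddr


def internal_floating_conflict(internal_cidr, floating_first, floating_last):
    internal = netaddr.IPSet([netaddr.IPNetwork(internal_cidr)])
    floating = netaddr.IPSet(
        netaddr.IPRange(floating_first, floating_last).cidrs())
    return bool(internal & floating)


# 172.16.0.128/26 covers .128-.191, which falls inside the floating range.
assert internal_floating_conflict('172.16.0.128/26',
                                  '172.16.0.130', '172.16.0.254')
assert not internal_floating_conflict('192.168.111.0/24',
                                      '172.16.0.130', '172.16.0.254')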
@@ -459,18 +382,11 @@ class TestNeutronHandlersVlan(BaseIntegrationTest):
            ]
        )
        self.cluster = self.env.clusters[0]
        self.nets = self.env.generate_ui_neutron_networks(self.cluster.id)

    def update_networks(self, cluster_id, networks, expect_errors=False):
        return self.app.put(
            reverse('NeutronNetworkConfigurationHandler',
                    kwargs={'cluster_id': cluster_id}),
            json.dumps(networks),
            headers=self.default_headers,
            expect_errors=expect_errors)
        resp = self.env.neutron_networks_get(self.cluster.id)
        self.nets = json.loads(resp.body)

    def test_network_checking(self):
        resp = self.update_networks(self.cluster.id, self.nets)
        resp = self.env.neutron_networks_put(self.cluster.id, self.nets)
        self.assertEquals(resp.status, 202)
        task = json.loads(resp.body)
        self.assertEquals(task['status'], 'ready')
@@ -483,12 +399,8 @@ class TestNeutronHandlersVlan(BaseIntegrationTest):

    def test_network_checking_failed_if_private_paired_w_other_network(self):
        node_db = self.env.nodes[0]
        resp = self.app.get(
            reverse('NodeNICsHandler', kwargs={
                'node_id': node_db.id
            }),
            headers=self.default_headers
        )
        resp = self.env.node_nics_get(node_db.id)

        ifaces = json.loads(resp.body)
        priv_net = filter(
            lambda nic: (nic["name"] in ["private"]),
@@ -496,21 +408,12 @@ class TestNeutronHandlersVlan(BaseIntegrationTest):
        )
        ifaces[1]["assigned_networks"].remove(priv_net[0])
        ifaces[2]["assigned_networks"].append(priv_net[0])
        self.app.put(
            reverse('NodeCollectionNICsHandler', kwargs={
                'node_id': node_db.id
            }),
            json.dumps([{"interfaces": ifaces, "id": node_db.id}]),
            headers=self.default_headers
        )

        resp = self.app.put(
            reverse(
                'ClusterChangesHandler',
                kwargs={'cluster_id': self.cluster.id}),
            headers=self.default_headers
        )
        self.env.node_collection_nics_put(
            node_db.id,
            [{"interfaces": ifaces, "id": node_db.id}])

        resp = self.env.cluster_changes_put(self.cluster.id)
        self.assertEquals(resp.status, 200)
        task = json.loads(resp.body)
        self.assertEquals(task['status'], 'error')
@@ -527,9 +430,10 @@ class TestNeutronHandlersVlan(BaseIntegrationTest):

    def test_network_checking_failed_if_networks_tags_in_neutron_range(self):
        for n in self.nets['networks']:
            n['vlan_start'] += 1000
            if n['vlan_start']:
                n['vlan_start'] += 1000

        resp = self.update_networks(self.cluster.id, self.nets)
        resp = self.env.neutron_networks_put(self.cluster.id, self.nets)
        self.assertEquals(resp.status, 202)
        task = json.loads(resp.body)
        self.assertEquals(task['status'], 'error')

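This test shifts every tagged network's VLAN ID by 1000 so the tags collide with the ID range reserved for Neutron VLAN segmentation. A sketch of that collision check; the 1000-1030 range below is an assumed example, not the cluster's actual configuration.

# Illustrative sketch: report network VLAN tags that fall inside the ID range
# reserved for Neutron tenant VLANs; range boundaries are assumptions.
def tags_in_neutron_range(networks, seg_range=(1000, 1030)):
    low, high = seg_range
    return [n['name'] for n in networks
            if n.get('vlan_start') is not None
            and low <= n['vlan_start'] <= high]


nets = [{'name': 'storage', 'vlan_start': 1102},
        {'name': 'management', 'vlan_start': 1001},
        {'name': 'public', 'vlan_start': None}]
# management's tag collides with the segmentation range, as in the test.
assert tags_in_neutron_range(nets) == ['management']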
@@ -96,10 +96,8 @@ class TestVerifyNetworkTaskManagers(BaseIntegrationTest):
        self.env.wait_error(task, 30)
        self.assertIn(
            task.message,
            "Intersection with admin "
            "network(s) '{0}' found".format(
                admin_ng.cidr
            )
            "Address space intersection between networks: "
            "admin (PXE), fixed."
        )
        self.assertEquals(mocked_rpc.called, False)