Add IP assignment optimized for network template

IPs were allocated for every network on every node
regardless of network to nodes mapping which is set in template.
It led to excessive IP usage.
Now it is fixed so that IPs are allocated only for the nodes where they are required.
IP allocation is fully optimized for the network-template case and partially
optimized for the other cases.
A number of small fixes were made based on review comments from earlier patch sets.

IP allocation comparison in DB queries count (only for replaced part):
old version: 1 query per IP to allocate + 1 query per occupied IP in that range
new version: 1 query per network (for all IPs) if (occupied IPs)+(IPs to allocate) <= MIN_IPS_PER_DB_QUERY
             2 queries per network otherwise (typically).
New version is much better for large numbers of nodes.
nodes |      queries count        |
count |   old   |   new           |
      |min| max |min|  max        |
    1 |  1|  1+N|  1|1+ceil(N/5)**|
   10 | 10| 10+N|  1|1+ceil(N/10) |
  100 |100|100+N|  1|1+ceil(N/100)|
where N is a number of occupied IPs.
** 5 here is value of MIN_IPS_PER_DB_QUERY.

Change-Id: I4479bcae9e80f7b96750f9fdb2868d6df164baf6
Closes-Bug: #1480345
This commit is contained in:
Aleksey Kasatkin 2015-08-11 19:56:04 +03:00
parent 69e41cb78d
commit bbb918473b
10 changed files with 437 additions and 91 deletions

View File

@ -248,7 +248,7 @@ class BaseHandler(object):
def content_json(func, cls, *args, **kwargs):
json_resp = lambda data: (
jsonutils.dumps(data)
if isinstance(data, (dict, list)) or not data else data
if isinstance(data, (dict, list)) or data is None else data
)
request_validate_needed = True

View File

@ -351,3 +351,6 @@ NETWORK_NOTATION = Enum(
"ip_ranges",
"cidr",
)
# Minimal quantity of IPs to be fetched and checked within one request to DB.
# Used by NetworkManager.get_free_ips_from_ranges() to batch free-IP candidates
# into a single IN(...) query instead of issuing one query per IP.
MIN_IPS_PER_DB_QUERY = 5

View File

@ -212,7 +212,7 @@ class NetworkManager(object):
filter(NodeBondInterface.node_id == node.id)
for bond_assignment in bond_assignments:
bond_assignment.network_id = \
netgroups_mapping[bond_assignment.network_id]
netgroups_id_mapping[bond_assignment.network_id]
@classmethod
def assign_ips(cls, nodes, network_name):
@ -482,28 +482,58 @@ class NetworkManager(object):
return True
@classmethod
def is_ip_usable(cls, network_group, ip):
return (ip != network_group.gateway
and db().query(IPAddr).filter_by(ip_addr=ip).first() is None)
def _iter_free_ips(cls, ip_ranges, ips_in_use):
    """Yield free IP addresses found in the given IP ranges.

    Ranges are walked in order; every address is yielded as a string.
    Addresses already present in ips_in_use are skipped.
    """
    for rng in ip_ranges:
        for candidate in (str(addr) for addr in rng):
            if candidate in ips_in_use:
                continue
            yield candidate
@classmethod
def _iter_free_ips(cls, network_group):
"""Represents iterator over free IP addresses
in all ranges for given Network Group
def get_free_ips_from_ranges(cls, net_name, ip_ranges, ips_in_use, count):
    """Returns list of free IP addresses for given IP ranges.

    Required quantity of IPs is set in "count". IP addresses which exist
    in ips_in_use or exist in DB are excluded.

    :param net_name: network name (used in the error message only)
    :param ip_ranges: iterable of IP ranges to search for free IPs
    :param ips_in_use: set of IP strings to exclude from the result
    :param count: required quantity of free IP addresses
    :raises errors.OutOfIPs: when the ranges cannot supply "count" IPs
    """
    result = []
    ip_iterator = cls._iter_free_ips(ip_ranges, ips_in_use)
    while count > 0:
        # Eager IP mining to not run DB query on every single IP when just
        # 1 or 2 IPs are required and a long series of IPs from this range
        # are occupied already.
        free_ips = list(islice(ip_iterator,
                               max(count, consts.MIN_IPS_PER_DB_QUERY)))
        if not free_ips:
            # Ranges are exhausted before "count" IPs were collected.
            ranges_str = ','.join(str(r) for r in ip_ranges)
            raise errors.OutOfIPs(
                "Not enough free IP addresses in ranges [{0}] of '{1}' "
                "network".format(ranges_str, net_name))
        # Drop candidates that are already allocated in the DB.
        ips_in_db = db().query(
            IPAddr.ip_addr.distinct()
        ).filter(
            IPAddr.ip_addr.in_(free_ips)
        )
        for ip in ips_in_db:
            free_ips.remove(ip[0])
        # Take only as many IPs as are still needed; compute slice once.
        taken = free_ips[:count]
        result.extend(taken)
        count -= len(taken)
    return result
@classmethod
def get_free_ips(cls, network_group, num=1):
    """Returns list of free IP addresses for given Network Group.

    :param network_group: NetworkGroup whose ip_ranges are searched
    :param num: required quantity of free IPs (default: 1)
    :raises errors.OutOfIPs: when there are not enough free IPs
    """
    ip_ranges = [IPRange(r.first, r.last)
                 for r in network_group.ip_ranges]
    return cls.get_free_ips_from_ranges(
        network_group.name, ip_ranges, set(), num)
@classmethod
def _get_ips_except_admin(cls, node_id=None,
@ -1490,17 +1520,16 @@ class AllocateVIPs70Mixin(object):
def get_end_point_ip(cls, cluster_id):
    """Return the end point (public VIP) IP for the given cluster.

    :param cluster_id: cluster UID
    :raises errors.CanNotDetermineEndPointIP: when the 'public/vip'
        network role is not defined for the cluster
    """
    cluster_db = objects.Cluster.get_by_uid(cluster_id)
    net_role = cls.find_network_role_by_id(cluster_db, 'public/vip')
    # Guard clause: without the 'public/vip' role the end point IP
    # cannot be determined at all.
    if not net_role:
        raise errors.CanNotDetermineEndPointIP(
            u'Can not determine end point IP for cluster {0}'.format(
                cluster_db.full_name))
    node_group = objects.Cluster.get_controllers_node_group(cluster_db)
    net_group_mapping = cls.build_role_to_network_group_mapping(
        cluster_db, node_group.name)
    net_group = cls.get_network_group_for_role(
        net_role, net_group_mapping)
    return cls.assign_vip(cluster_db, net_group, vip_type='public')
@classmethod
def _assign_vips_for_net_groups(cls, cluster):

View File

@ -14,13 +14,15 @@
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import netaddr
import six
from nailgun import consts
from nailgun.db import db
from nailgun.db.sqlalchemy.models import IPAddr
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.db.sqlalchemy.models import NeutronConfig
from nailgun.db.sqlalchemy import models
from nailgun.logger import logger
from nailgun.network.manager import AllocateVIPs70Mixin
from nailgun.network.manager import NetworkManager
@ -32,7 +34,7 @@ class NeutronManager(NetworkManager):
def create_neutron_config(
cls, cluster, segmentation_type,
net_l23_provider=consts.NEUTRON_L23_PROVIDERS.ovs):
neutron_config = NeutronConfig(
neutron_config = models.NeutronConfig(
cluster_id=cluster.id,
segmentation_type=segmentation_type,
net_l23_provider=net_l23_provider
@ -117,18 +119,17 @@ class NeutronManager70(AllocateVIPs70Mixin, NeutronManager):
network_role['id'], network_role['default_mapping'])
@classmethod
def get_node_ips(cls, node):
"""Returns node's IP and gateway's IP for each network of
particular node.
def get_node_networks_with_ips(cls, node):
"""Returns node's IP and network's data (meta, gateway) for
each network of particular node.
"""
if not node.group_id:
return {}
ngs = db().query(NetworkGroup, IPAddr.ip_addr).\
filter(NetworkGroup.group_id == node.group_id). \
filter(IPAddr.network == NetworkGroup.id). \
filter(IPAddr.node == node.id). \
all()
ngs = db().query(models.NetworkGroup, models.IPAddr.ip_addr).\
filter(models.NetworkGroup.group_id == node.group_id). \
filter(models.IPAddr.network == models.NetworkGroup.id). \
filter(models.IPAddr.node == node.id)
if not ngs:
return {}
@ -136,6 +137,7 @@ class NeutronManager70(AllocateVIPs70Mixin, NeutronManager):
for ng, ip in ngs:
networks[ng.name] = {
'ip': cls.get_ip_w_cidr_prefix_len(ip, ng),
'meta': ng.meta,
'gateway': ng.gateway
}
admin_ng = cls.get_admin_network_group(node.id)
@ -143,6 +145,167 @@ class NeutronManager70(AllocateVIPs70Mixin, NeutronManager):
networks[admin_ng.name] = {
'ip': cls.get_ip_w_cidr_prefix_len(
cls.get_admin_ip_for_node(node.id), admin_ng),
'meta': admin_ng.meta,
'gateway': admin_ng.gateway
}
return networks
@classmethod
def get_node_endpoints(cls, node):
    """Collect the set of endpoints for a node when a template is loaded.

    Endpoints are gathered from the 'endpoints' field of every template
    listed in 'templates_for_node_role' for each of the node's roles.
    """
    template = node.network_template
    result = set()
    for role in node.all_roles:
        for tpl_name in template['templates_for_node_role'][role]:
            result |= set(template['templates'][tpl_name]['endpoints'])
    return result
@classmethod
def get_node_network_mapping(cls, node):
    """Return (network, endpoint) pairs for a node under a loaded template.

    Networks are aggregated over all node roles assigned to the node:
    endpoints come from the role templates and are matched against the
    template's 'network_assignments' mapping.
    """
    node_endpoints = cls.get_node_endpoints(node)
    assignments = node.network_template['network_assignments']
    return [(net, info['ep'])
            for net, info in six.iteritems(assignments)
            if info['ep'] in node_endpoints]
@classmethod
def get_network_name_to_endpoint_mappings(cls, cluster):
    """Return endpoint-to-network-name mappings per node group.

    Result is keyed by node group id::

        {
            node_group_id: {
                "endpoint1": "network_name1",
                "endpoint2": "network_name2",
                ...
            },
            ...
        }
    """
    template = cluster.network_config.configuration_template[
        'adv_net_template']
    result = {}
    for node_group in cluster.node_groups:
        assignments = template[node_group.name]['network_assignments']
        result[node_group.id] = dict(
            (info['ep'], net)
            for net, info in six.iteritems(assignments))
    return result
@classmethod
def assign_ips_in_node_group(
        cls, net_id, net_name, node_ids, ip_ranges):
    """Assigns IP addresses for nodes with IDs listed in "node_ids" in
    given network.

    :param net_id: id of the network group to allocate IPs in
    :param net_name: network name (used for logging and error messages)
    :param node_ids: set of node ids that must have an IP in this network
    :param ip_ranges: IP ranges belonging to this network
    """
    # All IPs already allocated in this network, with their node ids.
    ips_by_node_id = db().query(
        models.IPAddr.ip_addr,
        models.IPAddr.node
    ).filter_by(
        network=net_id
    )
    nodes_dont_need_ip = set()
    ips_in_use = set()
    for ip_str, node_id in ips_by_node_id:
        ip_addr = netaddr.IPAddress(ip_str)
        for ip_range in ip_ranges:
            # A node whose existing IP falls into one of the current
            # ranges keeps it; record the IP as occupied so it is not
            # handed out again.
            if ip_addr in ip_range:
                nodes_dont_need_ip.add(node_id)
                ips_in_use.add(ip_str)
    nodes_need_ip = node_ids - nodes_dont_need_ip
    # Fetch exactly as many free IPs as there are nodes still lacking one.
    free_ips = cls.get_free_ips_from_ranges(
        net_name, ip_ranges, ips_in_use, len(nodes_need_ip))
    for ip, node_id in zip(free_ips, nodes_need_ip):
        logger.info(
            "Assigning IP for node '{0}' in network '{1}'".format(
                node_id,
                net_name
            )
        )
        ip_db = models.IPAddr(node=node_id,
                              ip_addr=ip,
                              network=net_id)
        db().add(ip_db)
    # Single flush for the whole batch of new IPAddr rows.
    db().flush()
@classmethod
def assign_ips_for_nodes_w_template(cls, cluster, nodes):
    """Assign IPs for the case when network template is applied. IPs for
    every node are allocated only for networks which are mapped to the
    particular node according to the template.

    :param cluster: cluster the nodes belong to
    :param nodes: nodes to allocate IPs for

    NOTE(review): itertools.groupby only groups *consecutive* items, so
    this presumably relies on "nodes" arriving ordered by group_id —
    confirm against the caller.
    """
    # Base query for all non-admin networks of the cluster; narrowed per
    # node group inside the loop below (admin IPs are handled separately
    # by assign_admin_ips at the end).
    network_by_group = db().query(
        models.NetworkGroup.id,
        models.NetworkGroup.name,
        models.NetworkGroup.meta,
    ).join(
        models.NetworkGroup.nodegroup
    ).filter(
        models.NodeGroup.cluster_id == cluster.id,
        models.NetworkGroup.name != consts.NETWORKS.fuelweb_admin
    )
    # Base query for IP ranges of all the cluster's networks; narrowed
    # per network inside the loop below.
    ip_ranges_by_network = db().query(
        models.IPAddrRange.first,
        models.IPAddrRange.last,
    ).join(
        models.NetworkGroup.ip_ranges,
        models.NetworkGroup.nodegroup
    ).filter(
        models.NodeGroup.cluster_id == cluster.id
    )
    net_name_by_ep = cls.get_network_name_to_endpoint_mappings(cluster)
    for group_id, nodes_in_group in itertools.groupby(
            nodes, lambda n: n.group_id):
        net_names = net_name_by_ep[group_id]
        # Networks required by each node, derived from its template
        # endpoints.
        net_names_by_node = {}
        for node in nodes_in_group:
            eps = cls.get_node_endpoints(node)
            net_names_by_node[node.id] = set(net_names[ep] for ep in eps)
        networks = network_by_group.filter(
            models.NetworkGroup.group_id == group_id)
        for net_id, net_name, net_meta in networks:
            # Networks without a 'notation' in meta get no IP allocation.
            if not net_meta.get('notation'):
                continue
            # Only nodes whose template maps them to this network.
            node_ids = set(node_id
                           for node_id, net_names
                           in six.iteritems(net_names_by_node)
                           if net_name in net_names)
            ip_ranges_ng = ip_ranges_by_network.filter(
                models.IPAddrRange.network_group_id == net_id
            )
            ip_ranges = [netaddr.IPRange(r.first, r.last)
                         for r in ip_ranges_ng]
            cls.assign_ips_in_node_group(
                net_id, net_name, node_ids, ip_ranges)
    cls.assign_admin_ips(nodes)

View File

@ -976,6 +976,10 @@ class NodeCollection(NailgunCollection):
cluster = instances[0].cluster
netmanager = Cluster.get_network_manager(cluster)
if cluster.network_config.configuration_template:
return netmanager.assign_ips_for_nodes_w_template(cluster,
instances)
nodes_by_id = dict((n.id, n) for n in instances)
query = (

View File

@ -983,25 +983,6 @@ class NeutronNetworkTemplateSerializer70(
return roles
@classmethod
def _get_netgroup_mapping_by_role(cls, node):
output = []
endpoints = set()
template = node.network_template
for role in node.all_roles:
role_templates = template['templates_for_node_role'][role]
for role_template in role_templates:
endpoints.update(template['templates'][role_template]
['endpoints'])
mappings = template['network_assignments']
for netgroup, endpoint in six.iteritems(mappings):
if endpoint['ep'] in endpoints:
output.append((netgroup, endpoint['ep']))
return output
@classmethod
def generate_transformations(cls, node, *args):
"""Overrides default transformation generation.
@ -1043,8 +1024,8 @@ class NeutronNetworkTemplateSerializer70(
nm = Cluster.get_network_manager(node.cluster)
netgroups = nm.get_node_ips(node)
netgroup_mapping = cls._get_netgroup_mapping_by_role(node)
netgroups = nm.get_node_networks_with_ips(node)
netgroup_mapping = nm.get_node_network_mapping(node)
for ngname, brname in netgroup_mapping:
ip_addr = netgroups.get(ngname, {}).get('ip')
if ip_addr:
@ -1094,12 +1075,12 @@ class NeutronNetworkTemplateSerializer70(
return attrs
@classmethod
def _get_endpoint_to_ip_mapping(cls, node, networks):
def _get_endpoint_to_ip_mapping(cls, node):
nm = Cluster.get_network_manager(node.cluster)
net_to_ips = nm.get_node_ips(node)
net_to_ips = nm.get_node_networks_with_ips(node)
mapping = dict()
net_to_ep = cls._get_netgroup_mapping_by_role(node)
net_to_ep = nm.get_node_network_mapping(node)
for network, ep in net_to_ep:
netgroup = net_to_ips.get(network, {})
if netgroup.get('ip'):
@ -1114,10 +1095,8 @@ class NeutronNetworkTemplateSerializer70(
:param node: instance of db.sqlalchemy.models.node.Node
:return: dict of network roles mapping
"""
nm = Cluster.get_network_manager(node.cluster)
networks = nm.get_node_networks(node)
network_roles = cls._get_network_roles(node)
ip_per_ep = cls._get_endpoint_to_ip_mapping(node, networks)
ip_per_ep = cls._get_endpoint_to_ip_mapping(node)
roles = {}
for role, ep in network_roles.items():
roles[role] = ip_per_ep.get(ep)
@ -1131,19 +1110,18 @@ class NeutronNetworkTemplateSerializer70(
nm = Cluster.get_network_manager(cluster)
for node in Cluster.get_nodes_not_for_deletion(cluster):
netw_data = []
for name, data in six.iteritems(nm.get_node_ips(node)):
for name, data in six.iteritems(
nm.get_node_networks_with_ips(node)):
data['name'] = name
netw_data.append(data)
addresses = {}
for net in node.cluster.network_groups:
if net.name == 'public' and \
not Node.should_have_public_with_ip(node):
continue
if net.meta.get('render_addr_mask'):
for net in netw_data:
render_addr_mask = net['meta'].get('render_addr_mask')
if render_addr_mask:
addresses.update(cls.get_addr_mask(
netw_data,
net.name,
net.meta.get('render_addr_mask')))
net['name'],
render_addr_mask))
[n.update(addresses) for n in nodes
if n['uid'] == str(node.uid)]
return nodes

View File

@ -118,6 +118,31 @@ class TestNetworkManager(BaseNetworkManagerTest):
self.assertEqual(False, gateway in assigned_ips)
self.assertEqual(False, broadcast in assigned_ips)
def test_get_free_ips_from_ranges(self):
    """Free IPs are taken in order, skipping DB-occupied and ips_in_use."""
    # Nothing occupied yet: the first three IPs of the range are free.
    ranges = [IPRange("192.168.33.2", "192.168.33.222")]
    ips = self.env.network_manager.get_free_ips_from_ranges(
        'management', ranges, set(), 3
    )
    self.assertItemsEqual(["192.168.33.2", "192.168.33.3", "192.168.33.4"],
                          ips)

    # An IP present in the DB must be skipped.
    self.db.add(IPAddr(ip_addr="192.168.33.3"))
    self.db.flush()
    ips = self.env.network_manager.get_free_ips_from_ranges(
        'management', ranges, set(), 3
    )
    self.assertItemsEqual(["192.168.33.2", "192.168.33.4", "192.168.33.5"],
                          ips)

    # IPs passed via ips_in_use are skipped as well (.3 is still in DB).
    ips = self.env.network_manager.get_free_ips_from_ranges(
        'management', ranges, set(["192.168.33.5", "192.168.33.8"]), 7
    )
    self.assertItemsEqual(
        ["192.168.33.2", "192.168.33.4", "192.168.33.6", "192.168.33.7",
         "192.168.33.9", "192.168.33.10", "192.168.33.11"],
        ips)
@fake_tasks(fake_rpc=False, mock_rpc=False)
@patch('nailgun.rpc.cast')
def test_assign_ips_idempotent(self, mocked_rpc):

View File

@ -15,6 +15,7 @@
# under the License.
import mock
import netaddr
import six
import yaml
@ -852,7 +853,7 @@ class TestNetworkTemplateSerializer70(BaseDeploymentSerializer):
def setUp(self, *args):
super(TestNetworkTemplateSerializer70, self).setUp()
self.cluster = self.create_env('ha_compact')
self.cluster = self.create_env(consts.NEUTRON_SEGMENT_TYPES.vlan)
self.net_template = self.env.read_fixtures(['network_template'])[0]
objects.Cluster.set_network_template(
@ -866,14 +867,14 @@ class TestNetworkTemplateSerializer70(BaseDeploymentSerializer):
self.serialized_for_astute = serializer(
AstuteGraph(cluster_db)).serialize(self.cluster, cluster_db.nodes)
def create_env(self, mode):
def create_env(self, segment_type):
return self.env.create(
release_kwargs={'version': self.env_version},
cluster_kwargs={
'api': False,
'mode': mode,
'net_provider': 'neutron',
'net_segment_type': 'vlan'},
'mode': consts.CLUSTER_MODES.ha_compact,
'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
'net_segment_type': segment_type},
nodes_kwargs=[
{'roles': ['controller'],
'pending_addition': True,
@ -883,6 +884,20 @@ class TestNetworkTemplateSerializer70(BaseDeploymentSerializer):
'name': self.node_name}
])
def create_more_nodes(self):
    """Add three more nodes with distinct role sets to the cluster."""
    cluster_id = self.cluster.id
    for roles in (['cinder'], ['cinder', 'controller'], ['compute']):
        self.env.create_node(roles=roles, cluster_id=cluster_id)
def check_node_ips_on_certain_networks(self, node, net_names):
    """Assert the node has exactly len(net_names) IPs and that every one
    of them belongs to a network from net_names.
    """
    ips = db().query(models.IPAddr).filter_by(node=node.id)
    self.assertEqual(ips.count(), len(net_names))
    for ip in ips:
        self.assertIn(ip.network_data.name, net_names)
def test_get_net_provider_serializer(self):
serializer = get_serializer_for_cluster(self.cluster)
self.cluster.network_config.configuration_template = None
@ -895,6 +910,50 @@ class TestNetworkTemplateSerializer70(BaseDeploymentSerializer):
net_serializer = serializer.get_net_provider_serializer(self.cluster)
self.assertIs(net_serializer, NeutronNetworkTemplateSerializer70)
def test_ip_assignment_according_to_template(self):
    """IPs are allocated only on networks the template maps to each node."""
    self.create_more_nodes()
    # according to the template different node roles have different sets of
    # networks
    node_roles_vs_net_names = [
        (['controller'], ['public', 'management', 'fuelweb_admin']),
        (['compute'], ['management', 'fuelweb_admin']),
        (['cinder'], ['storage', 'management', 'fuelweb_admin']),
        (['compute', 'cinder'],
         ['storage', 'management', 'fuelweb_admin']),
        (['controller', 'cinder'],
         ['public', 'storage', 'management', 'fuelweb_admin'])]

    template_meta = self.net_template["adv_net_template"]["default"]
    # wipe out 'storage' template for 'compute' node role to make
    # node roles more distinct
    for node_role, template_list in six.iteritems(
            template_meta["templates_for_node_role"]):
        if node_role == 'compute':
            template_list.remove('storage')

    objects.Cluster.set_network_template(
        self.cluster,
        self.net_template
    )
    self.prepare_for_deployment(self.env.nodes)

    cluster_db = objects.Cluster.get_by_uid(self.cluster['id'])
    serializer = get_serializer_for_cluster(self.cluster)
    serialized_for_astute = serializer(
        AstuteGraph(cluster_db)).serialize(self.cluster, cluster_db.nodes)

    # 7 node roles on 5 nodes
    self.assertEqual(len(serialized_for_astute), 7)
    for node_data in serialized_for_astute:
        node = objects.Node.get_by_uid(node_data['uid'])
        # Each node's IPs must match exactly the networks expected for
        # its role combination; any unknown combination is a failure.
        for node_roles, net_names in node_roles_vs_net_names:
            if node.all_roles == set(node_roles):
                self.check_node_ips_on_certain_networks(node, net_names)
                break
        else:
            self.fail("Unexpected combination of node roles: {0}".format(
                node.all_roles))
def test_multiple_node_roles_network_roles(self):
expected_roles = {
# controller node
@ -1083,7 +1142,7 @@ class TestNetworkTemplateSerializer70(BaseDeploymentSerializer):
# download default template and fix it
net_template = self.env.read_fixtures(['network_template'])[0]
template_meta = net_template["adv_net_template"]["default"]
# wide out network from template
# wipe out network from template
del(template_meta["network_assignments"][net_name])
for k, v in template_meta["templates_for_node_role"].iteritems():
if net_name in v:
@ -1165,10 +1224,21 @@ class TestNetworkTemplateSerializer70(BaseDeploymentSerializer):
]
for node_data in self.serialized_for_astute:
for n in node_data['nodes']:
self.assertTrue(
set(['storage_address', 'internal_address',
'storage_netmask', 'internal_netmask']) <=
set(n.keys()))
n_db = objects.Node.get_by_uid(n['uid'])
if 'controller' in n_db.roles:
self.assertIn('internal_address', n)
self.assertIn('internal_netmask', n)
self.assertIn('public_address', n)
self.assertIn('public_netmask', n)
self.assertNotIn('storage_address', n)
self.assertNotIn('storage_netmask', n)
else:
self.assertIn('internal_address', n)
self.assertIn('internal_netmask', n)
self.assertNotIn('public_address', n)
self.assertNotIn('public_netmask', n)
self.assertIn('storage_address', n)
self.assertIn('storage_netmask', n)
nodes = node_data['network_metadata']['nodes']
for node_name, node_attrs in nodes.items():
# IPs must be serialized for these roles which are tied to
@ -1200,6 +1270,81 @@ class TestNetworkTemplateSerializer70(BaseDeploymentSerializer):
endpoints = node_data["network_scheme"]["endpoints"]
self.assertEqual(endpoints["br-ex"]["IP"], [ng.get('ip')])
def test_get_node_network_mapping(self):
    """Network-to-endpoint mapping matches each node's role combination."""
    self.create_more_nodes()
    nm = objects.Cluster.get_network_manager(self.cluster)
    # according to the template different node roles have different sets of
    # networks (endpoints and network names here)
    node_roles_vs_networks = [
        (['controller'], [('public', 'br-ex'),
                          ('management', 'br-mgmt'),
                          ('fuelweb_admin', 'br-fw-admin')]),
        (['compute'], [('private', 'br-prv'),
                       ('storage', 'br-storage'),
                       ('management', 'br-mgmt'),
                       ('fuelweb_admin', 'br-fw-admin')]),
        (['cinder'], [('storage', 'br-storage'),
                      ('management', 'br-mgmt'),
                      ('fuelweb_admin', 'br-fw-admin')]),
        (['compute', 'cinder'], [('private', 'br-prv'),
                                 ('storage', 'br-storage'),
                                 ('management', 'br-mgmt'),
                                 ('fuelweb_admin', 'br-fw-admin')]),
        (['controller', 'cinder'], [('public', 'br-ex'),
                                    ('storage', 'br-storage'),
                                    ('management', 'br-mgmt'),
                                    ('fuelweb_admin', 'br-fw-admin')])]

    for node in self.env.nodes:
        net_names_and_eps = nm.get_node_network_mapping(node)
        for node_roles, networks in node_roles_vs_networks:
            if node.all_roles == set(node_roles):
                self.assertItemsEqual(net_names_and_eps, networks)
                break
        else:
            # A node with an unexpected role set was previously skipped
            # silently; fail explicitly instead (same pattern as
            # test_ip_assignment_according_to_template).
            self.fail("Unexpected combination of node roles: {0}".format(
                node.all_roles))
def test_get_network_name_to_endpoint_mappings(self):
    """Mapping is keyed by node group id; endpoints map to network names."""
    nm = objects.Cluster.get_network_manager(self.cluster)
    group_id = objects.Cluster.get_default_group(self.cluster).id
    self.assertEqual(
        nm.get_network_name_to_endpoint_mappings(self.cluster),
        {
            group_id: {
                'br-ex': 'public',
                'br-mgmt': 'management',
                'br-fw-admin': 'fuelweb_admin',
                'br-prv': 'private',
                'br-storage': 'storage',
            }
        }
    )
def test_assign_ips_in_node_group(self):
    """New nodes get management IPs; existing allocations are untouched."""
    mgmt = self.db.query(models.NetworkGroup).\
        filter_by(name='management').first()
    ips_2_db = self.db.query(models.IPAddr.ip_addr).\
        filter(models.IPAddr.network == mgmt.id,
               models.IPAddr.node.isnot(None))
    # two nodes now
    self.assertEqual(ips_2_db.count(), 2)
    ips_2_str = set(ips_2_db)

    # add three nodes
    self.create_more_nodes()
    node_ids = set(n.id for n in self.env.nodes)
    ip_ranges = [netaddr.IPRange(r.first, r.last)
                 for r in mgmt.ip_ranges]

    nm = objects.Cluster.get_network_manager(self.cluster)
    nm.assign_ips_in_node_group(
        mgmt.id, mgmt.name, node_ids, ip_ranges)

    ips_5_db = self.db.query(models.IPAddr.ip_addr). \
        filter(models.IPAddr.network == mgmt.id,
               models.IPAddr.node.isnot(None))
    self.assertEqual(ips_5_db.count(), 5)
    ips_5_str = set(ips_5_db)
    # old IPs are the same
    # (5 total with exactly 3 not in the old set implies the original
    # 2 allocations are still present)
    self.assertEqual(len(ips_5_str.difference(ips_2_str)), 3)
class TestCustomNetGroupIpAllocation(BaseDeploymentSerializer):

View File

@ -517,7 +517,8 @@ class TestVerifyNovaFlatDHCP(BaseIntegrationTest):
resp = self.env.launch_verify_networks(expect_errors=True)
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.json_body['message'],
'Not enough free IP addresses in pool')
"Not enough free IP addresses in ranges "
"[172.16.0.35-172.16.0.38] of 'public' network")
class TestVerifyNeutronVlan(BaseIntegrationTest):

View File

@ -224,8 +224,7 @@ class TestAssignmentHandlers(BaseIntegrationTest):
self.assertEquals(200, resp.status_code, resp.body)
def test_add_node_with_cluster_network_template(self):
net_template = """
{
net_template = {
"adv_net_template": {
"default": {
"network_assignments": {
@ -281,9 +280,8 @@ class TestAssignmentHandlers(BaseIntegrationTest):
}
}
}
}
"""
net_template = jsonutils.loads(net_template)
}
cluster = self.env.create_cluster(api=False)
cluster.network_config.configuration_template = net_template
@ -313,9 +311,9 @@ class TestAssignmentHandlers(BaseIntegrationTest):
self.assertEquals('eth1', net_scheme['transformations'][1]['name'])
class TestClusterStateUnassigment(BaseIntegrationTest):
class TestClusterStateUnassignment(BaseIntegrationTest):
def test_delete_bond_and_networks_state_on_unassigmnet(self):
def test_delete_bond_and_networks_state_on_unassignment(self):
"""Test verifies that
1. bond configuration will be deleted
2. network unassigned from node interfaces