Don't assign VIPs for GET requests

Since VIPs make no sense before a cluster is deployed,
there is no need to assign them when the network
configuration is generated for GET requests.

Co-authored-by: Maciej Kwiek <mkwiek@mirantis.com>
Co-authored-by: Roman Prykhodchenko <me@romcheg.me>

Change-Id: I382066cc62a9d98f728f5cd5edf771a5a980922f
Closes-bug: #1504572
Closes-bug: #1499291
Partial-bug: #1524320
Roman Prykhodchenko authored 2016-02-01 18:13:17 +01:00, committed by tatyana-leontovich
parent b46960133e
commit 77f5eaf896
8 changed files with 189 additions and 115 deletions
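
In practice the change threads an allocation flag from the handlers and the task manager down to the serializers. A minimal sketch of the two call paths (the import path and the 'cluster' variable are assumptions based on the usual Nailgun layout; the exact handler code is in the hunks below):

    from nailgun.objects.serializers.network_configuration import (
        NeutronNetworkConfigurationSerializer as serializer)

    # 'cluster' is a Cluster model instance.

    # GET: read-only; nothing is allocated, so VIPs show up only if they
    # were assigned by an earlier PUT or deployment.
    network_config = serializer.serialize_for_cluster(cluster)

    # PUT (and deployment-time serialization): VIPs are assigned here, so
    # OutOfIPs / DuplicatedVIPNames now surface as 400s on PUT, not on GET.
    network_config = serializer.serialize_for_cluster(cluster,
                                                      allocate_vips=True)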


@ -99,10 +99,6 @@ class ProviderHandler(BaseHandler):
# some corner cases, and it should be fixed. In order
# to simplify troubleshooting, let's print traceback to log.
return self.serializer.serialize_for_cluster(cluster)
except errors.OutOfIPs as exc:
raise self.http(400, six.text_type(exc))
except errors.DuplicatedVIPNames as exc:
raise self.http(400, six.text_type(exc))
except Exception:
logger.exception('Serialization failed')
raise
@ -136,7 +132,10 @@ class ProviderHandler(BaseHandler):
nm.update(cluster, data)
try:
network_config = self.serializer.serialize_for_cluster(cluster)
network_config = self.serializer.serialize_for_cluster(cluster,
True)
except errors.DuplicatedVIPNames as exc:
raise self.http(400, six.text_type(exc))
except errors.OutOfIPs as exc:
network_id = getattr(exc, 'network_id', None)
raise self.http(


@ -300,6 +300,30 @@ class NetworkManager(object):
db().add(ip_db)
db().flush()
@classmethod
def get_assigned_vip(cls, nodegroup, network_name, vip_type):
"""Get VIP address, if it was assigned already
:param nodegroup: Name of the node group.
:param nerwork_name: Name of a network the VIP is allocated in.
:param vip_type: Type of a required VIP.
:returns: IP address of a VIP that matches specified criterias.
None, if no VIP matches specificied criterias.
"""
network = cls.get_network_by_name_and_nodegroup(network_name,
nodegroup)
cluster_vip_q = db().query(IPAddr).filter_by(vip_type=vip_type)
if network is not None:
cluster_vip_q = cluster_vip_q.filter_by(network=network.id)
cluster_vip = cluster_vip_q.first()
if cluster_vip:
return cluster_vip.ip_addr
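A short usage sketch for the lookup added above ('nodegroup' and the network/VIP names are illustrative, not taken from this change):

    from nailgun import consts
    from nailgun.network.manager import NetworkManager

    # Purely a read: returns the already-assigned address or None; it never
    # reserves a new IP.
    vip = NetworkManager.get_assigned_vip(
        nodegroup, consts.NETWORKS.management,
        consts.NETWORK_VIP_TYPES.haproxy)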
@classmethod
def assign_vip(cls, nodegroup, network_name,
vip_type=consts.NETWORK_VIP_TYPES.haproxy):
@ -322,20 +346,18 @@ class NetworkManager(object):
:type vip_type: str
:returns: assigned VIP (string)
:raises: Exception
"""
network = db().query(NetworkGroup).\
filter_by(name=network_name, group_id=nodegroup.id).first()
ips_in_use = None
already_assigned = cls.get_assigned_vip(nodegroup,
network_name, vip_type)
network = cls.get_network_by_name_and_nodegroup(network_name,
nodegroup)
# FIXME:
# Built-in fuelweb_admin network doesn't belong to any node
# group, since it's shared between all clusters. So we need
# to handle this very special case if we want to be able
# to allocate VIP in default admin network.
if not network and network_name == consts.NETWORKS.fuelweb_admin:
network = cls.get_admin_network_group()
if already_assigned is not None and \
cls.check_ip_belongs_to_net(already_assigned, network):
return already_assigned
if not network:
if network is None:
raise errors.CanNotFindNetworkForNodeGroup(
u"Network '{0}' for nodegroup='{1}' not found.".format(
network_name, nodegroup.name))
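With the early return above, assign_vip becomes effectively idempotent: an already-assigned VIP that still falls inside the network's CIDR is returned unchanged, and a new address is reserved only otherwise. A hedged sketch ('nodegroup' and the network name are illustrative):

    from nailgun import consts
    from nailgun.network.manager import NetworkManager

    # First call reserves an address; repeated calls return the same address
    # as long as it still belongs to the network's CIDR.
    first = NetworkManager.assign_vip(nodegroup, consts.NETWORKS.public)
    again = NetworkManager.assign_vip(nodegroup, consts.NETWORKS.public)
    assert first == again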
@ -345,10 +367,8 @@ class NetworkManager(object):
node=None,
vip_type=vip_type
).first()
# check if cluster_vip is in required cidr: network.cidr
if cluster_vip and cls.check_ip_belongs_to_net(cluster_vip.ip_addr,
network):
return cluster_vip.ip_addr
ips_in_use = None
if network_name == consts.NETWORKS.fuelweb_admin:
# Nodes not currently assigned to a cluster will still
@ -463,7 +483,7 @@ class NetworkManager(object):
db().flush()
@classmethod
def assign_vips_for_net_groups_for_api(cls, cluster):
def assign_vips_for_net_groups_for_api(cls, cluster, allocate=True):
return cls.assign_vips_for_net_groups(cluster)
@classmethod
@ -1721,6 +1741,31 @@ class NetworkManager(object):
result.append(net_info)
return result
@classmethod
def get_network_by_name_and_nodegroup(cls, name, nodegroup):
"""Find a network that matches a specified name and a nodegroup.
:param name: Name of a network
:param nodegroup: The nodegroup object
:return: Network object that matches a specified name and a nodegroup.
Admin network, if nothing found.
"""
network = db().query(NetworkGroup).\
filter_by(name=name, group_id=nodegroup.id).first()
network = next((net for net in nodegroup.networks if net.name == name),
None)
# FIXME:
# Built-in fuelweb_admin network doesn't belong to any node
# group, since it's shared between all clusters. So we need
# to handle this very special case if we want to be able
# to allocate VIP in default admin network.
if not network and name == consts.NETWORKS.fuelweb_admin:
network = cls.get_admin_network_group()
return network
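The new helper centralizes the lookup, including the fuelweb_admin special case that assign_vip previously handled inline. A brief sketch of the fallback behaviour ('nodegroup' and the unknown network name are illustrative):

    from nailgun import consts
    from nailgun.network.manager import NetworkManager

    # fuelweb_admin is shared between clusters and belongs to no node group,
    # so the lookup falls back to get_admin_network_group() for it...
    admin_net = NetworkManager.get_network_by_name_and_nodegroup(
        consts.NETWORKS.fuelweb_admin, nodegroup)

    # ...while any other name the node group does not define yields None.
    assert NetworkManager.get_network_by_name_and_nodegroup(
        'no-such-network', nodegroup) is None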
class AllocateVIPs70Mixin(object):
@ -1765,51 +1810,17 @@ class AllocateVIPs70Mixin(object):
@classmethod
def _assign_vips_for_net_groups(cls, cluster):
# noderole -> nodegroup mapping
# is used to determine the nodegroup where the VIP should be allocated
noderole_nodegroup = {}
# nodegroup -> role-to-network mapping
# is used to determine the role-to-network mapping that is needed
# for choosing the proper network for VIP allocation
nodegroup_networks = {}
for nodegroup, net_group, vip_name, role, vip_info\
in cls.get_node_groups_info(cluster):
vip_addr = cls.assign_vip(nodegroup, net_group, vip_name)
# iterate over all network roles, assign vip and yield information
# about assignment
for role in objects.Cluster.get_network_roles(cluster):
properties = role.get('properties', {})
for vip_info in properties.get('vip', ()):
noderoles = tuple(vip_info.get('node_roles', ['controller']))
if vip_addr is None:
continue
# Since we're iterating over all VIP requests, we most
# likely meet the same noderoles again and again. Let's
# calculate node group just once, cache and use cached
# value in order to reduce number of SQL queries.
if noderoles not in noderole_nodegroup:
noderole_nodegroup[noderoles] = \
objects.Cluster.get_common_node_group(cluster,
noderoles)
nodegroup = noderole_nodegroup[noderoles]
# Since different node roles may have the same node group,
# it'd be ridiculous to build "role-to-network-group" mapping
# each time we retrieve the group. So let's save mapping
# in cache and retrieve it if necessary.
if nodegroup.name not in nodegroup_networks:
nodegroup_networks[nodegroup.name] = \
cls.build_role_to_network_group_mapping(
cluster, nodegroup.name)
net_group = cls.get_network_group_for_role(
role,
nodegroup_networks[nodegroup.name])
vip_name = vip_info['name']
# do allocation
vip_addr = cls.assign_vip(nodegroup, net_group, vip_name)
yield role, vip_info, vip_addr
yield role, vip_info, vip_addr
@classmethod
def assign_vips_for_net_groups_for_api(cls, cluster):
def assign_vips_for_net_groups_for_api(cls, cluster, allocate=True):
"""Calls cls.assign_vip for all vips in network roles.
Returns dict with vip definitions in API compatible format::
@ -1822,13 +1833,17 @@ class AllocateVIPs70Mixin(object):
:type cluster: Cluster model
:return: dict with vip definitions
"""
# check VIPs names overlapping before assigning them
cls.check_unique_vip_names_for_cluster(cluster)
vips = {}
vips['vips'] = {}
for role, vip_info, vip_addr in cls._assign_vips_for_net_groups(
cluster):
if allocate:
# check VIPs names overlapping before assigning them
cls.check_unique_vip_names_for_cluster(cluster)
allocated_vips_data = cls._assign_vips_for_net_groups(cluster)
else:
allocated_vips_data = cls._get_vips_for_net_groups(cluster)
for role, vip_info, vip_addr in allocated_vips_data:
vip_name = vip_info['name']
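For API consumers the branch above means the same entry point either assigns VIPs or merely reports the ones that already exist. A condensed sketch ('cluster' is assumed to be a Cluster model):

    from nailgun import objects

    net_manager = objects.Cluster.get_network_manager(cluster)

    # allocate=True (PUT / deployment): VIP names are validated and
    # addresses are assigned.
    assigned = net_manager.assign_vips_for_net_groups_for_api(cluster)

    # allocate=False (GET): only VIPs already stored in the database are
    # reported, so a freshly created cluster gets no VIPs in its GET response.
    reported = net_manager.assign_vips_for_net_groups_for_api(
        cluster, allocate=False)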
@ -1911,18 +1926,21 @@ class AllocateVIPs70Mixin(object):
.format(cluster.id, ', '.join(duplicate_vip_names))
)
@classmethod
def _get_vips_for_net_groups(cls, cluster):
for nodegroup, net_group, vip_name, role, vip_info \
in cls.get_node_groups_info(cluster):
class AllocateVIPs80Mixin(object):
net_mgr = objects.Cluster.get_network_manager(cluster)
vip_addr = net_mgr.get_assigned_vip(nodegroup, net_group, vip_name)
if vip_addr is None:
continue
yield role, vip_info, vip_addr
@classmethod
def _build_advanced_vip_info(cls, vip_info, role, address):
info = AllocateVIPs70Mixin._build_advanced_vip_info(
vip_info, role, address)
info['vendor_specific'] = vip_info.get('vendor_specific')
return info
@classmethod
def _assign_vips_for_net_groups(cls, cluster):
def get_node_groups_info(cls, cluster):
# noderole -> nodegroup mapping
# is used to determine the nodegroup where the VIP should be allocated
noderole_nodegroup = {}
@ -1972,10 +1990,17 @@ class AllocateVIPs80Mixin(object):
"Skip VIP '{0}' which is mapped to non-existing"
" network '{1}'".format(vip_name, net_group))
continue
yield nodegroup, net_group, vip_name, role, vip_info
# do allocation
vip_addr = cls.assign_vip(nodegroup, net_group, vip_name)
yield role, vip_info, vip_addr
class AllocateVIPs80Mixin(object):
@classmethod
def _build_advanced_vip_info(cls, vip_info, role, address):
info = AllocateVIPs70Mixin._build_advanced_vip_info(
vip_info, role, address)
info['vendor_specific'] = vip_info.get('vendor_specific')
return info
class AssignIPsLegacyMixin(object):


@ -1175,6 +1175,17 @@ class Cluster(NailgunObject):
return [x[0] for x in db().query(models.Node.id).filter(
models.Node.cluster_id == instance.id).all()]
@classmethod
def get_vips(cls, instance):
net_roles = cls.get_network_roles(instance)
cluster_vips = []
for nr in net_roles:
cluster_vips.extend(nr['properties']['vip'])
return cluster_vips
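get_vips flattens the VIP definitions declared by the cluster's network roles into a single list. A small usage sketch ('cluster' is assumed to be a Cluster model):

    from nailgun import objects

    # Each entry is the raw 'vip' dict from a network role's properties,
    # e.g. {'name': 'my-vip', 'node_roles': [...], ...}.
    vip_defs = objects.Cluster.get_vips(cluster)
    vip_names = [vip['name'] for vip in vip_defs]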
@classmethod
def get_assigned_roles(cls, instance):
"""Get list of all roles currently assigned to nodes in cluster


@ -36,7 +36,7 @@ class NetworkConfigurationSerializer(BasicSerializer):
return data_dict
@classmethod
def serialize_net_groups_and_vips(cls, cluster):
def serialize_net_groups_and_vips(cls, cluster, allocate=False):
result = {}
net_manager = objects.Cluster.get_network_manager(cluster)
nets = cluster.network_groups + [net_manager.get_admin_network_group()]
@ -45,9 +45,11 @@ class NetworkConfigurationSerializer(BasicSerializer):
cls.serialize_network_group,
nets
)
if cluster.is_ha_mode:
result.update(
net_manager.assign_vips_for_net_groups_for_api(cluster))
net_manager.assign_vips_for_net_groups_for_api(cluster,
allocate))
return result
@ -71,8 +73,9 @@ class NovaNetworkConfigurationSerializer(NetworkConfigurationSerializer):
)
@classmethod
def serialize_for_cluster(cls, cluster):
result = cls.serialize_net_groups_and_vips(cluster)
def serialize_for_cluster(cls, cluster, allocate_vips=False):
result = cls.serialize_net_groups_and_vips(cluster,
allocate=allocate_vips)
result['networking_parameters'] = cls.serialize_network_params(
cluster)
return result
@ -104,7 +107,7 @@ class NeutronNetworkConfigurationSerializer(NetworkConfigurationSerializer):
return BasicSerializer.serialize(cluster.network_config, fields)
@classmethod
def serialize_for_cluster(cls, cluster):
result = cls.serialize_net_groups_and_vips(cluster)
def serialize_for_cluster(cls, cluster, allocate_vips=False):
result = cls.serialize_net_groups_and_vips(cluster, allocate_vips)
result['networking_parameters'] = cls.serialize_network_params(cluster)
return result


@ -99,7 +99,10 @@ class TaskManager(object):
def serialize_network_cfg(self, cluster):
serializer = {'nova_network': NovaNetworkConfigurationSerializer,
'neutron': NeutronNetworkConfigurationSerializer}
return serializer[cluster.net_provider].serialize_for_cluster(cluster)
return serializer[cluster.net_provider].serialize_for_cluster(
cluster,
allocate_vips=True
)
class DeploymentCheckMixin(object):


@ -14,10 +14,10 @@
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import copy
from mock import patch
import netaddr
from oslo_serialization import jsonutils
from sqlalchemy.sql import not_
@ -286,13 +286,21 @@ class TestNeutronNetworkConfigurationHandler(BaseIntegrationTest):
for key in keys:
self.assertEqual(network[key], getattr(network_group, key))
def test_get_request_should_return_vips(self):
def test_get_request_should_return_vips_after_put(self):
self.env.neutron_networks_put(self.cluster.id, {})
resp = self.env.neutron_networks_get(self.cluster.id)
data = resp.json_body
self.assertIn('public_vip', data)
self.assertIn('management_vip', data)
def test_get_request_should_not_return_vips_before_assignment(self):
resp = self.env.neutron_networks_get(self.cluster.id)
data = resp.json_body
self.assertNotIn('public_vip', data)
self.assertNotIn('management_vip', data)
def test_not_found_cluster(self):
resp = self.env.neutron_networks_get(self.cluster.id + 999,
expect_errors=True)
@ -523,7 +531,9 @@ class TestNeutronNetworkConfigurationHandler(BaseIntegrationTest):
self.assertIsNone(strg['gateway'])
def test_admin_vip_reservation(self):
self.cluster.release.network_roles_metadata.append({
nrm_copy = copy.deepcopy(
self.cluster.release.network_roles_metadata)
nrm_copy.append({
'id': 'admin/vip',
'default_mapping': consts.NETWORKS.fuelweb_admin,
'properties': {
@ -534,10 +544,11 @@ class TestNeutronNetworkConfigurationHandler(BaseIntegrationTest):
}]
}
})
self.cluster.release.network_roles_metadata = nrm_copy
self.cluster.release.version = '2015.1-8.0'
self.db.flush()
resp = self.env.neutron_networks_get(self.cluster.id)
resp = self.env.neutron_networks_put(self.cluster.id, {})
self.assertEqual(200, resp.status_code)
nm = objects.Cluster.get_network_manager(self.cluster)
@ -546,7 +557,7 @@ class TestNeutronNetworkConfigurationHandler(BaseIntegrationTest):
nm.assign_vip(nodegroup, consts.NETWORKS.fuelweb_admin, 'my-vip'),
resp.json_body['vips']['my-vip']['ipaddr'])
def test_not_enough_ip_addresses_return_400_on_get(self):
def test_not_enough_ip_addresses_return_200_on_get(self):
# restrict public network to have only 2 ip addresses
netconfig = self.env.neutron_networks_get(self.cluster.id).json_body
public = next((
@ -572,15 +583,10 @@ class TestNeutronNetworkConfigurationHandler(BaseIntegrationTest):
})
self.db.flush()
# check that we return 400 Bad Request
resp = self.env.neutron_networks_get(
self.cluster.id,
expect_errors=True)
self.assertEqual(400, resp.status_code)
self.assertEqual(
"Not enough free IP addresses in ranges [172.16.0.2-172.16.0.4] "
"of 'public' network",
resp.json_body['message'])
self.assertEqual(200, resp.status_code)
def test_not_enough_ip_addresses_return_400_on_put(self):
netconfig = self.env.neutron_networks_get(self.cluster.id).json_body
@ -618,7 +624,9 @@ class TestNeutronNetworkConfigurationHandler(BaseIntegrationTest):
# populate release with a network role that requests a VIP
# for compute nodes
self.env.clusters[0].release.network_roles_metadata.append({
nrm_copy = copy.deepcopy(
self.env.clusters[0].release.network_roles_metadata)
nrm_copy.append({
'id': 'mymgmt/vip',
'default_mapping': consts.NETWORKS.management,
'properties': {
@ -629,24 +637,19 @@ class TestNeutronNetworkConfigurationHandler(BaseIntegrationTest):
'node_roles': ['compute'],
}]
}})
self.env.clusters[0].release.network_roles_metadata = nrm_copy
self.db.flush()
self.env.neutron_networks_put(self.cluster.id, {})
resp = self.env.neutron_networks_get(self.cluster.id)
self.assertEqual(200, resp.status_code)
ipaddr = resp.json_body['vips']['my-vip']['ipaddr']
self.assertEqual('10.42.0.2', ipaddr)
def test_get_returns_error_if_vip_names_are_intersected(self):
cluster = self.env.create(
release_kwargs={'version': '2015.1.0-7.0'},
cluster_kwargs={
'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.gre,
'api': False,
},
nodes_kwargs=[{'roles': ['controller']}]
)
cluster.release.network_roles_metadata.append({
def test_put_returns_error_if_vip_names_are_intersected(self):
nrm_copy = copy.deepcopy(self.cluster.release.network_roles_metadata)
nrm_copy.append({
'id': 'mymgmt/vip',
'default_mapping': consts.NETWORKS.management,
'properties': {
@ -657,14 +660,44 @@ class TestNeutronNetworkConfigurationHandler(BaseIntegrationTest):
'node_roles': ['compute'],
}]
}})
self.cluster.release.network_roles_metadata = nrm_copy
self.db.flush()
resp = self.env.neutron_networks_get(cluster.id, expect_errors=True)
resp = self.env.neutron_networks_put(self.cluster.id, {},
expect_errors=True)
self.assertEqual(400, resp.status_code)
self.assertIn(
'Duplicate VIP names found in network configuration',
resp.json_body['message']
)
@patch('nailgun.network.manager.AllocateVIPs70Mixin.'
'_assign_vips_for_net_groups')
def test_network_conf_not_applied_on_vip_allocation_error(self, m_assign):
m_assign.side_effect = Exception
resp = self.env.neutron_networks_get(self.cluster.id)
net_conf = resp.json_body
old_cidr = ''
for net in net_conf['networks']:
if net['name'] == 'management':
old_cidr = net['cidr']
net['cidr'] = net['cidr'].partition('/')[0] + '/25'
resp = self.env.neutron_networks_put(self.cluster.id,
net_conf,
expect_errors=True)
self.assertEqual(resp.status_code, 500)
self.db.refresh(self.cluster)
net = [ng for ng in self.cluster.network_groups
if ng.name == 'management'].pop()
self.assertEqual(net.cidr, old_cidr)
self.assertTrue(m_assign.called)  # Verifies no other error occurred
class TestNovaNetworkConfigurationHandlerHA(BaseIntegrationTest):


@ -206,11 +206,8 @@ class TestNetworkManager(BaseNetworkManagerTest):
nodegroup = objects.Cluster.get_controllers_node_group(
self.env.clusters[0])
self.assertNotRaises(
Exception, # make sure there's no exceptions at all
self.env.network_manager.assign_vip,
nodegroup,
consts.NETWORKS.fuelweb_admin)
self.env.network_manager.assign_vip(nodegroup,
consts.NETWORKS.fuelweb_admin)
def test_assign_vip_throws_not_found_exception(self):
self.env.create_cluster(api=True)


@ -538,6 +538,8 @@ class TestNodeGroups(BaseIntegrationTest):
self.env.clusters[0].release.network_roles_metadata = net_roles
self.db.flush()
# VIPs are allocated on this call
self.env.neutron_networks_put(self.cluster.id, {})
config = self.env.neutron_networks_get(self.cluster.id).json_body
# Storage network has no GW by default
vip_config = config['vips']['my-vip']['ipaddr']
@ -548,6 +550,7 @@ class TestNodeGroups(BaseIntegrationTest):
resp = self.env.create_node_group()
self.assertEquals(resp.status_code, 201)
self.env.neutron_networks_put(self.cluster.id, {})
# VIP address was deleted
self.assertEqual(