Add Scaling IP API extension

JIRA: NCP-1774

The current Floating IPs extension allows a one-to-one association of an IP
address to a port's fixed ip address.  This is aligned with neutron's
Floating IP API and should not be changed.  Scaling IPs is essentially a
floating IP with a one-to-many association of an IP address to multiple ports'
fixed ip addresses.  Due to this simple, but fundamentally different, change
a new extension and API resource makes the most sense.

A new database enum value is needed to differentiate between a floating
and scaling IP address.  This ensures that when a user retrieves a list of
floating IPs, scaling IPs are not returned in that list.  It also ensures the
inverse as well.

The consolidation of code to reduce repeated code has been strived for, but
it could always be better.  The current state tries to strike a balance between
complete DRY and drastically changing the existing floating IP code.
This commit is contained in:
Brandon Logan
2016-01-24 02:42:17 -06:00
parent 9225b4dea6
commit db244246d4
15 changed files with 1159 additions and 249 deletions

View File

@@ -0,0 +1,139 @@
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
def _validate_list_of_port_dicts(values, data):
    """Validate that *values* is a list of unique port dictionaries.

    :param values: candidate value taken from the request body.
    :param data: name of the attribute being validated (used in the
        error message).
    :returns: an error message string on failure, None on success
        (the convention used by neutron attribute validators).
    """
    if not isinstance(values, list):
        msg = _("'%s' is not a list") % data
        return msg
    for item in values:
        msg = _validate_port_dict(item)
        if msg:
            return msg
    # Sort each dict's items so duplicate detection does not depend on
    # dictionary insertion/iteration order; two dicts with the same
    # key/value pairs must always compare equal here.
    items = [tuple(sorted(entry.items())) for entry in values]
    if len(items) != len(set(items)):
        msg = _("Duplicate items in the list: '%s'") % values
        return msg
def _validate_port_dict(values):
    """Validate a single {port_id, fixed_ip_address} dictionary.

    :returns: an error message string on failure, None on success.
    """
    if not isinstance(values, dict):
        msg = _("%s is not a valid dictionary") % values
        LOG.debug(msg)
        return msg
    msg = attr._validate_uuid(values.get('port_id'))
    if msg:
        return msg
    fixed_ip = values.get('fixed_ip_address')
    # fixed_ip_address is optional; only validate it when supplied.
    if fixed_ip is None:
        return None
    return attr._validate_ip_address(fixed_ip) or None
# Register the custom validator so it can be referenced by name from the
# attribute map below.
attr.validators['type:validate_list_of_port_dicts'] = (
    _validate_list_of_port_dicts
)

RESOURCE_NAME = "scalingip"
RESOURCE_COLLECTION = RESOURCE_NAME + "s"

# Attribute map describing the scalingip API resource: which fields may be
# set on POST/PUT, how each is validated, and which appear in responses.
RESOURCE_ATTRIBUTE_MAP = {
    RESOURCE_COLLECTION: {
        # Server-generated identifier; read-only.
        'id': {
            'allow_post': False, 'allow_put': False,
            'validate': {'type:uuid': None},
            'is_visible': True,
            'primary_key': True
        },
        # Optional specific address to allocate (defaults to None, in
        # which case the backend allocates one from the network).
        "scaling_ip_address": {
            'allow_post': True, 'allow_put': False,
            'validate': {'type:ip_address_or_none': None},
            'is_visible': True, 'default': None,
            'enforce_policy': True
        },
        "tenant_id": {
            'allow_post': True, 'allow_put': False,
            'required_by_policy': True,
            'validate': {'type:string': attr.TENANT_ID_MAX_LEN},
            'is_visible': True
        },
        # Network from which the scaling IP is allocated; immutable.
        "scaling_network_id": {
            'allow_post': True, 'allow_put': False,
            'validate': {'type:uuid': None},
            'is_visible': True
        },
        # List of {port_id, fixed_ip_address} dicts; the only mutable
        # field (allow_put), validated by the custom validator above.
        "ports": {
            'allow_post': True, 'allow_put': True,
            'validate': {
                'type:validate_list_of_port_dicts': None
            },
            'is_visible': True,
            'required_by_policy': True
        }
    }
}
class Scalingip(extensions.ExtensionDescriptor):
    """Neutron API extension descriptor for the scaling IP resource."""

    @classmethod
    def get_name(cls):
        """Return the human-readable extension name."""
        return RESOURCE_NAME

    @classmethod
    def get_alias(cls):
        """Return the alias used to enable/look up this extension."""
        return RESOURCE_NAME

    @classmethod
    def get_description(cls):
        """Return a short description of the extension."""
        return "Scaling IPs"

    @classmethod
    def get_namespace(cls):
        """Return the XML namespace URL for the extension."""
        return ("http://docs.openstack.org/network/ext/"
                "networks_quark/api/v2.0")

    @classmethod
    def get_updated(cls):
        """Return the timestamp of the last extension update."""
        return "2016-01-20T19:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        mappings = resource_helper.build_plural_mappings(
            {}, RESOURCE_ATTRIBUTE_MAP)
        attr.PLURALS.update(mappings)
        return resource_helper.build_resource_info(mappings,
                                                   RESOURCE_ATTRIBUTE_MAP,
                                                   None,
                                                   register_quota=True)

    def get_extended_resources(self, version):
        """Return the resource map only for API version 2.0."""
        return RESOURCE_ATTRIBUTE_MAP if version == "2.0" else {}

View File

@@ -1010,12 +1010,22 @@ def floating_ip_find(context, lock_mode=False, limit=None, sorts=None,
def floating_ip_associate_fixed_ip(context, floating_ip, fixed_ip):
    """Associate *fixed_ip* with *floating_ip* and return the floating IP.

    Maintains both the legacy one-to-one ``fixed_ip`` attribute and the
    one-to-many ``fixed_ips`` collection.
    """
    floating_ip.fixed_ips.append(fixed_ip)
    floating_ip.fixed_ip = fixed_ip
    return floating_ip
def floating_ip_disassociate_fixed_ip(context, floating_ip, fixed_ip):
    """Remove *fixed_ip* from *floating_ip*'s fixed-IP collection.

    :param context: neutron api request context (unused; kept for parity
        with the other db api helpers).
    :param floating_ip: IPAddress model whose association is removed.
    :param fixed_ip: the fixed IPAddress to disassociate.
    :returns: the (mutated) floating_ip.
    """
    for index, flip_fixed_ip in enumerate(floating_ip.fixed_ips):
        if flip_fixed_ip.id == fixed_ip.id:
            # Pop only on a match: the previous code unconditionally
            # called pop(found_index), which raised TypeError
            # (pop(None)) when fixed_ip was not associated.
            floating_ip.fixed_ips.pop(index)
            break
    return floating_ip
def floating_ip_disassociate_all_fixed_ips(context, floating_ip):
    """Drop every fixed-IP association from *floating_ip*.

    :returns: the (mutated) floating_ip.
    """
    floating_ip.fixed_ips = []
    return floating_ip

View File

@@ -1,3 +1,4 @@
# Values stored in the quark_ip_addresses.address_type enum column.
SHARED = 'shared'
FIXED = 'fixed'
# One-to-one NAT association of an IP to a port's fixed IP.
FLOATING = 'floating'
# One-to-many analogue of FLOATING, introduced by the scaling IP extension;
# keeps scaling IPs out of floating IP listings and vice versa.
SCALING = 'scaling'

View File

@@ -0,0 +1,30 @@
"""add scaling ip address type enum
Revision ID: 3f0c11478a5d
Revises: a0798b3b7418
Create Date: 2016-01-22 23:41:03.214930
"""
# revision identifiers, used by Alembic.
revision = '3f0c11478a5d'
down_revision = 'a0798b3b7418'
from alembic import op
import sqlalchemy as sa
existing_enum = sa.Enum("shared", "floating", "fixed")
new_enum = sa.Enum("shared", "floating", "fixed", "scaling")
def upgrade():
op.alter_column("quark_ip_addresses", "address_type",
existing_type=existing_enum,
type_=new_enum)
def downgrade():
op.alter_column("quark_ip_addresses", "address_type",
existing_type=new_enum,
type_=existing_enum)

View File

@@ -1 +1 @@
a0798b3b7418
3f0c11478a5d

View File

@@ -167,7 +167,7 @@ class IPAddress(BASEV2, models.HasId):
used_by_tenant_id = sa.Column(sa.String(255))
address_type = sa.Column(sa.Enum(ip_types.FIXED, ip_types.FLOATING,
ip_types.SHARED,
ip_types.SHARED, ip_types.SCALING,
name="quark_ip_address_types"))
associations = orm.relationship(PortIpAssociation, backref="ip_address")
transaction_id = sa.Column(sa.Integer(),
@@ -246,19 +246,13 @@ flip_to_fixed_ip_assoc_tbl = sa.Table(
orm.mapper(FloatingToFixedIPAssociation, flip_to_fixed_ip_assoc_tbl)
IPAddress.fixed_ip = orm.relationship("IPAddress",
secondary=flip_to_fixed_ip_assoc_tbl,
primaryjoin=(IPAddress.id ==
flip_to_fixed_ip_assoc_tbl
.c.floating_ip_address_id
and
flip_to_fixed_ip_assoc_tbl
.c.floating_ip_address_id ==
1),
secondaryjoin=(IPAddress.id ==
flip_to_fixed_ip_assoc_tbl
.c.fixed_ip_address_id),
uselist=False)
# Many-to-many self-referential relationship giving a floating/scaling IP
# its list of associated fixed IPs (uselist=True is the scaling-IP analogue
# of the older one-to-one IPAddress.fixed_ip relationship).
# NOTE(review): the "and" in this primaryjoin is evaluated by Python, not
# compiled into SQL AND — "A and B" yields a single operand expression, so
# the "floating_ip_address_id == 1" clause is suspect.  sqlalchemy.and_()
# is likely what was intended; the pre-existing IPAddress.fixed_ip
# relationship carries the same expression — confirm both.
IPAddress.fixed_ips = orm.relationship(
    "IPAddress", secondary=flip_to_fixed_ip_assoc_tbl,
    primaryjoin=(IPAddress.id == flip_to_fixed_ip_assoc_tbl
                 .c.floating_ip_address_id and flip_to_fixed_ip_assoc_tbl
                 .c.floating_ip_address_id == 1),
    secondaryjoin=(IPAddress.id == flip_to_fixed_ip_assoc_tbl
                   .c.fixed_ip_address_id), uselist=True)
class Route(BASEV2, models.HasTenant, models.HasId, IsHazTags):

View File

@@ -49,10 +49,19 @@ class UnicornDriver(object):
def get_name(cls):
return "Unicorn"
def register_floating_ip(self, floating_ip, port, fixed_ip):
def register_floating_ip(self, floating_ip, port_fixed_ips):
"""Register a floating ip with Unicorn
:param floating_ip: The quark.db.models.IPAddress to register
:param port_fixed_ips: A dictionary containing the port and fixed ips
to associate the floating IP with. Has the structure of:
{"<id of port>": {"port": <quark.db.models.Port>,
"fixed_ip": "<fixed ip address>"}}
:return: None
"""
url = CONF.QUARK.floating_ip_base_url
timeout = CONF.QUARK.unicorn_api_timeout_seconds
req = self._build_request_body(floating_ip, port, fixed_ip)
req = self._build_request_body(floating_ip, port_fixed_ips)
try:
LOG.info("Calling unicorn to register floating ip: %s %s"
@@ -70,14 +79,23 @@ class UnicornDriver(object):
LOG.error("register_floating_ip: %s" % msg)
raise ex.RegisterFloatingIpFailure(id=floating_ip.id)
def update_floating_ip(self, floating_ip, port, fixed_ip):
def update_floating_ip(self, floating_ip, port_fixed_ips):
"""Update an existing floating ip with Unicorn
:param floating_ip: The quark.db.models.IPAddress to update
:param port_fixed_ips: A dictionary containing the port and fixed ips
to associate the floating IP with. Has the structure of:
{"<id of port>": {"port": <quark.db.models.Port>,
"fixed_ip": "<fixed ip address>"}}
:return: None
"""
url = "%s/%s" % (CONF.QUARK.floating_ip_base_url,
floating_ip["address_readable"])
timeout = CONF.QUARK.unicorn_api_timeout_seconds
req = self._build_request_body(floating_ip, port, fixed_ip)
req = self._build_request_body(floating_ip, port_fixed_ips)
try:
LOG.info("Calling unicorn to register floating ip: %s %s"
LOG.info("Calling unicorn to update floating ip: %s %s"
% (url, req))
r = requests.put(url, data=json.dumps(req), timeout=timeout)
except Exception as e:
@@ -93,6 +111,11 @@ class UnicornDriver(object):
raise ex.RegisterFloatingIpFailure(id=floating_ip.id)
def remove_floating_ip(self, floating_ip):
"""Register a floating ip with Unicorn
:param floating_ip: The quark.db.models.IPAddress to remove
:return: None
"""
url = "%s/%s" % (CONF.QUARK.floating_ip_base_url,
floating_ip.address_readable)
timeout = CONF.QUARK.unicorn_api_timeout_seconds
@@ -115,8 +138,8 @@ class UnicornDriver(object):
LOG.error("remove_floating_ip: %s" % msg)
raise ex.RemoveFloatingIpFailure(id=floating_ip.id)
@staticmethod
def _build_request_body(floating_ip, port, fixed_ip):
@classmethod
def _build_fixed_ips(cls, port):
fixed_ips = [{"ip_address": ip.address_readable,
"version": ip.version,
"subnet_id": ip.subnet.id,
@@ -124,14 +147,27 @@ class UnicornDriver(object):
"address_type": ip.address_type}
for ip in port.ip_addresses
if (ip.get("address_type") == ip_types.FIXED)]
return fixed_ips
@classmethod
def _build_endpoints(cls, port_fixed_ips):
endpoints = []
for port_id in port_fixed_ips:
port = port_fixed_ips[port_id]["port"]
fixed_ip = port_fixed_ips[port_id]["fixed_ip"]
endpoint_port = {"uuid": port.id,
"name": port.name,
"network_uuid": port.network_id,
"mac_address": port.mac_address,
"device_id": port.device_id,
"device_owner": port.device_owner,
"fixed_ip": cls._build_fixed_ips(port)}
endpoints.append({"port": endpoint_port,
"private_ip": fixed_ip.address_readable})
return endpoints
@classmethod
def _build_request_body(cls, floating_ip, port_fixed_ips):
content = {"public_ip": floating_ip["address_readable"],
"endpoints": [
{"port": {"uuid": port.id,
"name": port.name,
"network_uuid": port.network_id,
"mac_address": port.mac_address,
"device_id": port.device_id,
"device_owner": port.device_owner,
"fixed_ip": fixed_ips},
"private_ip": fixed_ip.address_readable}]}
"endpoints": cls._build_endpoints(port_fixed_ips)}
return {"floating_ip": content}

View File

@@ -172,6 +172,10 @@ class FloatingIpNotFound(n_exc.NotFound):
message = _("Floating IP %(id)s not found.")
class ScalingIpNotFound(n_exc.NotFound):
    """Raised when a scaling IP lookup by id matches nothing."""
    message = _("Scaling IP %(id)s not found.")
class RemoveFloatingIpFailure(n_exc.NeutronException):
    """Raised when the backing driver fails to remove a floating IP."""
    message = _("An error occurred when trying to remove the "
                "floating IP %(id)s.")
@@ -190,6 +194,10 @@ class FixedIpDoesNotExistsForPort(n_exc.BadRequest):
message = _("Fixed IP %(fixed_ip)s does not exist on Port %(port_id)s")
class PortAlreadyContainsScalingIp(n_exc.Conflict):
    """Raised when associating a scaling IP to an already-scaled port."""
    message = _("Port %(port_id)s already has an associated scaling IP.")
class NoAvailableFixedIpsForPort(n_exc.Conflict):
    """Raised when a port has no free fixed IP to back a flip/scip."""
    message = _("There are no available fixed IPs for port %(port_id)s")

View File

@@ -749,9 +749,9 @@ class QuarkIpam(object):
# fixed IP address.
context.session.flush()
for ip in ips_to_remove:
if ip["address_type"] == ip_types.FLOATING:
if ip.fixed_ip:
db_api.floating_ip_disassociate_fixed_ip(context, ip)
if ip["address_type"] in (ip_types.FLOATING, ip_types.SCALING):
if ip.fixed_ips:
db_api.floating_ip_disassociate_all_fixed_ips(context, ip)
driver = registry.DRIVER_REGISTRY.get_driver()
driver.remove_floating_ip(ip)
else:

View File

@@ -130,7 +130,8 @@ class Plugin(neutron_plugin_base_v2.NeutronPluginBaseV2,
"provider", "ip_policies", "quotas",
"networks_quark", "router",
"ip_availabilities", "ports_quark",
"floatingip", "segment_allocation_ranges"]
"floatingip", "segment_allocation_ranges",
"scalingip"]
def __init__(self):
LOG.info("Starting quark plugin")
@@ -472,3 +473,29 @@ class Plugin(neutron_plugin_base_v2.NeutronPluginBaseV2,
def delete_segment_allocation_range(self, context, id):
segment_allocation_ranges.delete_segment_allocation_range(
context, id)
def create_scalingip(self, context, scalingip):
self._fix_missing_tenant_id(context, scalingip["scalingip"])
return floating_ips.create_scalingip(context, scalingip["scalingip"])
@sessioned
def update_scalingip(self, context, id, scalingip):
return floating_ips.update_scalingip(context, id,
scalingip["scalingip"])
@sessioned
def get_scalingip(self, context, id, fields=None):
return floating_ips.get_scalingip(context, id, fields)
@sessioned
def delete_scalingip(self, context, id):
return floating_ips.delete_scalingip(context, id)
@sessioned
def get_scalingips(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
return floating_ips.get_scalingips(context, filters=filters,
fields=fields, sorts=sorts,
limit=limit, marker=marker,
page_reverse=page_reverse)

View File

@@ -40,6 +40,276 @@ quark_router_opts = [
CONF.register_opts(quark_router_opts, 'QUARK')
def _get_network(context, network_id):
    """Look up a network by id or raise NetworkNotFound."""
    network = db_api.network_find(context, id=network_id, scope=db_api.ONE)
    if network:
        return network
    raise n_exc.NetworkNotFound(net_id=network_id)
def _get_port(context, port_id):
    """Look up a port by id, requiring it to have at least one address.

    :raises PortNotFound: when no such port exists.
    :raises NoAvailableFixedIpsForPort: when the port has no addresses.
    """
    port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
    if not port:
        raise n_exc.PortNotFound(port_id=port_id)
    # An empty/None ip_addresses collection is falsy, so one check covers
    # both the "no list" and "empty list" cases of the original code.
    if not port.ip_addresses:
        raise q_exc.NoAvailableFixedIpsForPort(port_id=port_id)
    return port
def _get_fixed_ip(context, given_fixed_ip, port):
    """Resolve the fixed IP on *port* that a flip/scip will map to.

    When *given_fixed_ip* is falsy, the next unused FIXED address on the
    port is auto-selected; otherwise the named address must exist on the
    port and not already back a floating/scaling IP.

    :raises NoAvailableFixedIpsForPort: no free fixed IP to auto-assign.
    :raises FixedIpDoesNotExistsForPort: the requested address is absent.
    :raises PortAlreadyContainsFloatingIp: the requested address already
        backs a floating or scaling IP on this port.
    """
    if not given_fixed_ip:
        fixed_ip = _get_next_available_fixed_ip(port)
        if not fixed_ip:
            raise q_exc.NoAvailableFixedIpsForPort(
                port_id=port.id)
    else:
        fixed_ip = next((ip for ip in port.ip_addresses
                         if (ip['address_readable'] == given_fixed_ip and
                             ip.get('address_type') == ip_types.FIXED)),
                        None)
        if not fixed_ip:
            raise q_exc.FixedIpDoesNotExistsForPort(
                fixed_ip=given_fixed_ip, port_id=port.id)
        # NOTE(review): ip.fixed_ip may be None for a flip with no current
        # association, in which case the subscript below would raise
        # TypeError — confirm whether such rows can reach this path.
        if any(ip for ip in port.ip_addresses
               if (ip.get('address_type') in (ip_types.FLOATING,
                                              ip_types.SCALING) and
                   ip.fixed_ip['address_readable'] == given_fixed_ip)):
            raise q_exc.PortAlreadyContainsFloatingIp(
                port_id=port.id)
    return fixed_ip
def _allocate_ip(context, network, port, requested_ip_address, address_type):
    """Allocate a new IP of *address_type* on *network* via IPAM.

    :param port: optional port model; its id is handed to IPAM when given.
    :param requested_ip_address: optional specific address to allocate.
    :returns: the newly allocated IPAddress model.
    """
    candidate_ips = [requested_ip_address] if requested_ip_address else []
    seg_name = CONF.QUARK.floating_ip_segment_name
    strategy_name = CONF.QUARK.floating_ip_ipam_strategy
    # 'NETWORK' means "defer to the strategy configured on the network".
    if strategy_name.upper() == 'NETWORK':
        strategy_name = network.get("ipam_strategy")
    port_id = port.id if port else port
    ipam_driver = ipam.IPAM_REGISTRY.get_strategy(strategy_name)
    new_addresses = []
    ipam_driver.allocate_ip_address(context, new_addresses, network.id,
                                    port_id, CONF.QUARK.ipam_reuse_after,
                                    seg_name, version=4,
                                    ip_addresses=candidate_ips,
                                    address_type=address_type)
    return new_addresses[0]
def _get_next_available_fixed_ip(port):
    """Pick the oldest-allocated FIXED address on *port* not already
    backing a floating/scaling IP, or None when none is free."""
    flips = []
    fixed_ips = []
    for ip in port.ip_addresses:
        kind = ip.get('address_type')
        if kind in (ip_types.FLOATING, ip_types.SCALING):
            flips.append(ip)
        elif kind == ip_types.FIXED:
            fixed_ips.append(ip)
    if not fixed_ips:
        return None
    used = [flip.fixed_ip.address for flip in flips
            if flip and flip.fixed_ip]
    for candidate in sorted(fixed_ips,
                            key=lambda ip: ip.get('allocated_at')):
        if candidate.address not in used:
            return candidate
    return None
def _get_ips_by_type(context, ip_type, filters=None, fields=None):
    """Find all non-deallocated IPs of *ip_type* matching *filters*.

    Note: updates the passed-in *filters* dict in place, matching the
    behavior callers saw before this helper existed.
    """
    filters = filters or {}
    filters.update(_deallocated=False, address_type=ip_type)
    return db_api.floating_ip_find(context, scope=db_api.ALL, **filters)
def _create_flip(context, flip, port_fixed_ips):
    """Associates the flip with ports and creates it with the flip driver

    :param context: neutron api request context.
    :param flip: quark.db.models.IPAddress object representing a floating IP
    :param port_fixed_ips: dictionary of the structure:
        {"<id of port>": {"port": <quark.db.models.Port>,
                          "fixed_ip": "<fixed ip address>"}}
    :return: None
    """
    # Nothing to associate or register when no ports were requested.
    if not port_fixed_ips:
        return
    context.session.begin()
    try:
        ports = [entry['port'] for entry in port_fixed_ips.values()]
        flip = db_api.port_associate_ip(context, ports, flip,
                                        port_fixed_ips.keys())
        for entry in port_fixed_ips.values():
            flip = db_api.floating_ip_associate_fixed_ip(
                context, flip, entry['fixed_ip'])
        # Register with the driver inside the transaction so a driver
        # failure rolls back the DB associations.
        registry.DRIVER_REGISTRY.get_driver().register_floating_ip(
            flip, port_fixed_ips)
        context.session.commit()
    except Exception:
        context.session.rollback()
        raise
def _get_flip_fixed_ip_by_port_id(flip, port_id):
for fixed_ip in flip.fixed_ips:
if fixed_ip.ports[0].id == port_id:
return fixed_ip
def _update_flip(context, flip_id, ip_type, requested_ports):
    """Update a flip based IPAddress

    Computes the diff between the flip's current port associations and
    *requested_ports*, applies additions/removals in one DB transaction,
    and registers/updates/removes the IP with the flip driver accordingly.

    :param context: neutron api request context.
    :param flip_id: id of the flip or scip
    :param ip_type: ip_types.FLOATING | ip_types.SCALING
    :param requested_ports: list of dictionaries, each of the structure:
        {"port_id": "<id of port>", "fixed_ip": "<fixed ip address>"}
    :return: quark.models.IPAddress
    """
    context.session.begin()
    try:
        flip = db_api.floating_ip_find(context, id=flip_id,
                                       scope=db_api.ONE)
        if not flip:
            if ip_type == ip_types.SCALING:
                raise q_exc.ScalingIpNotFound(id=flip_id)
            raise q_exc.FloatingIpNotFound(id=flip_id)
        current_ports = flip.ports

        # Determine what ports are being removed, being added, and remain
        req_port_ids = [request_port.get('port_id')
                        for request_port in requested_ports]
        curr_port_ids = [curr_port.id for curr_port in current_ports]
        added_port_ids = [port_id for port_id in req_port_ids
                          if port_id and port_id not in curr_port_ids]
        removed_port_ids = [port_id for port_id in curr_port_ids
                            if port_id not in req_port_ids]
        remaining_port_ids = set(curr_port_ids) - set(removed_port_ids)

        # Validations just for floating ip types (a floating IP is
        # one-to-one, so re-requesting the same single port is an error).
        if (ip_type == ip_types.FLOATING and curr_port_ids and
                curr_port_ids == req_port_ids):
            d = dict(flip_id=flip_id, port_id=curr_port_ids[0])
            raise q_exc.PortAlreadyAssociatedToFloatingIp(**d)
        if (ip_type == ip_types.FLOATING and
                not curr_port_ids and not req_port_ids):
            raise q_exc.FloatingIpUpdateNoPortIdSupplied()

        port_fixed_ips = {}
        # Keep the ports and fixed ips that have not changed
        for port_id in remaining_port_ids:
            port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
            fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id)
            port_fixed_ips[port_id] = {'port': port, 'fixed_ip': fixed_ip}
        # Disassociate the ports and fixed ips from the flip that were
        # associated to the flip but are not anymore
        for port_id in removed_port_ids:
            port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
            flip = db_api.port_disassociate_ip(context, [port], flip)
            fixed_ip = _get_flip_fixed_ip_by_port_id(flip, port_id)
            if fixed_ip:
                flip = db_api.floating_ip_disassociate_fixed_ip(
                    context, flip, fixed_ip)
        # Validate the new ports with the flip and associate the new ports
        # and fixed ips with the flip
        for port_id in added_port_ids:
            port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
            if not port:
                raise n_exc.PortNotFound(port_id=port_id)
            if any(ip for ip in port.ip_addresses
                   if (ip.get('address_type') == ip_types.FLOATING)):
                raise q_exc.PortAlreadyContainsFloatingIp(port_id=port_id)
            if any(ip for ip in port.ip_addresses
                   if (ip.get('address_type') == ip_types.SCALING)):
                raise q_exc.PortAlreadyContainsScalingIp(port_id=port_id)
            fixed_ip = _get_next_available_fixed_ip(port)
            LOG.info('new fixed ip: %s' % fixed_ip)
            if not fixed_ip:
                raise q_exc.NoAvailableFixedIpsForPort(port_id=port_id)
            port_fixed_ips[port_id] = {'port': port, 'fixed_ip': fixed_ip}
            flip = db_api.port_associate_ip(context, [port], flip,
                                            [port_id])
            flip = db_api.floating_ip_associate_fixed_ip(context, flip,
                                                         fixed_ip)
        flip_driver = registry.DRIVER_REGISTRY.get_driver()
        # If there are not any remaining ports and no new ones are being
        # added, remove the floating ip from unicorn
        if not remaining_port_ids and not added_port_ids:
            flip_driver.remove_floating_ip(flip)
        # If new ports are being added but there previously was not any
        # ports, then register a new floating ip with the driver because
        # it is assumed it does not exist
        elif added_port_ids and not curr_port_ids:
            flip_driver.register_floating_ip(flip, port_fixed_ips)
        else:
            flip_driver.update_floating_ip(flip, port_fixed_ips)
        context.session.commit()
    except Exception:
        context.session.rollback()
        raise
    # NOTE(blogan): ORM does not seem to update the model to the real state
    # of the database, so I'm doing an explicit refresh for now.
    context.session.refresh(flip)
    return flip
def _delete_flip(context, id, address_type):
    """Deallocate a flip/scip and tear down its associations.

    :param context: neutron api request context.
    :param id: id of the floating or scaling IP.
    :param address_type: ip_types.FLOATING | ip_types.SCALING
    :raises FloatingIpNotFound: no matching, non-deallocated IP exists.
    """
    filters = {'address_type': address_type, '_deallocated': False}
    flip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE,
                                   **filters)
    if not flip:
        # NOTE(review): FloatingIpNotFound is raised even for scaling IPs,
        # unlike _update_flip which raises ScalingIpNotFound — confirm
        # whether that distinction is wanted here too.
        raise q_exc.FloatingIpNotFound(id=id)

    current_ports = flip.ports
    # A floating IP is one-to-one, so only its first port is detached;
    # a scaling IP may span many ports, so all of them are detached.
    if address_type == ip_types.FLOATING:
        if current_ports:
            current_ports = [flip.ports[0]]
    elif address_type == ip_types.SCALING:
        current_ports = flip.ports

    context.session.begin()
    try:
        strategy_name = flip.network.get('ipam_strategy')
        ipam_driver = ipam.IPAM_REGISTRY.get_strategy(strategy_name)
        ipam_driver.deallocate_ip_address(context, flip)
        if current_ports:
            db_api.port_disassociate_ip(context, current_ports, flip)
        if flip.fixed_ips:
            db_api.floating_ip_disassociate_all_fixed_ips(context, flip)
        context.session.commit()
    except Exception:
        context.session.rollback()
        raise

    # Driver cleanup happens outside the DB transaction: failure here is
    # logged rather than raised so the DB state stays consistent.
    try:
        driver = registry.DRIVER_REGISTRY.get_driver()
        driver.remove_floating_ip(flip)
    except Exception as e:
        # NOTE(review): e.message is Python-2 only; str(e) is portable.
        LOG.error('There was an error when trying to delete the floating ip '
                  'on the unicorn API. The ip has been cleaned up, but '
                  'may need to be handled manually in the unicorn API. '
                  'Error: %s' % e.message)
def create_floatingip(context, content):
"""Allocate or reallocate a floating IP.
@@ -54,91 +324,25 @@ def create_floatingip(context, content):
"""
LOG.info('create_floatingip %s for tenant %s and body %s' %
(id, context.tenant_id, content))
tenant_id = content.get('tenant_id')
network_id = content.get('floating_network_id')
fixed_ip_address = content.get('fixed_ip_address')
ip_address = content.get('floating_ip_address')
port_id = content.get('port_id')
if not tenant_id:
tenant_id = context.tenant_id
# TODO(blogan): Since the extension logic will reject any requests without
# floating_network_id, is this still needed?
if not network_id:
raise n_exc.BadRequest(resource='floating_ip',
msg='floating_network_id is required.')
network = db_api.network_find(context, id=network_id, scope=db_api.ONE)
if not network:
raise n_exc.NetworkNotFound(net_id=network_id)
fixed_ip = None
fixed_ip_address = content.get('fixed_ip_address')
ip_address = content.get('floating_ip_address')
port_id = content.get('port_id')
port = None
port_fixed_ip = {}
network = _get_network(context, network_id)
if port_id:
port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
if not port:
raise n_exc.PortNotFound(port_id=port_id)
if not port.ip_addresses or len(port.ip_addresses) == 0:
raise q_exc.NoAvailableFixedIpsForPort(port_id=port_id)
if not fixed_ip_address:
fixed_ip = _get_next_available_fixed_ip(port)
if not fixed_ip:
raise q_exc.NoAvailableFixedIpsForPort(
port_id=port_id)
else:
fixed_ip = next((ip for ip in port.ip_addresses
if (ip['address_readable'] == fixed_ip_address and
ip.get('address_type') == ip_types.FIXED)),
None)
if not fixed_ip:
raise q_exc.FixedIpDoesNotExistsForPort(
fixed_ip=fixed_ip_address, port_id=port_id)
if any(ip for ip in port.ip_addresses
if (ip.get('address_type') == ip_types.FLOATING and
ip.fixed_ip['address_readable'] == fixed_ip_address)):
raise q_exc.PortAlreadyContainsFloatingIp(
port_id=port_id)
new_addresses = []
ip_addresses = []
if ip_address:
ip_addresses.append(ip_address)
seg_name = CONF.QUARK.floating_ip_segment_name
strategy_name = CONF.QUARK.floating_ip_ipam_strategy
if strategy_name.upper() == 'NETWORK':
strategy_name = network.get("ipam_strategy")
ipam_driver = ipam.IPAM_REGISTRY.get_strategy(strategy_name)
ipam_driver.allocate_ip_address(context, new_addresses, network_id,
port_id, CONF.QUARK.ipam_reuse_after,
seg_name, version=4,
ip_addresses=ip_addresses,
address_type=ip_types.FLOATING)
flip = new_addresses[0]
if fixed_ip and port:
context.session.begin()
try:
flip = db_api.port_associate_ip(context, [port], flip, [port_id])
flip = db_api.floating_ip_associate_fixed_ip(context, flip,
fixed_ip)
flip_driver = registry.DRIVER_REGISTRY.get_driver()
flip_driver.register_floating_ip(flip, port, fixed_ip)
context.session.commit()
except Exception:
context.session.rollback()
raise
port = _get_port(context, port_id)
fixed_ip = _get_fixed_ip(context, fixed_ip_address, port)
port_fixed_ip = {port.id: {'port': port, 'fixed_ip': fixed_ip}}
flip = _allocate_ip(context, network, port, ip_address, ip_types.FLOATING)
_create_flip(context, flip, port_fixed_ip)
return v._make_floating_ip_dict(flip, port_id)
@@ -164,78 +368,11 @@ def update_floatingip(context, id, content):
raise n_exc.BadRequest(resource='floating_ip',
msg='port_id is required.')
port_id = content.get('port_id')
port = None
fixed_ip = None
current_port = None
context.session.begin()
try:
flip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE)
if not flip:
raise q_exc.FloatingIpNotFound(id=id)
current_ports = flip.ports
if current_ports and len(current_ports) > 0:
current_port = current_ports[0]
if not port_id and not current_port:
raise q_exc.FloatingIpUpdateNoPortIdSupplied()
if port_id:
port = db_api.port_find(context, id=port_id, scope=db_api.ONE)
if not port:
raise n_exc.PortNotFound(port_id=port_id)
if any(ip for ip in port.ip_addresses
if (ip.get('address_type') == ip_types.FLOATING)):
raise q_exc.PortAlreadyContainsFloatingIp(port_id=port_id)
if current_port and current_port.id == port_id:
d = dict(flip_id=id, port_id=port_id)
raise q_exc.PortAlreadyAssociatedToFloatingIp(**d)
fixed_ip = _get_next_available_fixed_ip(port)
LOG.info('new fixed ip: %s' % fixed_ip)
if not fixed_ip:
raise q_exc.NoAvailableFixedIpsForPort(port_id=port_id)
LOG.info('current ports: %s' % current_ports)
if current_port:
flip = db_api.port_disassociate_ip(context, [current_port], flip)
if flip.fixed_ip:
flip = db_api.floating_ip_disassociate_fixed_ip(context, flip)
if port:
flip = db_api.port_associate_ip(context, [port], flip, [port_id])
flip = db_api.floating_ip_associate_fixed_ip(context, flip,
fixed_ip)
flip_driver = registry.DRIVER_REGISTRY.get_driver()
if port:
if current_port:
flip_driver.update_floating_ip(flip, port, fixed_ip)
else:
flip_driver.register_floating_ip(flip, port, fixed_ip)
else:
flip_driver.remove_floating_ip(flip)
context.session.commit()
except (q_exc.RegisterFloatingIpFailure, q_exc.RemoveFloatingIpFailure):
context.session.rollback()
raise
# Note(alanquillin) The ports parameters on the model is not
# properly getting cleaned up when removed. Manually cleaning them up.
# Need to fix the db api to correctly update the model.
if not port:
flip.ports = []
return v._make_floating_ip_dict(flip, port_id)
requested_ports = []
if content.get('port_id'):
requested_ports = [{'port_id': content.get('port_id')}]
flip = _update_flip(context, id, ip_types.FLOATING, requested_ports)
return v._make_floating_ip_dict(flip)
def delete_floatingip(context, id):
@@ -247,43 +384,7 @@ def delete_floatingip(context, id):
LOG.info('delete_floatingip %s for tenant %s' % (id, context.tenant_id))
filters = {'address_type': ip_types.FLOATING, '_deallocated': False}
flip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE, **filters)
if not flip:
raise q_exc.FloatingIpNotFound(id=id)
current_ports = flip.ports
current_port = None
if current_ports and len(current_ports) > 0:
current_port = current_ports[0]
context.session.begin()
try:
strategy_name = flip.network.get('ipam_strategy')
ipam_driver = ipam.IPAM_REGISTRY.get_strategy(strategy_name)
ipam_driver.deallocate_ip_address(context, flip)
if current_port:
flip = db_api.port_disassociate_ip(context, [current_port],
flip)
if flip.fixed_ip:
flip = db_api.floating_ip_disassociate_fixed_ip(context, flip)
context.session.commit()
except Exception:
context.session.rollback()
raise
try:
driver = registry.DRIVER_REGISTRY.get_driver()
driver.remove_floating_ip(flip)
except Exception as e:
LOG.error('There was an error when trying to delete the floating ip '
'on the unicorn API. The ip has been cleaned up, but '
'may need to be handled manually in the unicorn API. '
'Error: %s' % e.message)
_delete_flip(context, id, ip_types.FLOATING)
def get_floatingip(context, id, fields=None):
@@ -337,14 +438,8 @@ def get_floatingips(context, filters=None, fields=None, sorts=None, limit=None,
LOG.info('get_floatingips for tenant %s filters %s fields %s' %
(context.tenant_id, filters, fields))
if filters is None:
filters = {}
filters['_deallocated'] = False
filters['address_type'] = ip_types.FLOATING
floating_ips = db_api.floating_ip_find(context, scope=db_api.ALL,
**filters)
floating_ips = _get_ips_by_type(context, ip_types.FLOATING,
filters=filters, fields=fields)
return [v._make_floating_ip_dict(flip) for flip in floating_ips]
@@ -383,18 +478,113 @@ def get_floatingips_count(context, filters=None):
return count
def create_scalingip(context, content):
    """Allocate or reallocate a scaling IP.

    Defect fixed: this span interleaved the new create_scalingip with
    removed-diff residue of the old module-level
    _get_next_available_fixed_ip; only the new function remains.

    :param context: neutron api request context.
    :param content: dictionary describing the scaling ip, with keys
        as listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py.  All keys will be populated.

    :returns: Dictionary containing details for the new scaling IP.  If
        values are declared in the fields parameter, then only those keys
        will be present.
    """
    LOG.info('create_scalingip for tenant %s and body %s',
             context.tenant_id, content)
    network_id = content.get('scaling_network_id')
    ip_address = content.get('scaling_ip_address')
    requested_ports = content.get('ports', [])

    network = _get_network(context, network_id)
    # Resolve each requested port and the fixed ip it will map through.
    port_fixed_ips = {}
    for req_port in requested_ports:
        port = _get_port(context, req_port['port_id'])
        fixed_ip = _get_fixed_ip(context, req_port.get('fixed_ip_address'),
                                 port)
        port_fixed_ips[port.id] = {"port": port, "fixed_ip": fixed_ip}
    scip = _allocate_ip(context, network, None, ip_address, ip_types.SCALING)
    _create_flip(context, scip, port_fixed_ips)
    return v._make_scaling_ip_dict(scip)
def update_scalingip(context, id, content):
    """Update an existing scaling IP.

    :param context: neutron api request context.
    :param id: id of the scaling ip
    :param content: dictionary with keys indicating fields to update.
        valid keys are those that have a value of True for 'allow_put'
        as listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py.

    :returns: Dictionary containing details for the new scaling IP.  If
        values are declared in the fields parameter, then only those keys
        will be present.
    """
    LOG.info('update_scalingip %s for tenant %s and body %s' %
             (id, context.tenant_id, content))
    ports = content.get('ports', [])
    updated = _update_flip(context, id, ip_types.SCALING, ports)
    return v._make_scaling_ip_dict(updated)
def delete_scalingip(context, id):
    """Deallocate a scaling IP.

    :param context: neutron api request context.
    :param id: id of the scaling ip
    """
    # Lazy logging args instead of eager '%' interpolation (matches
    # create_scalingip).
    LOG.info('delete_scalingip %s for tenant %s', id, context.tenant_id)
    _delete_flip(context, id, ip_types.SCALING)
def get_scalingip(context, id, fields=None):
    """Retrieve a scaling IP.

    :param context: neutron api request context.
    :param id: The UUID of the scaling IP.
    :param fields: a list of strings that are valid keys in a
        scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
        object in neutron/api/v2/attributes.py. Only these fields
        will be returned.

    :returns: Dictionary containing details for the scaling IP. If values
        are declared in the fields parameter, then only those keys will be
        present.

    :raises ScalingIpNotFound: if no non-deallocated scaling IP with the
        given id exists.
    """
    # Lazy logging args instead of eager '%' interpolation (matches
    # create_scalingip).
    LOG.info('get_scalingip %s for tenant %s', id, context.tenant_id)
    # Scaling IPs share the floating IP tables; the SCALING address_type
    # filter keeps floating IPs out of the result.
    filters = {'address_type': ip_types.SCALING, '_deallocated': False}
    scaling_ip = db_api.floating_ip_find(context, id=id, scope=db_api.ONE,
                                         **filters)
    if not scaling_ip:
        raise q_exc.ScalingIpNotFound(id=id)
    return v._make_scaling_ip_dict(scaling_ip)
def get_scalingips(context, filters=None, fields=None, sorts=None, limit=None,
                   marker=None, page_reverse=False):
    """Retrieve a list of scaling ips.

    :param context: neutron api request context.
    :param filters: a dictionary with keys that are valid keys for
        a scaling ip as listed in the RESOURCE_ATTRIBUTE_MAP object
        in neutron/api/v2/attributes.py. Values in this dictionary
        are an iterable containing values that will be used for an exact
        match comparison for that value. Each result returned by this
        function will have matched one of the values for each key in
        filters.
    :param fields: a list of strings that are valid keys in a
        scaling IP dictionary as listed in the RESOURCE_ATTRIBUTE_MAP
        object in neutron/api/v2/attributes.py. Only these fields
        will be returned.
    :param sorts: accepted for neutron API compatibility; not used here.
    :param limit: accepted for neutron API compatibility; not used here.
    :param marker: accepted for neutron API compatibility; not used here.
    :param page_reverse: accepted for neutron API compatibility; not used
        here.

    :returns: List of scaling IPs that are accessible to the tenant who
        submits the request (as indicated by the tenant id of the context)
        as well as any filters.
    """
    # Lazy logging args instead of eager '%' interpolation (matches
    # create_scalingip).
    LOG.info('get_scalingips for tenant %s filters %s fields %s',
             context.tenant_id, filters, fields)
    scaling_ips = _get_ips_by_type(context, ip_types.SCALING,
                                   filters=filters, fields=fields)
    return [v._make_scaling_ip_dict(scip) for scip in scaling_ips]

View File

@@ -330,7 +330,7 @@ def _make_floating_ip_dict(flip, port_id=None):
if ports and len(ports) > 0:
port_id = None if not ports[0] else ports[0].id
fixed_ip = flip.fixed_ip
fixed_ip = flip.fixed_ips[0] if flip.fixed_ips else None
return {"id": flip.get("id"),
"floating_network_id": flip.get("network_id"),
@@ -340,3 +340,16 @@ def _make_floating_ip_dict(flip, port_id=None):
"tenant_id": flip.get("used_by_tenant_id"),
"status": "RESERVED" if not port_id else "ASSOCIATED",
"port_id": port_id}
def _make_scaling_ip_dict(flip):
# Can an IPAddress.fixed_ip have more than one port associated with it?
ports = [{"port_id": fixed_ip.ports[0].id,
"fixed_ip_address": fixed_ip.address_readable}
for fixed_ip in flip.fixed_ips]
return {"id": flip.get("id"),
"scaling_ip_address": None if not flip else flip.formatted(),
"scaling_network_id": flip.get("network_id"),
"tenant_id": flip.get("used_by_tenant_id"),
"status": flip.get("status"),
"ports": ports}

View File

@@ -0,0 +1,201 @@
import json
import mock
import netaddr
from neutron import context
from oslo_config import cfg
from quark.db import api as db_api
from quark import exceptions as qexceptions
import quark.ipam
from quark import network_strategy
import quark.plugin
import quark.plugin_modules.mac_address_ranges as macrng_api
from quark.tests.functional import base
class BaseFloatingIPTest(base.BaseFunctionalTest):
    """Shared fixture for floating and scaling IP functional tests.

    Creates a public (floating) network/subnet plus a user network with two
    ports, and patches the ``requests`` module inside the unicorn driver so
    no real HTTP calls are made; tests assert against ``self.mock_requests``.
    """
    # Base URL the mocked unicorn driver is configured to hit.
    FAKE_UNICORN_URL = 'http://unicorn.xxx'
    def _setup_mock_requests(self):
        """Point the driver at a fake URL and mock its HTTP layer."""
        cfg.CONF.set_override('floating_ip_base_url', self.FAKE_UNICORN_URL,
                              group='QUARK')
        patcher = mock.patch('quark.drivers.unicorn_driver.requests')
        self.mock_requests = patcher.start()
        self.addCleanup(patcher.stop)
        # Success status codes for the driver's create/delete/update calls.
        self.mock_requests.post.return_value.status_code = 200
        self.mock_requests.delete.return_value.status_code = 204
        self.mock_requests.put.return_value.status_code = 200
    def _build_expected_unicorn_request_body(self, floating_ip_address, ports,
                                             actual_body=None):
        """Build the request body the unicorn driver should have sent.

        :param floating_ip_address: public IP expected in the body.
        :param ports: list of port dicts expected as endpoints.
        :param actual_body: optional body actually POSTed/PUT to the mock;
            when given, ``ports`` is reordered to match its endpoint order
            because the order the plugin sends is non-deterministic.
        :returns: dict shaped like the unicorn API's ``floating_ip`` payload.
        """
        if actual_body:
            # Since the port order is non-deterministic, we need to ensure
            # that the order is correct
            actual_port_ids = [endpoint['port']['uuid'] for endpoint in
                               actual_body['floating_ip']['endpoints']]
            reordered_ports = []
            for port_id in actual_port_ids:
                for port in ports:
                    if port['id'] == port_id:
                        reordered_ports.append(port)
            ports = reordered_ports
        endpoints = []
        for port in ports:
            fixed_ips = []
            for fixed_ip in port['fixed_ips']:
                fixed_ips.append({
                    'ip_address': fixed_ip['ip_address'],
                    'version': self.user_subnet['ip_version'],
                    'subnet_id': self.user_subnet['id'],
                    'cidr': self.user_subnet['cidr'],
                    'address_type': 'fixed'
                })
            # The payload carries the MAC as an integer, not a string.
            port_mac = int(netaddr.EUI(port['mac_address'].replace(':', '-')))
            endpoints.append({
                'port': {
                    'uuid': port['id'],
                    'name': port['name'],
                    'network_uuid': port['network_id'],
                    'mac_address': port_mac,
                    'device_id': port['device_id'],
                    'device_owner': port['device_owner'],
                    'fixed_ip': fixed_ips
                },
                'private_ip': port['fixed_ips'][0]['ip_address']
            })
        body = {'public_ip': floating_ip_address,
                'endpoints': endpoints}
        return {'floating_ip': body}
    def setUp(self):
        """Create the public network, a user network and two user ports."""
        super(BaseFloatingIPTest, self).setUp()
        # Register the public network id in the net strategy so it is
        # treated as a provider network.
        self.public_net_id = "00000000-0000-0000-0000-000000000000"
        net_stat = '{"%s": {}}' % self.public_net_id
        cfg.CONF.set_override('default_net_strategy', net_stat, group='QUARK')
        # Swap in a fresh JSONStrategy and restore the module-level one on
        # cleanup so other tests are unaffected.
        old_strat = db_api.STRATEGY
        def reset_strategy():
            db_api.STRATEGY = old_strat
        db_api.STRATEGY = network_strategy.JSONStrategy()
        self.addCleanup(reset_strategy)
        admin_ctx = context.get_admin_context()
        self._setup_mock_requests()
        self.plugin = quark.plugin.Plugin()
        # A MAC range must exist before ports can be created.
        mac = {'mac_address_range': dict(cidr="AA:BB:CC")}
        macrng_api.create_mac_address_range(admin_ctx, mac)
        with admin_ctx.session.begin():
            tenant = 'rackspace'
            floating_net = dict(name='publicnet', tenant_id=tenant,
                                id=self.public_net_id)
            self.floating_network = db_api.network_create(
                self.context, **floating_net)
            self.pub_net_cidr = "10.1.1.0/24"
            # Floating/scaling IPs are allocated out of this subnet's
            # 'floating_ip' segment.
            floating_subnet = dict(id=self.public_net_id,
                                   cidr=self.pub_net_cidr,
                                   ip_policy=None, tenant_id=tenant,
                                   segment_id='floating_ip',
                                   network_id=self.floating_network.id)
            self.floating_subnet = db_api.subnet_create(
                self.context, **floating_subnet)
        # User-facing network with two ports to associate IPs with.
        user_net = dict(name='user_network', tenant_id='fake')
        self.user_network = self.plugin.create_network(
            self.context, {'network': user_net})
        user_subnet = dict(cidr="192.168.1.0/24",
                           ip_policy=None, tenant_id="fake",
                           network_id=self.user_network['id'])
        self.user_subnet = self.plugin.create_subnet(
            self.context, {'subnet': user_subnet})
        user_port1 = dict(name='user_port1',
                          network_id=self.user_network['id'])
        self.user_port1 = self.plugin.create_port(
            self.context, {'port': user_port1})
        user_port2 = dict(name='user_port2',
                          network_id=self.user_network['id'])
        self.user_port2 = self.plugin.create_port(
            self.context, {'port': user_port2})
class TestFloatingIPs(BaseFloatingIPTest):
    """Functional tests for the floating IP API (one port per IP)."""
    def test_create(self):
        """Create a flip; verify its fields and the unicorn POST body."""
        flip_req = dict(
            floating_network_id=self.floating_network['id'],
            port_id=self.user_port1['id']
        )
        flip_req = {'floatingip': flip_req}
        flip = self.plugin.create_floatingip(self.context, flip_req)
        # The allocated address must come out of the public subnet.
        self.assertIn(netaddr.IPAddress(flip['floating_ip_address']),
                      list(netaddr.IPNetwork(self.pub_net_cidr)))
        self.assertEqual(self.floating_network['id'],
                         flip['floating_network_id'])
        self.assertEqual(self.user_port1['id'], flip['port_id'])
        self.assertEqual(self.user_port1['fixed_ips'][0]['ip_address'],
                         flip['fixed_ip_address'])
        # Creation registers the IP with unicorn via a single POST.
        self.mock_requests.post.assert_called_once_with(
            self.FAKE_UNICORN_URL, data=mock.ANY, timeout=2
        )
        actual_body = json.loads(self.mock_requests.post.call_args[1]['data'])
        unicorn_body = self._build_expected_unicorn_request_body(
            flip['floating_ip_address'], [self.user_port1]
        )
        self.assertEqual(unicorn_body, actual_body,
                         msg="Request to the unicorn API is not what is "
                             "expected.")
        get_flip = self.plugin.get_floatingip(self.context, flip['id'])
        self.assertEqual(flip['floating_ip_address'],
                         get_flip['floating_ip_address'])
    def test_update_floating_ip(self):
        """Move a flip to another port; verify the unicorn PUT body."""
        floating_ip = dict(
            floating_network_id=self.floating_network.id,
            port_id=self.user_port1['id']
        )
        floating_ip = {'floatingip': floating_ip}
        flip = self.plugin.create_floatingip(self.context, floating_ip)
        fixed_ip_address2 = self.user_port2['fixed_ips'][0]['ip_address']
        floating_ip = dict(port_id=self.user_port2['id'],
                           fixed_ip_address=fixed_ip_address2)
        updated_flip = self.plugin.update_floatingip(
            self.context, flip['id'], {"floatingip": floating_ip})
        self.assertEqual(self.floating_network['id'],
                         updated_flip['floating_network_id'])
        # The public address itself must not change on update.
        self.assertEqual(updated_flip['floating_ip_address'],
                         flip['floating_ip_address'])
        self.assertEqual(self.user_port2['id'], updated_flip['port_id'])
        self.assertEqual(self.user_port2['fixed_ips'][0]['ip_address'],
                         updated_flip['fixed_ip_address'])
        # Updates go to <base_url>/<floating_ip_address> via PUT.
        expected_url = '/'.join([self.FAKE_UNICORN_URL,
                                 flip['floating_ip_address']])
        self.mock_requests.put.assert_called_once_with(
            expected_url, data=mock.ANY, timeout=2
        )
        actual_body = json.loads(self.mock_requests.put.call_args[1]['data'])
        unicorn_body = self._build_expected_unicorn_request_body(
            flip['floating_ip_address'], [self.user_port2]
        )
        self.assertEqual(unicorn_body, actual_body,
                         msg="Request to the unicorn API is not what is "
                             "expected.")
        get_flip = self.plugin.get_floatingip(self.context, flip['id'])
        self.assertEqual(flip['floating_ip_address'],
                         get_flip['floating_ip_address'])
    def test_delete_floating_ip(self):
        """Delete a flip; verify the unicorn DELETE and that it is gone."""
        floating_ip = dict(
            floating_network_id=self.floating_network.id,
            port_id=self.user_port1['id']
        )
        flip = self.plugin.create_floatingip(
            self.context, {"floatingip": floating_ip})
        self.plugin.delete_floatingip(self.context, flip['id'])
        expected_url = '/'.join([self.FAKE_UNICORN_URL,
                                 flip['floating_ip_address']])
        self.mock_requests.delete.assert_called_once_with(
            expected_url, timeout=2
        )
        self.assertRaises(qexceptions.FloatingIpNotFound,
                          self.plugin.get_floatingip, self.context, flip['id'])
        flips = self.plugin.get_floatingips(self.context)
        self.assertEqual(0, len(flips))

View File

@@ -0,0 +1,248 @@
import json
import mock
import netaddr
from neutron_lib import exceptions as n_exc
from quark import exceptions as qexceptions
from quark.tests.functional.plugin_modules import test_floating_ips
class TestScalingIP(test_floating_ips.BaseFloatingIPTest):
    """Functional tests for the scaling IP API (many ports per IP)."""
    def setUp(self):
        super(TestScalingIP, self).setUp()
        # Scaling IPs are allocated from the same public network the
        # floating IP fixture creates.
        self.scaling_network = self.floating_network
    def test_create_scaling_ip(self):
        """Create a scip on two ports; verify fields and the POST body."""
        scaling_ip = dict(
            scaling_network_id=self.scaling_network.id,
            ports=[dict(port_id=self.user_port1['id']),
                   dict(port_id=self.user_port2['id'])]
        )
        scaling_ip = {'scalingip': scaling_ip}
        scip = self.plugin.create_scalingip(self.context, scaling_ip)
        # The allocated address must come out of the public subnet.
        self.assertIn(netaddr.IPAddress(scip['scaling_ip_address']),
                      list(netaddr.IPNetwork(self.pub_net_cidr)))
        self.assertEqual(self.scaling_network['id'],
                         scip['scaling_network_id'])
        self.assertEqual(2, len(scip['ports']))
        # Map port_id -> fixed_ip_address since port order is not guaranteed.
        scip_ports = {scip_port['port_id']: scip_port['fixed_ip_address']
                      for scip_port in scip['ports']}
        port1_fixed_ip = self.user_port1['fixed_ips'][0]['ip_address']
        port2_fixed_ip = self.user_port2['fixed_ips'][0]['ip_address']
        self.assertIn(self.user_port1['id'], scip_ports)
        self.assertIn(self.user_port2['id'], scip_ports)
        self.assertIn(port1_fixed_ip, scip_ports.values())
        self.assertIn(port2_fixed_ip, scip_ports.values())
        self.mock_requests.post.assert_called_once_with(
            self.FAKE_UNICORN_URL, data=mock.ANY, timeout=2
        )
        actual_body = json.loads(self.mock_requests.post.call_args[1]['data'])
        unicorn_body = self._build_expected_unicorn_request_body(
            scip['scaling_ip_address'], [self.user_port1, self.user_port2],
            actual_body=actual_body
        )
        self.assertEqual(unicorn_body, actual_body,
                         msg="Request to the unicorn API is not what is "
                         "expected.")
        get_scip = self.plugin.get_scalingip(self.context, scip['id'])
        self.assertEqual(scip['scaling_ip_address'],
                         get_scip['scaling_ip_address'])
    def test_create_with_invalid_scaling_network_id(self):
        """Creating on an unknown network raises NetworkNotFound."""
        scaling_ip = dict(
            scaling_network_id='some-wrong-network-id',
            ports=[dict(port_id=self.user_port1['id']),
                   dict(port_id=self.user_port2['id'])]
        )
        self.assertRaises(n_exc.NetworkNotFound,
                          self.plugin.create_scalingip,
                          self.context, {"scalingip": scaling_ip})
    def test_create_with_scaling_network_invalid_segment(self):
        """Creating on the user network fails to allocate an address."""
        scaling_ip = dict(
            scaling_network_id=self.user_network['id'],
            ports=[dict(port_id=self.user_port1['id']),
                   dict(port_id=self.user_port2['id'])]
        )
        self.assertRaises(n_exc.IpAddressGenerationFailure,
                          self.plugin.create_scalingip,
                          self.context, {"scalingip": scaling_ip})
    def test_update_scaling_ip_add_port(self):
        """Adding a second port triggers a single PUT to unicorn."""
        scaling_ip = dict(
            scaling_network_id=self.scaling_network.id,
            ports=[dict(port_id=self.user_port1['id'])]
        )
        scaling_ip = {'scalingip': scaling_ip}
        scip = self.plugin.create_scalingip(self.context, scaling_ip)
        # Forget the create-time POST so only the update's calls are checked.
        self.mock_requests.reset_mock()
        scaling_ip = dict(ports=[dict(port_id=self.user_port1['id']),
                                 dict(port_id=self.user_port2['id'])])
        updated_scip = self.plugin.update_scalingip(
            self.context, scip['id'], {"scalingip": scaling_ip})
        self.assertEqual(self.scaling_network['id'],
                         updated_scip['scaling_network_id'])
        # The public address itself must not change on update.
        self.assertEqual(updated_scip['scaling_ip_address'],
                         scip['scaling_ip_address'])
        self.assertEqual(2, len(updated_scip['ports']))
        scip_ports = {scip_port['port_id']: scip_port['fixed_ip_address']
                      for scip_port in updated_scip['ports']}
        port1_fixed_ip = self.user_port1['fixed_ips'][0]['ip_address']
        port2_fixed_ip = self.user_port2['fixed_ips'][0]['ip_address']
        self.assertIn(self.user_port1['id'], scip_ports)
        self.assertIn(self.user_port2['id'], scip_ports)
        self.assertIn(port1_fixed_ip, scip_ports.values())
        self.assertIn(port2_fixed_ip, scip_ports.values())
        # An update with remaining ports must be a PUT, never POST/DELETE.
        self.assertFalse(self.mock_requests.post.called)
        self.assertFalse(self.mock_requests.delete.called)
        expected_url = '/'.join([self.FAKE_UNICORN_URL,
                                 scip['scaling_ip_address']])
        self.mock_requests.put.assert_called_once_with(
            expected_url, data=mock.ANY, timeout=2)
        actual_body = json.loads(self.mock_requests.put.call_args[1]['data'])
        unicorn_body = self._build_expected_unicorn_request_body(
            scip['scaling_ip_address'], [self.user_port1, self.user_port2],
            actual_body=actual_body
        )
        self.assertEqual(unicorn_body, actual_body,
                         msg="Request to the unicorn API is not what is "
                         "expected.")
    def test_update_scaling_ip_remove_port_with_remaining_ports(self):
        """Removing one of two ports keeps the IP and PUTs to unicorn."""
        scaling_ip = dict(
            scaling_network_id=self.scaling_network.id,
            ports=[dict(port_id=self.user_port1['id']),
                   dict(port_id=self.user_port2['id'])]
        )
        scaling_ip = {'scalingip': scaling_ip}
        scip = self.plugin.create_scalingip(self.context, scaling_ip)
        # Forget the create-time POST so only the update's calls are checked.
        self.mock_requests.reset_mock()
        scaling_ip = dict(ports=[dict(port_id=self.user_port1['id'])])
        updated_scip = self.plugin.update_scalingip(
            self.context, scip['id'], {"scalingip": scaling_ip})
        self.assertEqual(self.scaling_network['id'],
                         updated_scip['scaling_network_id'])
        self.assertEqual(updated_scip['scaling_ip_address'],
                         scip['scaling_ip_address'])
        self.assertEqual(1, len(updated_scip['ports']))
        scip_ports = {scip_port['port_id']: scip_port['fixed_ip_address']
                      for scip_port in updated_scip['ports']}
        port1_fixed_ip = self.user_port1['fixed_ips'][0]['ip_address']
        self.assertIn(self.user_port1['id'], scip_ports)
        self.assertIn(port1_fixed_ip, scip_ports.values())
        expected_url = '/'.join([self.FAKE_UNICORN_URL,
                                 scip['scaling_ip_address']])
        # Still ports left, so the driver must update, not create/remove.
        self.assertFalse(self.mock_requests.post.called)
        self.assertFalse(self.mock_requests.delete.called)
        self.mock_requests.put.assert_called_once_with(
            expected_url, data=mock.ANY, timeout=2)
        actual_body = json.loads(self.mock_requests.put.call_args[1]['data'])
        unicorn_body = self._build_expected_unicorn_request_body(
            scip['scaling_ip_address'], [self.user_port1],
            actual_body=actual_body
        )
        self.assertEqual(unicorn_body, actual_body,
                         msg="Request to the unicorn API is not what is "
                         "expected.")
    def test_update_scaling_ip_clear_ports(self):
        """Clearing all ports issues a DELETE to unicorn."""
        scaling_ip = dict(
            scaling_network_id=self.scaling_network.id,
            ports=[dict(port_id=self.user_port1['id']),
                   dict(port_id=self.user_port2['id'])]
        )
        scaling_ip = {'scalingip': scaling_ip}
        scip = self.plugin.create_scalingip(self.context, scaling_ip)
        # Forget the create-time POST so only the update's calls are checked.
        self.mock_requests.reset_mock()
        scaling_ip = dict(ports=[])
        updated_scip = self.plugin.update_scalingip(
            self.context, scip['id'], {"scalingip": scaling_ip})
        self.assertEqual(self.scaling_network['id'],
                         updated_scip['scaling_network_id'])
        self.assertEqual(updated_scip['scaling_ip_address'],
                         scip['scaling_ip_address'])
        self.assertEqual(0, len(updated_scip['ports']))
        expected_url = '/'.join([self.FAKE_UNICORN_URL,
                                 scip['scaling_ip_address']])
        # No ports left: the driver must tear the mapping down via DELETE.
        self.assertFalse(self.mock_requests.post.called)
        self.assertFalse(self.mock_requests.put.called)
        self.mock_requests.delete.assert_called_once_with(
            expected_url, timeout=2)
    def test_update_scaling_ip_add_ports_from_none(self):
        """Adding ports to a port-less scip issues a POST to unicorn."""
        scaling_ip = dict(
            scaling_network_id=self.scaling_network.id,
            ports=[]
        )
        scaling_ip = {'scalingip': scaling_ip}
        scip = self.plugin.create_scalingip(self.context, scaling_ip)
        # No unicorn call happens on create with zero ports; reset anyway so
        # only the update's calls are checked.
        self.mock_requests.reset_mock()
        scaling_ip = dict(ports=[dict(port_id=self.user_port1['id']),
                                 dict(port_id=self.user_port2['id'])])
        updated_scip = self.plugin.update_scalingip(
            self.context, scip['id'], {"scalingip": scaling_ip})
        self.assertEqual(self.scaling_network['id'],
                         updated_scip['scaling_network_id'])
        self.assertEqual(updated_scip['scaling_ip_address'],
                         scip['scaling_ip_address'])
        self.assertEqual(2, len(updated_scip['ports']))
        scip_ports = {scip_port['port_id']: scip_port['fixed_ip_address']
                      for scip_port in updated_scip['ports']}
        port1_fixed_ip = self.user_port1['fixed_ips'][0]['ip_address']
        port2_fixed_ip = self.user_port2['fixed_ips'][0]['ip_address']
        self.assertIn(self.user_port1['id'], scip_ports)
        self.assertIn(self.user_port2['id'], scip_ports)
        self.assertIn(port1_fixed_ip, scip_ports.values())
        self.assertIn(port2_fixed_ip, scip_ports.values())
        # Going from zero ports to some ports registers via POST.
        self.assertFalse(self.mock_requests.put.called)
        self.assertFalse(self.mock_requests.delete.called)
        self.mock_requests.post.assert_called_once_with(
            self.FAKE_UNICORN_URL, data=mock.ANY, timeout=2)
        actual_body = json.loads(self.mock_requests.post.call_args[1]['data'])
        unicorn_body = self._build_expected_unicorn_request_body(
            scip['scaling_ip_address'], [self.user_port1, self.user_port2],
            actual_body=actual_body
        )
        self.assertEqual(unicorn_body, actual_body,
                         msg="Request to the unicorn API is not what is "
                         "expected.")
    def test_delete_scaling_ip(self):
        """Delete a scip; verify the unicorn DELETE and that it is gone."""
        scaling_ip = dict(
            scaling_network_id=self.scaling_network.id,
            ports=[dict(port_id=self.user_port1['id']),
                   dict(port_id=self.user_port2['id'])]
        )
        scip = self.plugin.create_scalingip(
            self.context, {"scalingip": scaling_ip})
        self.plugin.delete_scalingip(self.context, scip['id'])
        expected_url = '/'.join([self.FAKE_UNICORN_URL,
                                 scip['scaling_ip_address']])
        self.mock_requests.delete.assert_called_once_with(
            expected_url, timeout=2
        )
        self.assertRaises(qexceptions.ScalingIpNotFound,
                          self.plugin.get_scalingip, self.context, scip['id'])
        scips = self.plugin.get_scalingips(self.context)
        self.assertEqual(0, len(scips))
    def test_scaling_ip_not_in_floating_ip_list(self):
        """Scaling IPs must not leak into the floating IP listing."""
        scaling_ip = dict(
            scaling_network_id=self.scaling_network.id,
            ports=[dict(port_id=self.user_port1['id'])]
        )
        scaling_ip = {'scalingip': scaling_ip}
        self.plugin.create_scalingip(self.context, scaling_ip)
        flips = self.plugin.get_floatingips(self.context)
        self.assertEqual(0, len(flips))
    def test_floating_ip_not_in_scaling_ip_list(self):
        """Floating IPs must not leak into the scaling IP listing."""
        floating_ip = dict(
            floating_network_id=self.scaling_network.id,
            port_id=self.user_port1['id']
        )
        floating_ip = {'floatingip': floating_ip}
        self.plugin.create_floatingip(self.context, floating_ip)
        scips = self.plugin.get_scalingips(self.context)
        self.assertEqual(0, len(scips))

View File

@@ -222,7 +222,7 @@ class TestCreateFloatingIPs(test_quark_plugin.TestQuarkPlugin):
return addr
def _flip_fixed_ip_assoc(context, addr, fixed_ip):
addr.fixed_ip = fixed_ip
addr.fixed_ips.append(fixed_ip)
return addr
with contextlib.nested(
@@ -463,6 +463,19 @@ class TestCreateFloatingIPs(test_quark_plugin.TestQuarkPlugin):
class TestUpdateFloatingIPs(test_quark_plugin.TestQuarkPlugin):
def setUp(self):
super(TestUpdateFloatingIPs, self).setUp()
# NOTE(blogan): yuck yuck yuck, but since the models are being mocked
# and not attached to the session, the refresh call will fail.
old_refresh = self.context.session.refresh
def reset_refresh(context):
context.session.refresh = old_refresh
self.context.session.refresh = mock.Mock()
self.addCleanup(reset_refresh, self.context)
@contextlib.contextmanager
def _stubs(self, flip=None, curr_port=None, new_port=None, ips=None):
curr_port_model = None
@@ -508,7 +521,7 @@ class TestUpdateFloatingIPs(test_quark_plugin.TestQuarkPlugin):
else new_port_model)
def _flip_assoc(context, addr, fixed_ip):
addr.fixed_ip = fixed_ip
addr.fixed_ips.append(fixed_ip)
return addr
def _flip_disassoc(context, addr):