Default net assignment

Adds default network assignment to Quark, along with yanking out
repoze.tm2 for compatibility while reinstating the standard
sqlalchemy-style transaction sessions to eliminate the race condition in
mass-assigning IP addresses under load.

Also fixes ipam_reuse_after, which was previously — and incorrectly —
declared as a boolean config value rather than an integer.
This commit is contained in:
Matt Dietz and John Yolo Perkins
2013-09-24 16:20:59 +00:00
committed by Matt Dietz
parent da5f7b4425
commit c758f21558
16 changed files with 625 additions and 551 deletions

View File

@@ -25,9 +25,9 @@ quark_opts = [
help=_('The client to use to talk to the backend')),
cfg.StrOpt('ipam_driver', default='quark.ipam.QuarkIpam',
help=_('IPAM Implementation to use')),
cfg.BoolOpt('ipam_reuse_after', default=7200,
help=_("Time in seconds til IP and MAC reuse"
"after deallocation.")),
cfg.IntOpt('ipam_reuse_after', default=7200,
help=_("Time in seconds til IP and MAC reuse"
"after deallocation.")),
cfg.StrOpt("strategy_driver",
default='quark.network_strategy.JSONStrategy',
help=_("Tree of network assignment strategy")),

View File

@@ -116,7 +116,7 @@ def _model_query(context, model, filters, fields=None):
# This works even when a non-shared, other-tenant owned network is passed
# in because the authZ checks that happen in Neutron above us yank it back
# out of the result set.
if "tenant_id" not in filters and not context.is_admin:
if not filters and not context.is_admin:
filters["tenant_id"] = [context.tenant_id]
if filters.get("tenant_id"):
@@ -215,7 +215,7 @@ def ip_address_create(context, **address_dict):
@scoped
def ip_address_find(context, **filters):
def ip_address_find(context, lock_mode=False, **filters):
query = context.session.query(models.IPAddress)
ip_shared = filters.pop("shared", None)
@@ -223,6 +223,8 @@ def ip_address_find(context, **filters):
cnt = sql_func.count(models.port_ip_association_table.c.port_id)
stmt = context.session.query(models.IPAddress,
cnt.label("ports_count"))
if lock_mode:
stmt = stmt.with_lockmode("update")
stmt = stmt.outerjoin(models.port_ip_association_table)
stmt = stmt.group_by(models.IPAddress).subquery()
@@ -239,13 +241,14 @@ def ip_address_find(context, **filters):
if filters.get("device_id"):
model_filters.append(models.IPAddress.ports.any(
models.Port.device_id.in_(filters["device_id"])))
return query.filter(*model_filters)
@scoped
def mac_address_find(context, **filters):
def mac_address_find(context, lock_mode=False, **filters):
query = context.session.query(models.MacAddress)
if lock_mode:
query.with_lockmode("update")
model_filters = _model_query(context, models.MacAddress, filters)
return query.filter(*model_filters)
@@ -253,7 +256,7 @@ def mac_address_find(context, **filters):
def mac_address_range_find_allocation_counts(context, address=None):
query = context.session.query(models.MacAddressRange,
sql_func.count(models.MacAddress.address).
label("count"))
label("count")).with_lockmode("update")
query = query.outerjoin(models.MacAddress)
query = query.group_by(models.MacAddressRange)
query = query.order_by("count DESC")
@@ -362,7 +365,7 @@ def network_delete(context, network):
def subnet_find_allocation_counts(context, net_id, **filters):
query = context.session.query(models.Subnet,
sql_func.count(models.IPAddress.address).
label("count"))
label("count")).with_lockmode('update')
query = query.outerjoin(models.Subnet.allocated_ips)
query = query.group_by(models.Subnet)
query = query.order_by("count DESC")

View File

@@ -24,11 +24,14 @@ from neutron.openstack.common import log as logging
from neutron.openstack.common.notifier import api as notifier_api
from neutron.openstack.common import timeutils
from oslo.config import cfg
from quark.db import api as db_api
from quark.db import models
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class QuarkIpam(object):
@@ -60,36 +63,40 @@ class QuarkIpam(object):
if mac_address:
mac_address = netaddr.EUI(mac_address).value
deallocated_mac = db_api.mac_address_find(
context, reuse_after=reuse_after, scope=db_api.ONE,
address=mac_address)
if deallocated_mac:
return db_api.mac_address_update(
context, deallocated_mac, deallocated=False,
deallocated_at=None)
with context.session.begin(subtransactions=True):
deallocated_mac = db_api.mac_address_find(
context, lock_mode=True, reuse_after=reuse_after,
scope=db_api.ONE, address=mac_address)
if deallocated_mac:
return db_api.mac_address_update(
context, deallocated_mac, deallocated=False,
deallocated_at=None)
ranges = db_api.mac_address_range_find_allocation_counts(
context, address=mac_address)
for result in ranges:
rng, addr_count = result
if rng["last_address"] - rng["first_address"] <= addr_count:
continue
with context.session.begin(subtransactions=True):
ranges = db_api.mac_address_range_find_allocation_counts(
context, address=mac_address)
for result in ranges:
rng, addr_count = result
last = rng["last_address"]
first = rng["first_address"]
if last - first <= addr_count:
continue
next_address = None
if mac_address:
next_address = mac_address
else:
address = True
while address:
next_address = rng["next_auto_assign_mac"]
rng["next_auto_assign_mac"] = next_address + 1
address = db_api.mac_address_find(
context, tenant_id=context.tenant_id,
scope=db_api.ONE, address=next_address)
next_address = None
if mac_address:
next_address = mac_address
else:
address = True
while address:
next_address = rng["next_auto_assign_mac"]
rng["next_auto_assign_mac"] = next_address + 1
address = db_api.mac_address_find(
context, tenant_id=context.tenant_id,
scope=db_api.ONE, address=next_address)
address = db_api.mac_address_create(context, address=next_address,
mac_address_range_id=rng["id"])
return address
address = db_api.mac_address_create(
context, address=next_address,
mac_address_range_id=rng["id"])
return address
raise exceptions.MacAddressGenerationFailure(net_id=net_id)
@@ -99,44 +106,52 @@ class QuarkIpam(object):
if ip_address:
ip_address = netaddr.IPAddress(ip_address)
address = db_api.ip_address_find(
elevated, network_id=net_id, reuse_after=reuse_after,
deallocated=True, scope=db_api.ONE, ip_address=ip_address)
if address:
return db_api.ip_address_update(
elevated, address, deallocated=False, deallocated_at=None)
subnet = self._choose_available_subnet(
elevated, net_id, ip_address=ip_address, version=version)
ip_policy_rules = models.IPPolicy.get_ip_policy_rule_set(subnet)
# Creating this IP for the first time
next_ip = None
if ip_address:
next_ip = ip_address
with context.session.begin(subtransactions=True):
address = db_api.ip_address_find(
elevated, network_id=net_id, ip_address=next_ip,
tenant_id=elevated.tenant_id, scope=db_api.ONE)
elevated, network_id=net_id, reuse_after=reuse_after,
deallocated=True, scope=db_api.ONE, ip_address=ip_address,
lock_mode=True)
if address:
raise exceptions.IpAddressGenerationFailure(net_id=net_id)
else:
address = True
while address:
next_ip_int = int(subnet["next_auto_assign_ip"])
next_ip = netaddr.IPAddress(next_ip_int)
if subnet["ip_version"] == 4:
next_ip = next_ip.ipv4()
subnet["next_auto_assign_ip"] = next_ip_int + 1
if ip_policy_rules and next_ip in ip_policy_rules:
continue
updated_address = db_api.ip_address_update(
elevated, address, deallocated=False,
deallocated_at=None)
return updated_address
with context.session.begin(subtransactions=True):
subnet = self._choose_available_subnet(
elevated, net_id, ip_address=ip_address, version=version)
ip_policy_rules = models.IPPolicy.get_ip_policy_rule_set(
subnet)
# Creating this IP for the first time
next_ip = None
if ip_address:
next_ip = ip_address
address = db_api.ip_address_find(
elevated, network_id=net_id, ip_address=next_ip,
tenant_id=elevated.tenant_id, scope=db_api.ONE)
address = db_api.ip_address_create(
elevated, address=next_ip, subnet_id=subnet["id"],
version=subnet["ip_version"], network_id=net_id)
address["deallocated"] = 0
if address:
raise exceptions.IpAddressGenerationFailure(
net_id=net_id)
else:
address = True
while address:
next_ip_int = int(subnet["next_auto_assign_ip"])
next_ip = netaddr.IPAddress(next_ip_int)
if subnet["ip_version"] == 4:
next_ip = next_ip.ipv4()
subnet["next_auto_assign_ip"] = next_ip_int + 1
if ip_policy_rules and next_ip in ip_policy_rules:
continue
address = db_api.ip_address_find(
elevated, network_id=net_id, ip_address=next_ip,
tenant_id=elevated.tenant_id, scope=db_api.ONE)
context.session.add(subnet)
address = db_api.ip_address_create(
elevated, address=next_ip, subnet_id=subnet["id"],
version=subnet["ip_version"], network_id=net_id)
address["deallocated"] = 0
payload = dict(tenant_id=address["tenant_id"],
ip_block_id=address["subnet_id"],
@@ -167,17 +182,19 @@ class QuarkIpam(object):
payload)
def deallocate_ip_address(self, context, port, **kwargs):
for addr in port["ip_addresses"]:
# Note: only deallocate ip if this is the only port mapped to it
if len(addr["ports"]) == 1:
self._deallocate_ip_address(context, addr)
port["ip_addresses"] = []
with context.session.begin(subtransactions=True):
for addr in port["ip_addresses"]:
# Note: only deallocate ip if this is the only port mapped
if len(addr["ports"]) == 1:
self._deallocate_ip_address(context, addr)
port["ip_addresses"] = []
def deallocate_mac_address(self, context, address):
mac = db_api.mac_address_find(context, address=address,
scope=db_api.ONE)
if not mac:
raise exceptions.NotFound(
message="No MAC address %s found" % netaddr.EUI(address))
db_api.mac_address_update(context, mac, deallocated=True,
deallocated_at=timeutils.utcnow())
with context.session.begin(subtransactions=True):
mac = db_api.mac_address_find(context, address=address,
scope=db_api.ONE)
if not mac:
raise exceptions.NotFound(
message="No MAC address %s found" % netaddr.EUI(address))
db_api.mac_address_update(context, mac, deallocated=True,
deallocated_at=timeutils.utcnow())

View File

@@ -18,13 +18,9 @@ v2 Neutron Plug-in API Quark Implementation
"""
from oslo.config import cfg
from sqlalchemy.orm import sessionmaker, scoped_session
from zope import sqlalchemy as zsa
from neutron.db import api as neutron_db_api
from neutron.extensions import securitygroup as sg_ext
from neutron import neutron_plugin_base_v2
from neutron.openstack.common.db.sqlalchemy import session as neutron_session
from neutron import quota
from quark.api import extensions
@@ -79,15 +75,8 @@ class Plugin(neutron_plugin_base_v2.NeutronPluginBaseV2,
"subnets_quark", "provider",
"ip_policies", "quotas"]
def _initDBMaker(self):
# This needs to be called after _ENGINE is configured
session_maker = sessionmaker(bind=neutron_session._ENGINE,
extension=zsa.ZopeTransactionExtension())
neutron_session._MAKER = scoped_session(session_maker)
def __init__(self):
neutron_db_api.configure_db()
self._initDBMaker()
neutron_db_api.register_models(base=models.BASEV2)
def get_mac_address_range(self, context, id, fields=None):

View File

@@ -60,33 +60,34 @@ def create_ip_address(context, ip_address):
raise exceptions.BadRequest(
resource="ip_addresses",
msg="network_id is required if device_ids are supplied.")
if network_id and device_ids:
for device_id in device_ids:
port = db_api.port_find(
context, network_id=network_id, device_id=device_id,
tenant_id=context.tenant_id, scope=db_api.ONE)
ports.append(port)
elif port_ids:
for port_id in port_ids:
port = db_api.port_find(context, id=port_id,
tenant_id=context.tenant_id,
scope=db_api.ONE)
ports.append(port)
with context.session.begin():
if network_id and device_ids:
for device_id in device_ids:
port = db_api.port_find(
context, network_id=network_id, device_id=device_id,
tenant_id=context.tenant_id, scope=db_api.ONE)
ports.append(port)
elif port_ids:
for port_id in port_ids:
port = db_api.port_find(context, id=port_id,
tenant_id=context.tenant_id,
scope=db_api.ONE)
ports.append(port)
if not ports:
raise exceptions.PortNotFound(port_id=port_ids,
net_id=network_id)
if not ports:
raise exceptions.PortNotFound(port_id=port_ids,
net_id=network_id)
address = ipam_driver.allocate_ip_address(
context,
port['network_id'],
port['id'],
CONF.QUARK.ipam_reuse_after,
ip_version,
ip_address)
address = ipam_driver.allocate_ip_address(
context,
port['network_id'],
port['id'],
CONF.QUARK.ipam_reuse_after,
ip_version,
ip_address)
for port in ports:
port["ip_addresses"].append(address)
for port in ports:
port["ip_addresses"].append(address)
return v._make_ip_dict(address)
@@ -95,34 +96,35 @@ def update_ip_address(context, id, ip_address):
LOG.info("update_ip_address %s for tenant %s" %
(id, context.tenant_id))
address = db_api.ip_address_find(
context, id=id, tenant_id=context.tenant_id, scope=db_api.ONE)
with context.session.begin():
address = db_api.ip_address_find(
context, id=id, tenant_id=context.tenant_id, scope=db_api.ONE)
if not address:
raise exceptions.NotFound(
message="No IP address found with id=%s" % id)
old_ports = address['ports']
port_ids = ip_address['ip_address'].get('port_ids')
if port_ids is None:
return v._make_ip_dict(address)
for port in old_ports:
port['ip_addresses'].remove(address)
if port_ids:
ports = db_api.port_find(
context, tenant_id=context.tenant_id, id=port_ids,
scope=db_api.ALL)
# NOTE: could be considered inefficient because we're converting
# to a list to check length. Maybe revisit
if len(ports) != len(port_ids):
if not address:
raise exceptions.NotFound(
message="No ports not found with ids=%s" % port_ids)
for port in ports:
port['ip_addresses'].extend([address])
else:
address["deallocated"] = 1
message="No IP address found with id=%s" % id)
old_ports = address['ports']
port_ids = ip_address['ip_address'].get('port_ids')
if port_ids is None:
return v._make_ip_dict(address)
for port in old_ports:
port['ip_addresses'].remove(address)
if port_ids:
ports = db_api.port_find(
context, tenant_id=context.tenant_id, id=port_ids,
scope=db_api.ALL)
# NOTE: could be considered inefficient because we're converting
# to a list to check length. Maybe revisit
if len(ports) != len(port_ids):
raise exceptions.NotFound(
message="No ports not found with ids=%s" % port_ids)
for port in ports:
port['ip_addresses'].extend([address])
else:
address["deallocated"] = 1
return v._make_ip_dict(address)

View File

@@ -42,26 +42,27 @@ def create_ip_policy(context, ip_policy):
resource="ip_policy",
msg="network_ids or subnet_ids not specified")
models = []
if subnet_ids:
subnets = db_api.subnet_find(
context, id=subnet_ids, scope=db_api.ALL)
if not subnets:
raise exceptions.SubnetNotFound(id=subnet_ids)
models.extend(subnets)
with context.session.begin():
models = []
if subnet_ids:
subnets = db_api.subnet_find(
context, id=subnet_ids, scope=db_api.ALL)
if not subnets:
raise exceptions.SubnetNotFound(id=subnet_ids)
models.extend(subnets)
if network_ids:
nets = db_api.network_find(
context, id=network_ids, scope=db_api.ALL)
if not nets:
raise exceptions.NetworkNotFound(net_id=network_ids)
models.extend(nets)
if network_ids:
nets = db_api.network_find(
context, id=network_ids, scope=db_api.ALL)
if not nets:
raise exceptions.NetworkNotFound(net_id=network_ids)
models.extend(nets)
for model in models:
if model["ip_policy"]:
raise quark_exceptions.IPPolicyAlreadyExists(
id=model["ip_policy"]["id"], n_id=model["id"])
model["ip_policy"] = db_api.ip_policy_create(context, **ipp)
for model in models:
if model["ip_policy"]:
raise quark_exceptions.IPPolicyAlreadyExists(
id=model["ip_policy"]["id"], n_id=model["id"])
model["ip_policy"] = db_api.ip_policy_create(context, **ipp)
return v._make_ip_policy_dict(model["ip_policy"])
@@ -85,46 +86,49 @@ def update_ip_policy(context, id, ip_policy):
ipp = ip_policy["ip_policy"]
ipp_db = db_api.ip_policy_find(context, id=id, scope=db_api.ONE)
if not ipp_db:
raise quark_exceptions.IPPolicyNotFound(id=id)
with context.session.begin():
ipp_db = db_api.ip_policy_find(context, id=id, scope=db_api.ONE)
if not ipp_db:
raise quark_exceptions.IPPolicyNotFound(id=id)
network_ids = ipp.get("network_ids")
subnet_ids = ipp.get("subnet_ids")
network_ids = ipp.get("network_ids")
subnet_ids = ipp.get("subnet_ids")
models = []
if subnet_ids:
for subnet in ipp_db["subnets"]:
subnet["ip_policy"] = None
subnets = db_api.subnet_find(
context, id=subnet_ids, scope=db_api.ALL)
if len(subnets) != len(subnet_ids):
raise exceptions.SubnetNotFound(id=subnet_ids)
models.extend(subnets)
models = []
if subnet_ids:
for subnet in ipp_db["subnets"]:
subnet["ip_policy"] = None
subnets = db_api.subnet_find(
context, id=subnet_ids, scope=db_api.ALL)
if len(subnets) != len(subnet_ids):
raise exceptions.SubnetNotFound(id=subnet_ids)
models.extend(subnets)
if network_ids:
for network in ipp_db["networks"]:
network["ip_policy"] = None
nets = db_api.network_find(context, id=network_ids, scope=db_api.ALL)
if len(nets) != len(network_ids):
raise exceptions.NetworkNotFound(net_id=network_ids)
models.extend(nets)
if network_ids:
for network in ipp_db["networks"]:
network["ip_policy"] = None
nets = db_api.network_find(context, id=network_ids,
scope=db_api.ALL)
if len(nets) != len(network_ids):
raise exceptions.NetworkNotFound(net_id=network_ids)
models.extend(nets)
for model in models:
if model["ip_policy"]:
raise quark_exceptions.IPPolicyAlreadyExists(
id=model["ip_policy"]["id"], n_id=model["id"])
model["ip_policy"] = ipp_db
for model in models:
if model["ip_policy"]:
raise quark_exceptions.IPPolicyAlreadyExists(
id=model["ip_policy"]["id"], n_id=model["id"])
model["ip_policy"] = ipp_db
ipp_db = db_api.ip_policy_update(context, ipp_db, **ipp)
ipp_db = db_api.ip_policy_update(context, ipp_db, **ipp)
return v._make_ip_policy_dict(ipp_db)
def delete_ip_policy(context, id):
LOG.info("delete_ip_policy %s for tenant %s" % (id, context.tenant_id))
ipp = db_api.ip_policy_find(context, id=id, scope=db_api.ONE)
if not ipp:
raise quark_exceptions.IPPolicyNotFound(id=id)
if ipp["networks"] or ipp["subnets"]:
raise quark_exceptions.IPPolicyInUse(id=id)
db_api.ip_policy_delete(context, ipp)
with context.session.begin():
ipp = db_api.ip_policy_find(context, id=id, scope=db_api.ONE)
if not ipp:
raise quark_exceptions.IPPolicyNotFound(id=id)
if ipp["networks"] or ipp["subnets"]:
raise quark_exceptions.IPPolicyInUse(id=id)
db_api.ip_policy_delete(context, ipp)

View File

@@ -82,9 +82,10 @@ def create_mac_address_range(context, mac_range):
LOG.info("create_mac_address_range for tenant %s" % context.tenant_id)
cidr = mac_range["mac_address_range"]["cidr"]
cidr, first_address, last_address = _to_mac_range(cidr)
new_range = db_api.mac_address_range_create(
context, cidr=cidr, first_address=first_address,
last_address=last_address, next_auto_assign_mac=first_address)
with context.session.begin():
new_range = db_api.mac_address_range_create(
context, cidr=cidr, first_address=first_address,
last_address=last_address, next_auto_assign_mac=first_address)
return v._make_mac_range_dict(new_range)
@@ -103,8 +104,9 @@ def delete_mac_address_range(context, id):
"""
LOG.info("delete_mac_address_range %s for tenant %s" %
(id, context.tenant_id))
mar = db_api.mac_address_range_find(context, id=id, scope=db_api.ONE)
if not mar:
raise quark_exceptions.MacAddressRangeNotFound(
mac_address_range_id=id)
_delete_mac_address_range(context, mar)
with context.session.begin():
mar = db_api.mac_address_range_find(context, id=id, scope=db_api.ONE)
if not mar:
raise quark_exceptions.MacAddressRangeNotFound(
mac_address_range_id=id)
_delete_mac_address_range(context, mar)

View File

@@ -60,46 +60,47 @@ def create_network(context, network):
"""
LOG.info("create_network for tenant %s" % context.tenant_id)
# Generate a uuid that we're going to hand to the backend and db
net_uuid = uuidutils.generate_uuid()
with context.session.begin():
# Generate a uuid that we're going to hand to the backend and db
net_uuid = uuidutils.generate_uuid()
#TODO(mdietz) this will be the first component registry hook, but
# lets make it work first
pnet_type, phys_net, seg_id = _adapt_provider_nets(context, network)
net_attrs = network["network"]
#TODO(mdietz) this will be the first component registry hook, but
# lets make it work first
pnet_type, phys_net, seg_id = _adapt_provider_nets(context, network)
net_attrs = network["network"]
# NOTE(mdietz) I think ideally we would create the providernet
# elsewhere as a separate driver step that could be
# kept in a plugin and completely removed if desired. We could
# have a pre-callback/observer on the netdriver create_network
# that gathers any additional parameters from the network dict
# NOTE(mdietz) I think ideally we would create the providernet
# elsewhere as a separate driver step that could be
# kept in a plugin and completely removed if desired. We could
# have a pre-callback/observer on the netdriver create_network
# that gathers any additional parameters from the network dict
#TODO(dietz or perkins): Allow this to be overridden later with CLI
default_net_type = CONF.QUARK.default_network_type
net_driver = registry.DRIVER_REGISTRY.get_driver(default_net_type)
net_driver.create_network(context, net_attrs["name"], network_id=net_uuid,
phys_type=pnet_type, phys_net=phys_net,
segment_id=seg_id)
#TODO(dietz or perkins): Allow this to be overridden later with CLI
default_net_type = CONF.QUARK.default_network_type
net_driver = registry.DRIVER_REGISTRY.get_driver(default_net_type)
net_driver.create_network(context, net_attrs["name"],
network_id=net_uuid, phys_type=pnet_type,
phys_net=phys_net, segment_id=seg_id)
subs = net_attrs.pop("subnets", [])
subs = net_attrs.pop("subnets", [])
net_attrs["id"] = net_uuid
net_attrs["tenant_id"] = context.tenant_id
net_attrs["network_plugin"] = default_net_type
new_net = db_api.network_create(context, **net_attrs)
net_attrs["id"] = net_uuid
net_attrs["tenant_id"] = context.tenant_id
net_attrs["network_plugin"] = default_net_type
new_net = db_api.network_create(context, **net_attrs)
new_subnets = []
for sub in subs:
sub["subnet"]["network_id"] = new_net["id"]
sub["subnet"]["tenant_id"] = context.tenant_id
s = db_api.subnet_create(context, **sub["subnet"])
new_subnets.append(s)
new_net["subnets"] = new_subnets
new_subnets = []
for sub in subs:
sub["subnet"]["network_id"] = new_net["id"]
sub["subnet"]["tenant_id"] = context.tenant_id
s = db_api.subnet_create(context, **sub["subnet"])
new_subnets.append(s)
new_net["subnets"] = new_subnets
#if not security_groups.get_security_groups(
# context,
# filters={"id": security_groups.DEFAULT_SG_UUID}):
# security_groups._create_default_security_group(context)
#if not security_groups.get_security_groups(
# context,
# filters={"id": security_groups.DEFAULT_SG_UUID}):
# security_groups._create_default_security_group(context)
return v._make_network_dict(new_net)
@@ -115,10 +116,11 @@ def update_network(context, id, network):
"""
LOG.info("update_network %s for tenant %s" %
(id, context.tenant_id))
net = db_api.network_find(context, id=id, scope=db_api.ONE)
if not net:
raise exceptions.NetworkNotFound(net_id=id)
net = db_api.network_update(context, net, **network["network"])
with context.session.begin():
net = db_api.network_find(context, id=id, scope=db_api.ONE)
if not net:
raise exceptions.NetworkNotFound(net_id=id)
net = db_api.network_update(context, net, **network["network"])
return v._make_network_dict(net)
@@ -198,16 +200,17 @@ def delete_network(context, id):
: param id: UUID representing the network to delete.
"""
LOG.info("delete_network %s for tenant %s" % (id, context.tenant_id))
net = db_api.network_find(context, id=id, scope=db_api.ONE)
if not net:
raise exceptions.NetworkNotFound(net_id=id)
if net.ports:
raise exceptions.NetworkInUse(net_id=id)
net_driver = registry.DRIVER_REGISTRY.get_driver(net["network_plugin"])
net_driver.delete_network(context, id)
for subnet in net["subnets"]:
subnets._delete_subnet(context, subnet)
db_api.network_delete(context, net)
with context.session.begin():
net = db_api.network_find(context, id=id, scope=db_api.ONE)
if not net:
raise exceptions.NetworkNotFound(net_id=id)
if net.ports:
raise exceptions.NetworkInUse(net_id=id)
net_driver = registry.DRIVER_REGISTRY.get_driver(net["network_plugin"])
net_driver.delete_network(context, id)
for subnet in net["subnets"]:
subnets._delete_subnet(context, subnet)
db_api.network_delete(context, net)
def _diag_network(context, network, fields):

View File

@@ -52,61 +52,63 @@ def create_port(context, port):
net_id = port_attrs["network_id"]
addresses = []
port_id = uuidutils.generate_uuid()
with context.session.begin():
port_id = uuidutils.generate_uuid()
net = db_api.network_find(context, id=net_id, shared=True,
segment_id=segment_id, scope=db_api.ONE)
if not net:
# Maybe it's a tenant network
net = db_api.network_find(context, id=net_id, scope=db_api.ONE)
net = db_api.network_find(context, id=net_id,
segment_id=segment_id, scope=db_api.ONE)
if not net:
raise exceptions.NetworkNotFound(net_id=net_id)
# Maybe it's a tenant network
net = db_api.network_find(context, id=net_id, scope=db_api.ONE)
if not net:
raise exceptions.NetworkNotFound(net_id=net_id)
quota.QUOTAS.limit_check(
context, context.tenant_id,
ports_per_network=len(net.get('ports', [])) + 1)
quota.QUOTAS.limit_check(
context, context.tenant_id,
ports_per_network=len(net.get('ports', [])) + 1)
if fixed_ips:
for fixed_ip in fixed_ips:
subnet_id = fixed_ip.get("subnet_id")
ip_address = fixed_ip.get("ip_address")
if not (subnet_id and ip_address):
raise exceptions.BadRequest(
resource="fixed_ips",
msg="subnet_id and ip_address required")
if fixed_ips:
for fixed_ip in fixed_ips:
subnet_id = fixed_ip.get("subnet_id")
ip_address = fixed_ip.get("ip_address")
if not (subnet_id and ip_address):
raise exceptions.BadRequest(
resource="fixed_ips",
msg="subnet_id and ip_address required")
addresses.append(ipam_driver.allocate_ip_address(
context, net["id"], port_id, CONF.QUARK.ipam_reuse_after,
ip_address=ip_address))
else:
addresses.append(ipam_driver.allocate_ip_address(
context, net["id"], port_id, CONF.QUARK.ipam_reuse_after,
ip_address=ip_address))
else:
addresses.append(ipam_driver.allocate_ip_address(
context, net["id"], port_id, CONF.QUARK.ipam_reuse_after))
context, net["id"], port_id, CONF.QUARK.ipam_reuse_after))
group_ids, security_groups = v.make_security_group_list(
context, port["port"].pop("security_groups", None))
mac = ipam_driver.allocate_mac_address(context, net["id"], port_id,
CONF.QUARK.ipam_reuse_after,
mac_address=mac_address)
mac_address_string = str(netaddr.EUI(mac['address'],
dialect=netaddr.mac_unix))
address_pairs = [{'mac_address': mac_address_string,
'ip_address': address.get('address_readable', '')}
for address in addresses]
net_driver = registry.DRIVER_REGISTRY.get_driver(net["network_plugin"])
backend_port = net_driver.create_port(context, net["id"], port_id=port_id,
security_groups=group_ids,
allowed_pairs=address_pairs)
group_ids, security_groups = v.make_security_group_list(
context, port["port"].pop("security_groups", None))
mac = ipam_driver.allocate_mac_address(context, net["id"], port_id,
CONF.QUARK.ipam_reuse_after,
mac_address=mac_address)
mac_address_string = str(netaddr.EUI(mac['address'],
dialect=netaddr.mac_unix))
address_pairs = [{'mac_address': mac_address_string,
'ip_address': address.get('address_readable', '')}
for address in addresses]
net_driver = registry.DRIVER_REGISTRY.get_driver(net["network_plugin"])
backend_port = net_driver.create_port(context, net["id"],
port_id=port_id,
security_groups=group_ids,
allowed_pairs=address_pairs)
port_attrs["network_id"] = net["id"]
port_attrs["id"] = port_id
port_attrs["security_groups"] = security_groups
port_attrs["network_id"] = net["id"]
port_attrs["id"] = port_id
port_attrs["security_groups"] = security_groups
LOG.info("Including extra plugin attrs: %s" % backend_port)
port_attrs.update(backend_port)
new_port = db_api.port_create(
context, addresses=addresses, mac_address=mac["address"],
backend_key=backend_port["uuid"], **port_attrs)
LOG.info("Including extra plugin attrs: %s" % backend_port)
port_attrs.update(backend_port)
new_port = db_api.port_create(
context, addresses=addresses, mac_address=mac["address"],
backend_key=backend_port["uuid"], **port_attrs)
# Include any driver specific bits
# Include any driver specific bits
return v._make_port_dict(new_port)
@@ -121,46 +123,47 @@ def update_port(context, id, port):
neutron/api/v2/attributes.py.
"""
LOG.info("update_port %s for tenant %s" % (id, context.tenant_id))
port_db = db_api.port_find(context, id=id, scope=db_api.ONE)
if not port_db:
raise exceptions.PortNotFound(port_id=id)
with context.session.begin():
port_db = db_api.port_find(context, id=id, scope=db_api.ONE)
if not port_db:
raise exceptions.PortNotFound(port_id=id)
address_pairs = []
fixed_ips = port["port"].pop("fixed_ips", None)
if fixed_ips:
ipam_driver.deallocate_ip_address(
context, port_db, ipam_reuse_after=CONF.QUARK.ipam_reuse_after)
addresses = []
for fixed_ip in fixed_ips:
subnet_id = fixed_ip.get("subnet_id")
ip_address = fixed_ip.get("ip_address")
if not (subnet_id and ip_address):
raise exceptions.BadRequest(
resource="fixed_ips",
msg="subnet_id and ip_address required")
# Note: we don't allow overlapping subnets, thus subnet_id is
# ignored.
addresses.append(ipam_driver.allocate_ip_address(
context, port_db["network_id"], id,
CONF.QUARK.ipam_reuse_after, ip_address=ip_address))
port["port"]["addresses"] = addresses
mac_address_string = str(netaddr.EUI(port_db.mac_address,
dialect=netaddr.mac_unix))
address_pairs = [{'mac_address': mac_address_string,
'ip_address':
address.get('address_readable', '')}
for address in addresses]
address_pairs = []
fixed_ips = port["port"].pop("fixed_ips", None)
if fixed_ips:
ipam_driver.deallocate_ip_address(
context, port_db, ipam_reuse_after=CONF.QUARK.ipam_reuse_after)
addresses = []
for fixed_ip in fixed_ips:
subnet_id = fixed_ip.get("subnet_id")
ip_address = fixed_ip.get("ip_address")
if not (subnet_id and ip_address):
raise exceptions.BadRequest(
resource="fixed_ips",
msg="subnet_id and ip_address required")
# Note: we don't allow overlapping subnets, thus subnet_id is
# ignored.
addresses.append(ipam_driver.allocate_ip_address(
context, port_db["network_id"], id,
CONF.QUARK.ipam_reuse_after, ip_address=ip_address))
port["port"]["addresses"] = addresses
mac_address_string = str(netaddr.EUI(port_db.mac_address,
dialect=netaddr.mac_unix))
address_pairs = [{'mac_address': mac_address_string,
'ip_address':
address.get('address_readable', '')}
for address in addresses]
group_ids, security_groups = v.make_security_group_list(
context, port["port"].pop("security_groups", None))
net_driver = registry.DRIVER_REGISTRY.get_driver(
port_db.network["network_plugin"])
net_driver.update_port(context, port_id=port_db.backend_key,
security_groups=group_ids,
allowed_pairs=address_pairs)
group_ids, security_groups = v.make_security_group_list(
context, port["port"].pop("security_groups", None))
net_driver = registry.DRIVER_REGISTRY.get_driver(
port_db.network["network_plugin"])
net_driver.update_port(context, port_id=port_db.backend_key,
security_groups=group_ids,
allowed_pairs=address_pairs)
port["port"]["security_groups"] = security_groups
port = db_api.port_update(context, port_db, **port["port"])
port["port"]["security_groups"] = security_groups
port = db_api.port_update(context, port_db, **port["port"])
return v._make_port_dict(port)
@@ -170,46 +173,48 @@ def post_update_port(context, id, port):
raise exceptions.BadRequest(resource="ports",
msg="Port body required")
port_db = db_api.port_find(context, id=id, scope=db_api.ONE)
if not port_db:
raise exceptions.PortNotFound(port_id=id, net_id="")
with context.session.begin():
port_db = db_api.port_find(context, id=id, scope=db_api.ONE)
if not port_db:
raise exceptions.PortNotFound(port_id=id, net_id="")
port = port["port"]
if "fixed_ips" in port and port["fixed_ips"]:
for ip in port["fixed_ips"]:
address = None
if ip:
if "ip_id" in ip:
ip_id = ip["ip_id"]
address = db_api.ip_address_find(
context, id=ip_id, tenant_id=context.tenant_id,
scope=db_api.ONE)
elif "ip_address" in ip:
ip_address = ip["ip_address"]
net_address = netaddr.IPAddress(ip_address)
address = db_api.ip_address_find(
context, ip_address=net_address,
network_id=port_db["network_id"],
tenant_id=context.tenant_id, scope=db_api.ONE)
if not address:
address = ipam_driver.allocate_ip_address(
context, port_db["network_id"], id,
CONF.QUARK.ipam_reuse_after, ip_address=ip_address)
else:
address = ipam_driver.allocate_ip_address(
context, port_db["network_id"], id,
CONF.QUARK.ipam_reuse_after)
port = port["port"]
if "fixed_ips" in port and port["fixed_ips"]:
for ip in port["fixed_ips"]:
address = None
if ip:
if "ip_id" in ip:
ip_id = ip["ip_id"]
address = db_api.ip_address_find(
context, id=ip_id, tenant_id=context.tenant_id,
scope=db_api.ONE)
elif "ip_address" in ip:
ip_address = ip["ip_address"]
net_address = netaddr.IPAddress(ip_address)
address = db_api.ip_address_find(
context, ip_address=net_address,
network_id=port_db["network_id"],
tenant_id=context.tenant_id, scope=db_api.ONE)
if not address:
address = ipam_driver.allocate_ip_address(
context, port_db["network_id"], id,
CONF.QUARK.ipam_reuse_after,
ip_address=ip_address)
else:
address = ipam_driver.allocate_ip_address(
context, port_db["network_id"], id,
CONF.QUARK.ipam_reuse_after)
address["deallocated"] = 0
address["deallocated"] = 0
already_contained = False
for port_address in port_db["ip_addresses"]:
if address["id"] == port_address["id"]:
already_contained = True
break
already_contained = False
for port_address in port_db["ip_addresses"]:
if address["id"] == port_address["id"]:
already_contained = True
break
if not already_contained:
port_db["ip_addresses"].append(address)
if not already_contained:
port_db["ip_addresses"].append(address)
return v._make_port_dict(port_db)
@@ -296,15 +301,16 @@ def delete_port(context, id):
if not port:
raise exceptions.PortNotFound(net_id=id)
backend_key = port["backend_key"]
mac_address = netaddr.EUI(port["mac_address"]).value
ipam_driver.deallocate_mac_address(context, mac_address)
ipam_driver.deallocate_ip_address(
context, port, ipam_reuse_after=CONF.QUARK.ipam_reuse_after)
db_api.port_delete(context, port)
net_driver = registry.DRIVER_REGISTRY.get_driver(
port.network["network_plugin"])
net_driver.delete_port(context, backend_key)
with context.session.begin():
backend_key = port["backend_key"]
mac_address = netaddr.EUI(port["mac_address"]).value
ipam_driver.deallocate_mac_address(context, mac_address)
ipam_driver.deallocate_ip_address(
context, port, ipam_reuse_after=CONF.QUARK.ipam_reuse_after)
db_api.port_delete(context, port)
net_driver = registry.DRIVER_REGISTRY.get_driver(
port.network["network_plugin"])
net_driver.delete_port(context, backend_key)
def disassociate_port(context, id, ip_address_id):
@@ -317,19 +323,20 @@ def disassociate_port(context, id, ip_address_id):
"""
LOG.info("disassociate_port %s for tenant %s ip_address_id %s" %
(id, context.tenant_id, ip_address_id))
port = db_api.port_find(context, id=id, ip_address_id=[ip_address_id],
scope=db_api.ONE)
with context.session.begin():
port = db_api.port_find(context, id=id, ip_address_id=[ip_address_id],
scope=db_api.ONE)
if not port:
raise exceptions.PortNotFound(port_id=id, net_id='')
if not port:
raise exceptions.PortNotFound(port_id=id, net_id='')
the_address = [address for address in port["ip_addresses"]
if address["id"] == ip_address_id][0]
port["ip_addresses"] = [address for address in port["ip_addresses"]
if address.id != ip_address_id]
the_address = [address for address in port["ip_addresses"]
if address["id"] == ip_address_id][0]
port["ip_addresses"] = [address for address in port["ip_addresses"]
if address.id != ip_address_id]
if len(the_address["ports"]) == 0:
the_address["deallocated"] = 1
if len(the_address["ports"]) == 0:
the_address["deallocated"] = 1
return v._make_port_dict(port)

View File

@@ -49,23 +49,24 @@ def create_route(context, route):
LOG.info("create_route for tenant %s" % context.tenant_id)
route = route["route"]
subnet_id = route["subnet_id"]
subnet = db_api.subnet_find(context, id=subnet_id, scope=db_api.ONE)
if not subnet:
raise exceptions.SubnetNotFound(subnet_id=subnet_id)
with context.session.begin():
subnet = db_api.subnet_find(context, id=subnet_id, scope=db_api.ONE)
if not subnet:
raise exceptions.SubnetNotFound(subnet_id=subnet_id)
# TODO(anyone): May want to denormalize the cidr values into columns
# to achieve single db lookup on conflict check
route_cidr = netaddr.IPNetwork(route["cidr"])
subnet_routes = db_api.route_find(context, subnet_id=subnet_id,
scope=db_api.ALL)
for sub_route in subnet_routes:
sub_route_cidr = netaddr.IPNetwork(sub_route["cidr"])
if sub_route_cidr.value == DEFAULT_ROUTE.value:
continue
if route_cidr in sub_route_cidr or sub_route_cidr in route_cidr:
raise quark_exceptions.RouteConflict(
route_id=sub_route["id"], cidr=str(route_cidr))
new_route = db_api.route_create(context, **route)
# TODO(anyone): May want to denormalize the cidr values into columns
# to achieve single db lookup on conflict check
route_cidr = netaddr.IPNetwork(route["cidr"])
subnet_routes = db_api.route_find(context, subnet_id=subnet_id,
scope=db_api.ALL)
for sub_route in subnet_routes:
sub_route_cidr = netaddr.IPNetwork(sub_route["cidr"])
if sub_route_cidr.value == DEFAULT_ROUTE.value:
continue
if route_cidr in sub_route_cidr or sub_route_cidr in route_cidr:
raise quark_exceptions.RouteConflict(
route_id=sub_route["id"], cidr=str(route_cidr))
new_route = db_api.route_create(context, **route)
return v._make_route_dict(new_route)
@@ -74,7 +75,8 @@ def delete_route(context, id):
# admin and only filter on tenant if they aren't. Correct
# for all the above later
LOG.info("delete_route %s for tenant %s" % (id, context.tenant_id))
route = db_api.route_find(context, id, scope=db_api.ONE)
if not route:
raise quark_exceptions.RouteNotFound(route_id=id)
db_api.route_delete(context, route)
with context.session.begin():
route = db_api.route_find(context, id, scope=db_api.ONE)
if not route:
raise quark_exceptions.RouteNotFound(route_id=id)
db_api.route_delete(context, route)

View File

@@ -76,16 +76,17 @@ def create_security_group(context, security_group, net_driver):
raise sg_ext.SecurityGroupDefaultAlreadyExists()
group_id = uuidutils.generate_uuid()
net_driver.create_security_group(
context,
group_name,
group_id=group_id,
**group)
with context.session.begin():
net_driver.create_security_group(
context,
group_name,
group_id=group_id,
**group)
group["id"] = group_id
group["name"] = group_name
group["tenant_id"] = context.tenant_id
dbgroup = db_api.security_group_create(context, **group)
group["id"] = group_id
group["name"] = group_name
group["tenant_id"] = context.tenant_id
dbgroup = db_api.security_group_create(context, **group)
return v._make_security_group_dict(dbgroup)
@@ -121,21 +122,23 @@ def _create_default_security_group(context, net_driver):
def create_security_group_rule(context, security_group_rule, net_driver):
LOG.info("create_security_group for tenant %s" %
(context.tenant_id))
rule = _validate_security_group_rule(
context, security_group_rule["security_group_rule"])
rule["id"] = uuidutils.generate_uuid()
group_id = rule["security_group_id"]
group = db_api.security_group_find(context, id=group_id,
scope=db_api.ONE)
if not group:
raise sg_ext.SecurityGroupNotFound(group_id=group_id)
with context.session.begin():
rule = _validate_security_group_rule(
context, security_group_rule["security_group_rule"])
rule["id"] = uuidutils.generate_uuid()
quota.QUOTAS.limit_check(
context, context.tenant_id,
security_rules_per_group=len(group.get("rules", [])) + 1)
group_id = rule["security_group_id"]
group = db_api.security_group_find(context, id=group_id,
scope=db_api.ONE)
if not group:
raise sg_ext.SecurityGroupNotFound(group_id=group_id)
net_driver.create_security_group_rule(context, group_id, rule)
quota.QUOTAS.limit_check(
context, context.tenant_id,
security_rules_per_group=len(group.get("rules", [])) + 1)
net_driver.create_security_group_rule(context, group_id, rule)
return v._make_security_group_rule_dict(
db_api.security_group_rule_create(context, **rule))
@@ -145,37 +148,39 @@ def delete_security_group(context, id, net_driver):
LOG.info("delete_security_group %s for tenant %s" %
(id, context.tenant_id))
group = db_api.security_group_find(context, id=id, scope=db_api.ONE)
with context.session.begin():
group = db_api.security_group_find(context, id=id, scope=db_api.ONE)
#TODO(anyone): name and ports are lazy-loaded. Could be good op later
if not group:
raise sg_ext.SecurityGroupNotFound(group_id=id)
if id == DEFAULT_SG_UUID or group.name == "default":
raise sg_ext.SecurityGroupCannotRemoveDefault()
if group.ports:
raise sg_ext.SecurityGroupInUse(id=id)
net_driver.delete_security_group(context, id)
db_api.security_group_delete(context, group)
#TODO(anyone): name and ports are lazy-loaded. Could be good op later
if not group:
raise sg_ext.SecurityGroupNotFound(group_id=id)
if id == DEFAULT_SG_UUID or group.name == "default":
raise sg_ext.SecurityGroupCannotRemoveDefault()
if group.ports:
raise sg_ext.SecurityGroupInUse(id=id)
net_driver.delete_security_group(context, id)
db_api.security_group_delete(context, group)
def delete_security_group_rule(context, id, net_driver):
LOG.info("delete_security_group %s for tenant %s" %
(id, context.tenant_id))
rule = db_api.security_group_rule_find(context, id=id,
with context.session.begin():
rule = db_api.security_group_rule_find(context, id=id,
scope=db_api.ONE)
if not rule:
raise sg_ext.SecurityGroupRuleNotFound(group_id=id)
group = db_api.security_group_find(context, id=rule["group_id"],
scope=db_api.ONE)
if not rule:
raise sg_ext.SecurityGroupRuleNotFound(group_id=id)
if not group:
raise sg_ext.SecurityGroupNotFound(id=id)
group = db_api.security_group_find(context, id=rule["group_id"],
scope=db_api.ONE)
if not group:
raise sg_ext.SecurityGroupNotFound(id=id)
net_driver.delete_security_group_rule(
context, group.id, v._make_security_group_rule_dict(rule))
net_driver.delete_security_group_rule(
context, group.id, v._make_security_group_rule_dict(rule))
rule["id"] = id
db_api.security_group_rule_delete(context, rule)
rule["id"] = id
db_api.security_group_rule_delete(context, rule)
def get_security_group(context, id, fields=None):
@@ -219,8 +224,9 @@ def update_security_group(context, id, security_group, net_driver):
if id == DEFAULT_SG_UUID:
raise sg_ext.SecurityGroupCannotUpdateDefault()
new_group = security_group["security_group"]
group = db_api.security_group_find(context, id=id, scope=db_api.ONE)
net_driver.update_security_group(context, id, **new_group)
with context.session.begin():
group = db_api.security_group_find(context, id=id, scope=db_api.ONE)
net_driver.update_security_group(context, id, **new_group)
db_group = db_api.security_group_update(context, group, **new_group)
db_group = db_api.security_group_update(context, group, **new_group)
return v._make_security_group_dict(db_group)

View File

@@ -85,53 +85,54 @@ def create_subnet(context, subnet):
LOG.info("create_subnet for tenant %s" % context.tenant_id)
net_id = subnet["subnet"]["network_id"]
net = db_api.network_find(context, id=net_id, scope=db_api.ONE)
if not net:
raise exceptions.NetworkNotFound(net_id=net_id)
with context.session.begin():
net = db_api.network_find(context, id=net_id, scope=db_api.ONE)
if not net:
raise exceptions.NetworkNotFound(net_id=net_id)
sub_attrs = subnet["subnet"]
sub_attrs = subnet["subnet"]
_validate_subnet_cidr(context, net_id, sub_attrs["cidr"])
_validate_subnet_cidr(context, net_id, sub_attrs["cidr"])
cidr = netaddr.IPNetwork(sub_attrs["cidr"])
gateway_ip = utils.pop_param(sub_attrs, "gateway_ip", str(cidr[1]))
dns_ips = utils.pop_param(sub_attrs, "dns_nameservers", [])
host_routes = utils.pop_param(sub_attrs, "host_routes", [])
allocation_pools = utils.pop_param(sub_attrs, "allocation_pools", None)
sub_attrs["network"] = net
cidr = netaddr.IPNetwork(sub_attrs["cidr"])
gateway_ip = utils.pop_param(sub_attrs, "gateway_ip", str(cidr[1]))
dns_ips = utils.pop_param(sub_attrs, "dns_nameservers", [])
host_routes = utils.pop_param(sub_attrs, "host_routes", [])
allocation_pools = utils.pop_param(sub_attrs, "allocation_pools", None)
sub_attrs["network"] = net
new_subnet = db_api.subnet_create(context, **sub_attrs)
new_subnet = db_api.subnet_create(context, **sub_attrs)
default_route = None
for route in host_routes:
netaddr_route = netaddr.IPNetwork(route["destination"])
if netaddr_route.value == routes.DEFAULT_ROUTE.value:
default_route = route
gateway_ip = default_route["nexthop"]
new_subnet["routes"].append(db_api.route_create(
context, cidr=route["destination"], gateway=route["nexthop"]))
default_route = None
for route in host_routes:
netaddr_route = netaddr.IPNetwork(route["destination"])
if netaddr_route.value == routes.DEFAULT_ROUTE.value:
default_route = route
gateway_ip = default_route["nexthop"]
new_subnet["routes"].append(db_api.route_create(
context, cidr=route["destination"], gateway=route["nexthop"]))
if default_route is None:
new_subnet["routes"].append(db_api.route_create(
context, cidr=str(routes.DEFAULT_ROUTE), gateway=gateway_ip))
if default_route is None:
new_subnet["routes"].append(db_api.route_create(
context, cidr=str(routes.DEFAULT_ROUTE), gateway=gateway_ip))
for dns_ip in dns_ips:
new_subnet["dns_nameservers"].append(db_api.dns_create(
context, ip=netaddr.IPAddress(dns_ip)))
for dns_ip in dns_ips:
new_subnet["dns_nameservers"].append(db_api.dns_create(
context, ip=netaddr.IPAddress(dns_ip)))
if isinstance(allocation_pools, list):
ranges = []
cidrset = netaddr.IPSet([netaddr.IPNetwork(new_subnet["cidr"])])
for p in allocation_pools:
cidrset -= netaddr.IPSet(netaddr.IPRange(p["start"], p["end"]))
non_allocation_pools = v._pools_from_cidr(cidrset)
for p in non_allocation_pools:
r = netaddr.IPRange(p["start"], p["end"])
ranges.append(dict(
length=len(r),
offset=int(r[0]) - int(cidr[0])))
new_subnet["ip_policy"] = db_api.ip_policy_create(context,
exclude=ranges)
if isinstance(allocation_pools, list):
ranges = []
cidrset = netaddr.IPSet([netaddr.IPNetwork(new_subnet["cidr"])])
for p in allocation_pools:
cidrset -= netaddr.IPSet(netaddr.IPRange(p["start"], p["end"]))
non_allocation_pools = v._pools_from_cidr(cidrset)
for p in non_allocation_pools:
r = netaddr.IPRange(p["start"], p["end"])
ranges.append(dict(
length=len(r),
offset=int(r[0]) - int(cidr[0])))
new_subnet["ip_policy"] = db_api.ip_policy_create(context,
exclude=ranges)
subnet_dict = v._make_subnet_dict(new_subnet,
default_route=routes.DEFAULT_ROUTE)
@@ -161,49 +162,50 @@ def update_subnet(context, id, subnet):
LOG.info("update_subnet %s for tenant %s" %
(id, context.tenant_id))
subnet_db = db_api.subnet_find(context, id=id, scope=db_api.ONE)
if not subnet_db:
raise exceptions.SubnetNotFound(id=id)
with context.session.begin():
subnet_db = db_api.subnet_find(context, id=id, scope=db_api.ONE)
if not subnet_db:
raise exceptions.SubnetNotFound(id=id)
s = subnet["subnet"]
s = subnet["subnet"]
dns_ips = s.pop("dns_nameservers", [])
host_routes = s.pop("host_routes", [])
gateway_ip = s.pop("gateway_ip", None)
dns_ips = s.pop("dns_nameservers", [])
host_routes = s.pop("host_routes", [])
gateway_ip = s.pop("gateway_ip", None)
if gateway_ip:
default_route = None
if gateway_ip:
default_route = None
for route in host_routes:
netaddr_route = netaddr.IPNetwork(route["destination"])
if netaddr_route.value == routes.DEFAULT_ROUTE.value:
default_route = route
break
if default_route is None:
route_model = db_api.route_find(
context, cidr=str(routes.DEFAULT_ROUTE), subnet_id=id,
scope=db_api.ONE)
if route_model:
db_api.route_update(context, route_model,
gateway=gateway_ip)
else:
db_api.route_create(context,
cidr=str(routes.DEFAULT_ROUTE),
gateway=gateway_ip, subnet_id=id)
if dns_ips:
subnet_db["dns_nameservers"] = []
for dns_ip in dns_ips:
subnet_db["dns_nameservers"].append(db_api.dns_create(
context,
ip=netaddr.IPAddress(dns_ip)))
if host_routes:
subnet_db["routes"] = []
for route in host_routes:
netaddr_route = netaddr.IPNetwork(route["destination"])
if netaddr_route.value == routes.DEFAULT_ROUTE.value:
default_route = route
break
if default_route is None:
route_model = db_api.route_find(
context, cidr=str(routes.DEFAULT_ROUTE), subnet_id=id,
scope=db_api.ONE)
if route_model:
db_api.route_update(context, route_model,
gateway=gateway_ip)
else:
db_api.route_create(context,
cidr=str(routes.DEFAULT_ROUTE),
gateway=gateway_ip, subnet_id=id)
subnet_db["routes"].append(db_api.route_create(
context, cidr=route["destination"], gateway=route["nexthop"]))
if dns_ips:
subnet_db["dns_nameservers"] = []
for dns_ip in dns_ips:
subnet_db["dns_nameservers"].append(db_api.dns_create(
context,
ip=netaddr.IPAddress(dns_ip)))
if host_routes:
subnet_db["routes"] = []
for route in host_routes:
subnet_db["routes"].append(db_api.route_create(
context, cidr=route["destination"], gateway=route["nexthop"]))
subnet = db_api.subnet_update(context, subnet_db, **s)
subnet = db_api.subnet_update(context, subnet_db, **s)
return v._make_subnet_dict(subnet, default_route=routes.DEFAULT_ROUTE)
@@ -292,22 +294,23 @@ def delete_subnet(context, id):
: param id: UUID representing the subnet to delete.
"""
LOG.info("delete_subnet %s for tenant %s" % (id, context.tenant_id))
subnet = db_api.subnet_find(context, id=id, scope=db_api.ONE)
if not subnet:
raise exceptions.SubnetNotFound(subnet_id=id)
with context.session.begin():
subnet = db_api.subnet_find(context, id=id, scope=db_api.ONE)
if not subnet:
raise exceptions.SubnetNotFound(subnet_id=id)
payload = dict(tenant_id=subnet["tenant_id"],
ip_block_id=subnet["id"],
created_at=subnet["created_at"],
deleted_at=timeutils.utcnow())
payload = dict(tenant_id=subnet["tenant_id"],
ip_block_id=subnet["id"],
created_at=subnet["created_at"],
deleted_at=timeutils.utcnow())
_delete_subnet(context, subnet)
_delete_subnet(context, subnet)
notifier_api.notify(context,
notifier_api.publisher_id("network"),
"ip_block.delete",
notifier_api.CONF.default_notification_level,
payload)
notifier_api.notify(context,
notifier_api.publisher_id("network"),
"ip_block.delete",
notifier_api.CONF.default_notification_level,
payload)
def diagnose_subnet(context, id, fields):

View File

@@ -87,6 +87,7 @@ def _make_subnet_dict(subnet, default_route=None, fields=None):
"allocation_pools": _allocation_pools(subnet),
"dns_nameservers": dns_nameservers or [],
"cidr": subnet.get("cidr"),
"shared": STRATEGY.is_parent_network(net_id),
"enable_dhcp": None}
def _host_route(route):

View File

@@ -15,6 +15,7 @@
import contextlib
import copy
import time
import uuid
import mock
@@ -697,7 +698,17 @@ class TestQuarkDeleteSubnet(test_quark_plugin.TestQuarkPlugin):
class TestSubnetsNotification(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, s, deleted_at=None):
class FakeContext(object):
def __enter__(*args, **kwargs):
pass
def __exit__(*args, **kwargs):
pass
self.context.session.begin = FakeContext
s["network"] = models.Network()
s["network"]["created_at"] = s["created_at"]
subnet = models.Subnet(**s)
db_mod = "quark.db.api"
api_mod = "neutron.openstack.common.notifier.api"
@@ -706,13 +717,15 @@ class TestSubnetsNotification(test_quark_plugin.TestQuarkPlugin):
mock.patch("%s.subnet_find" % db_mod),
mock.patch("%s.network_find" % db_mod),
mock.patch("%s.subnet_create" % db_mod),
mock.patch("%s.ip_policy_create" % db_mod),
mock.patch("%s.subnet_delete" % db_mod),
mock.patch("%s.notify" % api_mod),
mock.patch("%s.utcnow" % time_mod)
) as (sub_find, net_find, sub_create, sub_del, notify, time):
) as (sub_find, net_find, sub_create, pol_cre, sub_del, notify,
time_func):
sub_create.return_value = subnet
sub_find.return_value = subnet
time.return_value = deleted_at
time_func.return_value = deleted_at
yield notify
def test_create_subnet_notification(self):
@@ -730,8 +743,10 @@ class TestSubnetsNotification(test_quark_plugin.TestQuarkPlugin):
created_at=s["created_at"]))
def test_delete_subnet_notification(self):
s = dict(tenant_id=1, id=1, created_at="123")
with self._stubs(s, deleted_at="456") as notify:
now = time.strftime('%Y-%m-%d %H:%M:%S')
later = time.strftime('%Y-%m-%d %H:%M:%S')
s = dict(tenant_id=1, id=1, created_at=now)
with self._stubs(s, deleted_at=later) as notify:
self.plugin.delete_subnet(self.context, 1)
notify.assert_called_once_with(
self.context,
@@ -741,7 +756,7 @@ class TestSubnetsNotification(test_quark_plugin.TestQuarkPlugin):
dict(tenant_id=s["tenant_id"],
created_at=s["created_at"],
ip_block_id=s["id"],
deleted_at="456"))
deleted_at=later))
class TestQuarkDiagnoseSubnets(test_quark_plugin.TestQuarkPlugin):

View File

@@ -24,3 +24,15 @@ class TestBase(unittest2.TestCase):
def setUp(self):
super(TestBase, self).setUp()
self.context = context.Context('fake', 'fake', is_admin=False)
class FakeContext(object):
def __new__(cls, *args, **kwargs):
return super(FakeContext, cls).__new__(cls)
def __enter__(*args, **kwargs):
pass
def __exit__(*args, **kwargs):
pass
self.context.session.begin = FakeContext

View File

@@ -202,6 +202,7 @@ class QuarkNewIPAddressAllocation(QuarkIpamBaseTest):
if not addresses:
addresses = [None]
db_mod = "quark.db.api"
self.context.session.add = mock.Mock()
with contextlib.nested(
mock.patch("%s.ip_address_find" % db_mod),
mock.patch("%s.subnet_find_allocation_counts" % db_mod)
@@ -310,6 +311,7 @@ class QuarkIPAddressAllocateDeallocated(QuarkIpamBaseTest):
@contextlib.contextmanager
def _stubs(self, ip_find, subnet, address, addresses_found):
db_mod = "quark.db.api"
self.context.session.add = mock.Mock()
with contextlib.nested(
mock.patch("%s.ip_address_find" % db_mod),
mock.patch("%s.ip_address_update" % db_mod),
@@ -360,13 +362,17 @@ class QuarkIPAddressAllocateDeallocated(QuarkIpamBaseTest):
This edge case occurs because users are allowed to select a specific IP
address to create.
"""
network_mod = models.Network()
network_mod.update(dict(ip_policy=None))
subnet = dict(id=1, ip_version=4, next_auto_assign_ip=0,
cidr="0.0.0.0/24", first_ip=0, last_ip=255,
network=dict(ip_policy=None), ip_policy=None)
network=network_mod, ip_policy=None)
address0 = dict(id=1, address=0)
addresses_found = [None, None]
subnet_mod = models.Subnet()
subnet_mod.update(subnet)
with self._stubs(
False, subnet, address0, addresses_found
False, subnet_mod, address0, addresses_found
) as (choose_subnet):
ipaddress = self.ipam.allocate_ip_address(self.context, 0, 0, 0)
self.assertEqual(ipaddress["address"], 2)
@@ -380,6 +386,7 @@ class TestQuarkIpPoliciesIpAllocation(QuarkIpamBaseTest):
if not addresses:
addresses = [None]
db_mod = "quark.db.api"
self.context.session.add = mock.Mock()
with contextlib.nested(
mock.patch("%s.ip_address_find" % db_mod),
mock.patch("%s.subnet_find_allocation_counts" % db_mod)
@@ -491,6 +498,7 @@ class QuarkIPAddressAllocationNotifications(QuarkIpamBaseTest):
db_mod = "quark.db.api"
api_mod = "neutron.openstack.common.notifier.api"
time_mod = "neutron.openstack.common.timeutils"
self.context.session.add = mock.Mock()
with contextlib.nested(
mock.patch("%s.ip_address_find" % db_mod),
mock.patch("%s.ip_address_create" % db_mod),