Default net assignment

Adds default network assignment to Quark. Also yanks out repoze.tm2 for
compatibility and reinstates standard SQLAlchemy-style transactional
sessions, eliminating the race condition hit when mass-assigning IP
addresses under load.

Also fixes the ipam_reuse_after option, which was previously, and
incorrectly, declared as a boolean config value; it is now an integer.
Matt Dietz and John Yolo Perkins
2013-09-24 16:20:59 +00:00
committed by Matt Dietz
parent da5f7b4425
commit c758f21558
16 changed files with 625 additions and 551 deletions
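The race described above comes from two concurrent API calls both finding the same deallocated address and reclaiming it. As a rough, hypothetical sketch of the pattern the diffs below adopt (not code from this commit; the model, engine, and session setup are stand-ins rather than Quark's), written against the SQLAlchemy API of this era (with_lockmode and autocommit sessions; newer SQLAlchemy spells these with_for_update() and explicit transactions):

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


class IPAddress(Base):
    # Stand-in for Quark's real model; only the fields this sketch needs.
    __tablename__ = "ip_addresses"
    id = sa.Column(sa.Integer, primary_key=True)
    address = sa.Column(sa.String(64))
    deallocated = sa.Column(sa.Boolean, default=False)


engine = sa.create_engine("sqlite://")
Base.metadata.create_all(engine)
# autocommit=True mirrors the Neutron session style of this era, where work
# happens inside explicit "with session.begin(...)" blocks.
Session = sessionmaker(bind=engine, autocommit=True)


def reclaim_deallocated_ip(session):
    """Find and reclaim a deallocated IP inside one locked transaction."""
    with session.begin(subtransactions=True):
        query = session.query(IPAddress).filter_by(deallocated=True)
        # Emits SELECT ... FOR UPDATE on MySQL/PostgreSQL (a no-op on SQLite),
        # so a concurrent caller blocks instead of grabbing the same row.
        candidate = query.with_lockmode("update").first()
        if candidate is not None:
            candidate.deallocated = False
        return candidate


print(reclaim_deallocated_ip(Session()))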


@@ -25,9 +25,9 @@ quark_opts = [
                help=_('The client to use to talk to the backend')),
     cfg.StrOpt('ipam_driver', default='quark.ipam.QuarkIpam',
                help=_('IPAM Implementation to use')),
-    cfg.BoolOpt('ipam_reuse_after', default=7200,
+    cfg.IntOpt('ipam_reuse_after', default=7200,
                help=_("Time in seconds til IP and MAC reuse"
                       "after deallocation.")),
     cfg.StrOpt("strategy_driver",
               default='quark.network_strategy.JSONStrategy',
               help=_("Tree of network assignment strategy")),


@@ -116,7 +116,7 @@ def _model_query(context, model, filters, fields=None):
     # This works even when a non-shared, other-tenant owned network is passed
     # in because the authZ checks that happen in Neutron above us yank it back
     # out of the result set.
-    if "tenant_id" not in filters and not context.is_admin:
+    if not filters and not context.is_admin:
         filters["tenant_id"] = [context.tenant_id]

     if filters.get("tenant_id"):

@@ -215,7 +215,7 @@ def ip_address_create(context, **address_dict):
 @scoped
-def ip_address_find(context, **filters):
+def ip_address_find(context, lock_mode=False, **filters):
     query = context.session.query(models.IPAddress)
     ip_shared = filters.pop("shared", None)

@@ -223,6 +223,8 @@ def ip_address_find(context, **filters):
     cnt = sql_func.count(models.port_ip_association_table.c.port_id)
     stmt = context.session.query(models.IPAddress,
                                  cnt.label("ports_count"))
+    if lock_mode:
+        stmt = stmt.with_lockmode("update")
     stmt = stmt.outerjoin(models.port_ip_association_table)
     stmt = stmt.group_by(models.IPAddress).subquery()

@@ -239,13 +241,14 @@ def ip_address_find(context, **filters):
     if filters.get("device_id"):
         model_filters.append(models.IPAddress.ports.any(
             models.Port.device_id.in_(filters["device_id"])))

     return query.filter(*model_filters)


 @scoped
-def mac_address_find(context, **filters):
+def mac_address_find(context, lock_mode=False, **filters):
     query = context.session.query(models.MacAddress)
+    if lock_mode:
+        query.with_lockmode("update")
     model_filters = _model_query(context, models.MacAddress, filters)
     return query.filter(*model_filters)

@@ -253,7 +256,7 @@ def mac_address_find(context, **filters):
 def mac_address_range_find_allocation_counts(context, address=None):
     query = context.session.query(models.MacAddressRange,
                                   sql_func.count(models.MacAddress.address).
-                                  label("count"))
+                                  label("count")).with_lockmode("update")
     query = query.outerjoin(models.MacAddress)
     query = query.group_by(models.MacAddressRange)
     query = query.order_by("count DESC")

@@ -362,7 +365,7 @@ def network_delete(context, network):
 def subnet_find_allocation_counts(context, net_id, **filters):
     query = context.session.query(models.Subnet,
                                   sql_func.count(models.IPAddress.address).
-                                  label("count"))
+                                  label("count")).with_lockmode('update')
     query = query.outerjoin(models.Subnet.allocated_ips)
     query = query.group_by(models.Subnet)
     query = query.order_by("count DESC")


@@ -24,11 +24,14 @@ from neutron.openstack.common import log as logging
 from neutron.openstack.common.notifier import api as notifier_api
 from neutron.openstack.common import timeutils

+from oslo.config import cfg
+
 from quark.db import api as db_api
 from quark.db import models

 LOG = logging.getLogger(__name__)
+CONF = cfg.CONF


 class QuarkIpam(object):

@@ -60,36 +63,40 @@
         if mac_address:
             mac_address = netaddr.EUI(mac_address).value

-        deallocated_mac = db_api.mac_address_find(
-            context, reuse_after=reuse_after, scope=db_api.ONE,
-            address=mac_address)
-        if deallocated_mac:
-            return db_api.mac_address_update(
-                context, deallocated_mac, deallocated=False,
-                deallocated_at=None)
-
-        ranges = db_api.mac_address_range_find_allocation_counts(
-            context, address=mac_address)
-        for result in ranges:
-            rng, addr_count = result
-            if rng["last_address"] - rng["first_address"] <= addr_count:
-                continue
-
-            next_address = None
-            if mac_address:
-                next_address = mac_address
-            else:
-                address = True
-                while address:
-                    next_address = rng["next_auto_assign_mac"]
-                    rng["next_auto_assign_mac"] = next_address + 1
-                    address = db_api.mac_address_find(
-                        context, tenant_id=context.tenant_id,
-                        scope=db_api.ONE, address=next_address)
-            address = db_api.mac_address_create(context, address=next_address,
-                                                mac_address_range_id=rng["id"])
-            return address
+        with context.session.begin(subtransactions=True):
+            deallocated_mac = db_api.mac_address_find(
+                context, lock_mode=True, reuse_after=reuse_after,
+                scope=db_api.ONE, address=mac_address)
+            if deallocated_mac:
+                return db_api.mac_address_update(
+                    context, deallocated_mac, deallocated=False,
+                    deallocated_at=None)
+
+        with context.session.begin(subtransactions=True):
+            ranges = db_api.mac_address_range_find_allocation_counts(
+                context, address=mac_address)
+            for result in ranges:
+                rng, addr_count = result
+                last = rng["last_address"]
+                first = rng["first_address"]
+                if last - first <= addr_count:
+                    continue
+
+                next_address = None
+                if mac_address:
+                    next_address = mac_address
+                else:
+                    address = True
+                    while address:
+                        next_address = rng["next_auto_assign_mac"]
+                        rng["next_auto_assign_mac"] = next_address + 1
+                        address = db_api.mac_address_find(
+                            context, tenant_id=context.tenant_id,
+                            scope=db_api.ONE, address=next_address)
+
+                address = db_api.mac_address_create(
+                    context, address=next_address,
+                    mac_address_range_id=rng["id"])
+                return address

         raise exceptions.MacAddressGenerationFailure(net_id=net_id)

@@ -99,44 +106,52 @@
         if ip_address:
             ip_address = netaddr.IPAddress(ip_address)

-        address = db_api.ip_address_find(
-            elevated, network_id=net_id, reuse_after=reuse_after,
-            deallocated=True, scope=db_api.ONE, ip_address=ip_address)
-        if address:
-            return db_api.ip_address_update(
-                elevated, address, deallocated=False, deallocated_at=None)
-
-        subnet = self._choose_available_subnet(
-            elevated, net_id, ip_address=ip_address, version=version)
-        ip_policy_rules = models.IPPolicy.get_ip_policy_rule_set(subnet)
-
-        # Creating this IP for the first time
-        next_ip = None
-        if ip_address:
-            next_ip = ip_address
-            address = db_api.ip_address_find(
-                elevated, network_id=net_id, ip_address=next_ip,
-                tenant_id=elevated.tenant_id, scope=db_api.ONE)
-            if address:
-                raise exceptions.IpAddressGenerationFailure(net_id=net_id)
-        else:
-            address = True
-            while address:
-                next_ip_int = int(subnet["next_auto_assign_ip"])
-                next_ip = netaddr.IPAddress(next_ip_int)
-                if subnet["ip_version"] == 4:
-                    next_ip = next_ip.ipv4()
-                subnet["next_auto_assign_ip"] = next_ip_int + 1
-                if ip_policy_rules and next_ip in ip_policy_rules:
-                    continue
-                address = db_api.ip_address_find(
-                    elevated, network_id=net_id, ip_address=next_ip,
-                    tenant_id=elevated.tenant_id, scope=db_api.ONE)
-
-        address = db_api.ip_address_create(
-            elevated, address=next_ip, subnet_id=subnet["id"],
-            version=subnet["ip_version"], network_id=net_id)
-        address["deallocated"] = 0
+        with context.session.begin(subtransactions=True):
+            address = db_api.ip_address_find(
+                elevated, network_id=net_id, reuse_after=reuse_after,
+                deallocated=True, scope=db_api.ONE, ip_address=ip_address,
+                lock_mode=True)
+            if address:
+                updated_address = db_api.ip_address_update(
+                    elevated, address, deallocated=False,
+                    deallocated_at=None)
+                return updated_address
+
+        with context.session.begin(subtransactions=True):
+            subnet = self._choose_available_subnet(
+                elevated, net_id, ip_address=ip_address, version=version)
+            ip_policy_rules = models.IPPolicy.get_ip_policy_rule_set(
+                subnet)
+
+            # Creating this IP for the first time
+            next_ip = None
+            if ip_address:
+                next_ip = ip_address
+                address = db_api.ip_address_find(
+                    elevated, network_id=net_id, ip_address=next_ip,
+                    tenant_id=elevated.tenant_id, scope=db_api.ONE)
+                if address:
+                    raise exceptions.IpAddressGenerationFailure(
+                        net_id=net_id)
+            else:
+                address = True
+                while address:
+                    next_ip_int = int(subnet["next_auto_assign_ip"])
+                    next_ip = netaddr.IPAddress(next_ip_int)
+                    if subnet["ip_version"] == 4:
+                        next_ip = next_ip.ipv4()
+                    subnet["next_auto_assign_ip"] = next_ip_int + 1
+                    if ip_policy_rules and next_ip in ip_policy_rules:
+                        continue
+                    address = db_api.ip_address_find(
+                        elevated, network_id=net_id, ip_address=next_ip,
+                        tenant_id=elevated.tenant_id, scope=db_api.ONE)
+            context.session.add(subnet)
+
+            address = db_api.ip_address_create(
+                elevated, address=next_ip, subnet_id=subnet["id"],
+                version=subnet["ip_version"], network_id=net_id)
+            address["deallocated"] = 0

         payload = dict(tenant_id=address["tenant_id"],
                        ip_block_id=address["subnet_id"],

@@ -167,17 +182,19 @@
                             payload)

     def deallocate_ip_address(self, context, port, **kwargs):
-        for addr in port["ip_addresses"]:
-            # Note: only deallocate ip if this is the only port mapped to it
-            if len(addr["ports"]) == 1:
-                self._deallocate_ip_address(context, addr)
-        port["ip_addresses"] = []
+        with context.session.begin(subtransactions=True):
+            for addr in port["ip_addresses"]:
+                # Note: only deallocate ip if this is the only port mapped
+                if len(addr["ports"]) == 1:
+                    self._deallocate_ip_address(context, addr)
+            port["ip_addresses"] = []

     def deallocate_mac_address(self, context, address):
-        mac = db_api.mac_address_find(context, address=address,
-                                      scope=db_api.ONE)
-        if not mac:
-            raise exceptions.NotFound(
-                message="No MAC address %s found" % netaddr.EUI(address))
-        db_api.mac_address_update(context, mac, deallocated=True,
-                                  deallocated_at=timeutils.utcnow())
+        with context.session.begin(subtransactions=True):
+            mac = db_api.mac_address_find(context, address=address,
+                                          scope=db_api.ONE)
+            if not mac:
+                raise exceptions.NotFound(
+                    message="No MAC address %s found" % netaddr.EUI(address))
+            db_api.mac_address_update(context, mac, deallocated=True,
+                                      deallocated_at=timeutils.utcnow())


@@ -18,13 +18,9 @@ v2 Neutron Plug-in API Quark Implementation
 """
 from oslo.config import cfg
-from sqlalchemy.orm import sessionmaker, scoped_session
-from zope import sqlalchemy as zsa

 from neutron.db import api as neutron_db_api
 from neutron.extensions import securitygroup as sg_ext
 from neutron import neutron_plugin_base_v2
-from neutron.openstack.common.db.sqlalchemy import session as neutron_session
 from neutron import quota

 from quark.api import extensions

@@ -79,15 +75,8 @@ class Plugin(neutron_plugin_base_v2.NeutronPluginBaseV2,
                              "subnets_quark", "provider",
                              "ip_policies", "quotas"]

-    def _initDBMaker(self):
-        # This needs to be called after _ENGINE is configured
-        session_maker = sessionmaker(bind=neutron_session._ENGINE,
-                                     extension=zsa.ZopeTransactionExtension())
-        neutron_session._MAKER = scoped_session(session_maker)
-
     def __init__(self):
         neutron_db_api.configure_db()
-        self._initDBMaker()
         neutron_db_api.register_models(base=models.BASEV2)

     def get_mac_address_range(self, context, id, fields=None):


@@ -60,33 +60,34 @@ def create_ip_address(context, ip_address):
         raise exceptions.BadRequest(
             resource="ip_addresses",
             msg="network_id is required if device_ids are supplied.")

-    if network_id and device_ids:
-        for device_id in device_ids:
-            port = db_api.port_find(
-                context, network_id=network_id, device_id=device_id,
-                tenant_id=context.tenant_id, scope=db_api.ONE)
-            ports.append(port)
-    elif port_ids:
-        for port_id in port_ids:
-            port = db_api.port_find(context, id=port_id,
-                                    tenant_id=context.tenant_id,
-                                    scope=db_api.ONE)
-            ports.append(port)
-
-    if not ports:
-        raise exceptions.PortNotFound(port_id=port_ids,
-                                      net_id=network_id)
-
-    address = ipam_driver.allocate_ip_address(
-        context,
-        port['network_id'],
-        port['id'],
-        CONF.QUARK.ipam_reuse_after,
-        ip_version,
-        ip_address)
-
-    for port in ports:
-        port["ip_addresses"].append(address)
+    with context.session.begin():
+        if network_id and device_ids:
+            for device_id in device_ids:
+                port = db_api.port_find(
+                    context, network_id=network_id, device_id=device_id,
+                    tenant_id=context.tenant_id, scope=db_api.ONE)
+                ports.append(port)
+        elif port_ids:
+            for port_id in port_ids:
+                port = db_api.port_find(context, id=port_id,
+                                        tenant_id=context.tenant_id,
+                                        scope=db_api.ONE)
+                ports.append(port)
+
+        if not ports:
+            raise exceptions.PortNotFound(port_id=port_ids,
+                                          net_id=network_id)
+
+        address = ipam_driver.allocate_ip_address(
+            context,
+            port['network_id'],
+            port['id'],
+            CONF.QUARK.ipam_reuse_after,
+            ip_version,
+            ip_address)
+
+        for port in ports:
+            port["ip_addresses"].append(address)

     return v._make_ip_dict(address)

@@ -95,34 +96,35 @@ def update_ip_address(context, id, ip_address):
     LOG.info("update_ip_address %s for tenant %s" %
              (id, context.tenant_id))

-    address = db_api.ip_address_find(
-        context, id=id, tenant_id=context.tenant_id, scope=db_api.ONE)
-
-    if not address:
-        raise exceptions.NotFound(
-            message="No IP address found with id=%s" % id)
-
-    old_ports = address['ports']
-    port_ids = ip_address['ip_address'].get('port_ids')
-    if port_ids is None:
-        return v._make_ip_dict(address)
-
-    for port in old_ports:
-        port['ip_addresses'].remove(address)
-
-    if port_ids:
-        ports = db_api.port_find(
-            context, tenant_id=context.tenant_id, id=port_ids,
-            scope=db_api.ALL)
-
-        # NOTE: could be considered inefficient because we're converting
-        # to a list to check length. Maybe revisit
-        if len(ports) != len(port_ids):
-            raise exceptions.NotFound(
-                message="No ports not found with ids=%s" % port_ids)
-        for port in ports:
-            port['ip_addresses'].extend([address])
-    else:
-        address["deallocated"] = 1
+    with context.session.begin():
+        address = db_api.ip_address_find(
+            context, id=id, tenant_id=context.tenant_id, scope=db_api.ONE)
+
+        if not address:
+            raise exceptions.NotFound(
+                message="No IP address found with id=%s" % id)
+
+        old_ports = address['ports']
+        port_ids = ip_address['ip_address'].get('port_ids')
+        if port_ids is None:
+            return v._make_ip_dict(address)
+
+        for port in old_ports:
+            port['ip_addresses'].remove(address)
+
+        if port_ids:
+            ports = db_api.port_find(
+                context, tenant_id=context.tenant_id, id=port_ids,
+                scope=db_api.ALL)
+
+            # NOTE: could be considered inefficient because we're converting
+            # to a list to check length. Maybe revisit
+            if len(ports) != len(port_ids):
+                raise exceptions.NotFound(
+                    message="No ports not found with ids=%s" % port_ids)
+            for port in ports:
+                port['ip_addresses'].extend([address])
+        else:
+            address["deallocated"] = 1

     return v._make_ip_dict(address)


@@ -42,26 +42,27 @@ def create_ip_policy(context, ip_policy):
resource="ip_policy", resource="ip_policy",
msg="network_ids or subnet_ids not specified") msg="network_ids or subnet_ids not specified")
models = [] with context.session.begin():
if subnet_ids: models = []
subnets = db_api.subnet_find( if subnet_ids:
context, id=subnet_ids, scope=db_api.ALL) subnets = db_api.subnet_find(
if not subnets: context, id=subnet_ids, scope=db_api.ALL)
raise exceptions.SubnetNotFound(id=subnet_ids) if not subnets:
models.extend(subnets) raise exceptions.SubnetNotFound(id=subnet_ids)
models.extend(subnets)
if network_ids: if network_ids:
nets = db_api.network_find( nets = db_api.network_find(
context, id=network_ids, scope=db_api.ALL) context, id=network_ids, scope=db_api.ALL)
if not nets: if not nets:
raise exceptions.NetworkNotFound(net_id=network_ids) raise exceptions.NetworkNotFound(net_id=network_ids)
models.extend(nets) models.extend(nets)
for model in models: for model in models:
if model["ip_policy"]: if model["ip_policy"]:
raise quark_exceptions.IPPolicyAlreadyExists( raise quark_exceptions.IPPolicyAlreadyExists(
id=model["ip_policy"]["id"], n_id=model["id"]) id=model["ip_policy"]["id"], n_id=model["id"])
model["ip_policy"] = db_api.ip_policy_create(context, **ipp) model["ip_policy"] = db_api.ip_policy_create(context, **ipp)
return v._make_ip_policy_dict(model["ip_policy"]) return v._make_ip_policy_dict(model["ip_policy"])
@@ -85,46 +86,49 @@ def update_ip_policy(context, id, ip_policy):
ipp = ip_policy["ip_policy"] ipp = ip_policy["ip_policy"]
ipp_db = db_api.ip_policy_find(context, id=id, scope=db_api.ONE) with context.session.begin():
if not ipp_db: ipp_db = db_api.ip_policy_find(context, id=id, scope=db_api.ONE)
raise quark_exceptions.IPPolicyNotFound(id=id) if not ipp_db:
raise quark_exceptions.IPPolicyNotFound(id=id)
network_ids = ipp.get("network_ids") network_ids = ipp.get("network_ids")
subnet_ids = ipp.get("subnet_ids") subnet_ids = ipp.get("subnet_ids")
models = [] models = []
if subnet_ids: if subnet_ids:
for subnet in ipp_db["subnets"]: for subnet in ipp_db["subnets"]:
subnet["ip_policy"] = None subnet["ip_policy"] = None
subnets = db_api.subnet_find( subnets = db_api.subnet_find(
context, id=subnet_ids, scope=db_api.ALL) context, id=subnet_ids, scope=db_api.ALL)
if len(subnets) != len(subnet_ids): if len(subnets) != len(subnet_ids):
raise exceptions.SubnetNotFound(id=subnet_ids) raise exceptions.SubnetNotFound(id=subnet_ids)
models.extend(subnets) models.extend(subnets)
if network_ids: if network_ids:
for network in ipp_db["networks"]: for network in ipp_db["networks"]:
network["ip_policy"] = None network["ip_policy"] = None
nets = db_api.network_find(context, id=network_ids, scope=db_api.ALL) nets = db_api.network_find(context, id=network_ids,
if len(nets) != len(network_ids): scope=db_api.ALL)
raise exceptions.NetworkNotFound(net_id=network_ids) if len(nets) != len(network_ids):
models.extend(nets) raise exceptions.NetworkNotFound(net_id=network_ids)
models.extend(nets)
for model in models: for model in models:
if model["ip_policy"]: if model["ip_policy"]:
raise quark_exceptions.IPPolicyAlreadyExists( raise quark_exceptions.IPPolicyAlreadyExists(
id=model["ip_policy"]["id"], n_id=model["id"]) id=model["ip_policy"]["id"], n_id=model["id"])
model["ip_policy"] = ipp_db model["ip_policy"] = ipp_db
ipp_db = db_api.ip_policy_update(context, ipp_db, **ipp) ipp_db = db_api.ip_policy_update(context, ipp_db, **ipp)
return v._make_ip_policy_dict(ipp_db) return v._make_ip_policy_dict(ipp_db)
def delete_ip_policy(context, id): def delete_ip_policy(context, id):
LOG.info("delete_ip_policy %s for tenant %s" % (id, context.tenant_id)) LOG.info("delete_ip_policy %s for tenant %s" % (id, context.tenant_id))
ipp = db_api.ip_policy_find(context, id=id, scope=db_api.ONE) with context.session.begin():
if not ipp: ipp = db_api.ip_policy_find(context, id=id, scope=db_api.ONE)
raise quark_exceptions.IPPolicyNotFound(id=id) if not ipp:
if ipp["networks"] or ipp["subnets"]: raise quark_exceptions.IPPolicyNotFound(id=id)
raise quark_exceptions.IPPolicyInUse(id=id) if ipp["networks"] or ipp["subnets"]:
db_api.ip_policy_delete(context, ipp) raise quark_exceptions.IPPolicyInUse(id=id)
db_api.ip_policy_delete(context, ipp)


@@ -82,9 +82,10 @@ def create_mac_address_range(context, mac_range):
     LOG.info("create_mac_address_range for tenant %s" % context.tenant_id)
     cidr = mac_range["mac_address_range"]["cidr"]
     cidr, first_address, last_address = _to_mac_range(cidr)
-    new_range = db_api.mac_address_range_create(
-        context, cidr=cidr, first_address=first_address,
-        last_address=last_address, next_auto_assign_mac=first_address)
+    with context.session.begin():
+        new_range = db_api.mac_address_range_create(
+            context, cidr=cidr, first_address=first_address,
+            last_address=last_address, next_auto_assign_mac=first_address)
     return v._make_mac_range_dict(new_range)

@@ -103,8 +104,9 @@ def delete_mac_address_range(context, id):
     """
     LOG.info("delete_mac_address_range %s for tenant %s" %
              (id, context.tenant_id))
-    mar = db_api.mac_address_range_find(context, id=id, scope=db_api.ONE)
-    if not mar:
-        raise quark_exceptions.MacAddressRangeNotFound(
-            mac_address_range_id=id)
-    _delete_mac_address_range(context, mar)
+    with context.session.begin():
+        mar = db_api.mac_address_range_find(context, id=id, scope=db_api.ONE)
+        if not mar:
+            raise quark_exceptions.MacAddressRangeNotFound(
+                mac_address_range_id=id)
+        _delete_mac_address_range(context, mar)


@@ -60,46 +60,47 @@ def create_network(context, network):
""" """
LOG.info("create_network for tenant %s" % context.tenant_id) LOG.info("create_network for tenant %s" % context.tenant_id)
# Generate a uuid that we're going to hand to the backend and db with context.session.begin():
net_uuid = uuidutils.generate_uuid() # Generate a uuid that we're going to hand to the backend and db
net_uuid = uuidutils.generate_uuid()
#TODO(mdietz) this will be the first component registry hook, but #TODO(mdietz) this will be the first component registry hook, but
# lets make it work first # lets make it work first
pnet_type, phys_net, seg_id = _adapt_provider_nets(context, network) pnet_type, phys_net, seg_id = _adapt_provider_nets(context, network)
net_attrs = network["network"] net_attrs = network["network"]
# NOTE(mdietz) I think ideally we would create the providernet # NOTE(mdietz) I think ideally we would create the providernet
# elsewhere as a separate driver step that could be # elsewhere as a separate driver step that could be
# kept in a plugin and completely removed if desired. We could # kept in a plugin and completely removed if desired. We could
# have a pre-callback/observer on the netdriver create_network # have a pre-callback/observer on the netdriver create_network
# that gathers any additional parameters from the network dict # that gathers any additional parameters from the network dict
#TODO(dietz or perkins): Allow this to be overridden later with CLI #TODO(dietz or perkins): Allow this to be overridden later with CLI
default_net_type = CONF.QUARK.default_network_type default_net_type = CONF.QUARK.default_network_type
net_driver = registry.DRIVER_REGISTRY.get_driver(default_net_type) net_driver = registry.DRIVER_REGISTRY.get_driver(default_net_type)
net_driver.create_network(context, net_attrs["name"], network_id=net_uuid, net_driver.create_network(context, net_attrs["name"],
phys_type=pnet_type, phys_net=phys_net, network_id=net_uuid, phys_type=pnet_type,
segment_id=seg_id) phys_net=phys_net, segment_id=seg_id)
subs = net_attrs.pop("subnets", []) subs = net_attrs.pop("subnets", [])
net_attrs["id"] = net_uuid net_attrs["id"] = net_uuid
net_attrs["tenant_id"] = context.tenant_id net_attrs["tenant_id"] = context.tenant_id
net_attrs["network_plugin"] = default_net_type net_attrs["network_plugin"] = default_net_type
new_net = db_api.network_create(context, **net_attrs) new_net = db_api.network_create(context, **net_attrs)
new_subnets = [] new_subnets = []
for sub in subs: for sub in subs:
sub["subnet"]["network_id"] = new_net["id"] sub["subnet"]["network_id"] = new_net["id"]
sub["subnet"]["tenant_id"] = context.tenant_id sub["subnet"]["tenant_id"] = context.tenant_id
s = db_api.subnet_create(context, **sub["subnet"]) s = db_api.subnet_create(context, **sub["subnet"])
new_subnets.append(s) new_subnets.append(s)
new_net["subnets"] = new_subnets new_net["subnets"] = new_subnets
#if not security_groups.get_security_groups( #if not security_groups.get_security_groups(
# context, # context,
# filters={"id": security_groups.DEFAULT_SG_UUID}): # filters={"id": security_groups.DEFAULT_SG_UUID}):
# security_groups._create_default_security_group(context) # security_groups._create_default_security_group(context)
return v._make_network_dict(new_net) return v._make_network_dict(new_net)
@@ -115,10 +116,11 @@ def update_network(context, id, network):
""" """
LOG.info("update_network %s for tenant %s" % LOG.info("update_network %s for tenant %s" %
(id, context.tenant_id)) (id, context.tenant_id))
net = db_api.network_find(context, id=id, scope=db_api.ONE) with context.session.begin():
if not net: net = db_api.network_find(context, id=id, scope=db_api.ONE)
raise exceptions.NetworkNotFound(net_id=id) if not net:
net = db_api.network_update(context, net, **network["network"]) raise exceptions.NetworkNotFound(net_id=id)
net = db_api.network_update(context, net, **network["network"])
return v._make_network_dict(net) return v._make_network_dict(net)
@@ -198,16 +200,17 @@ def delete_network(context, id):
: param id: UUID representing the network to delete. : param id: UUID representing the network to delete.
""" """
LOG.info("delete_network %s for tenant %s" % (id, context.tenant_id)) LOG.info("delete_network %s for tenant %s" % (id, context.tenant_id))
net = db_api.network_find(context, id=id, scope=db_api.ONE) with context.session.begin():
if not net: net = db_api.network_find(context, id=id, scope=db_api.ONE)
raise exceptions.NetworkNotFound(net_id=id) if not net:
if net.ports: raise exceptions.NetworkNotFound(net_id=id)
raise exceptions.NetworkInUse(net_id=id) if net.ports:
net_driver = registry.DRIVER_REGISTRY.get_driver(net["network_plugin"]) raise exceptions.NetworkInUse(net_id=id)
net_driver.delete_network(context, id) net_driver = registry.DRIVER_REGISTRY.get_driver(net["network_plugin"])
for subnet in net["subnets"]: net_driver.delete_network(context, id)
subnets._delete_subnet(context, subnet) for subnet in net["subnets"]:
db_api.network_delete(context, net) subnets._delete_subnet(context, subnet)
db_api.network_delete(context, net)
def _diag_network(context, network, fields): def _diag_network(context, network, fields):


@@ -52,61 +52,63 @@ def create_port(context, port):
net_id = port_attrs["network_id"] net_id = port_attrs["network_id"]
addresses = [] addresses = []
port_id = uuidutils.generate_uuid() with context.session.begin():
port_id = uuidutils.generate_uuid()
net = db_api.network_find(context, id=net_id, shared=True, net = db_api.network_find(context, id=net_id,
segment_id=segment_id, scope=db_api.ONE) segment_id=segment_id, scope=db_api.ONE)
if not net:
# Maybe it's a tenant network
net = db_api.network_find(context, id=net_id, scope=db_api.ONE)
if not net: if not net:
raise exceptions.NetworkNotFound(net_id=net_id) # Maybe it's a tenant network
net = db_api.network_find(context, id=net_id, scope=db_api.ONE)
if not net:
raise exceptions.NetworkNotFound(net_id=net_id)
quota.QUOTAS.limit_check( quota.QUOTAS.limit_check(
context, context.tenant_id, context, context.tenant_id,
ports_per_network=len(net.get('ports', [])) + 1) ports_per_network=len(net.get('ports', [])) + 1)
if fixed_ips: if fixed_ips:
for fixed_ip in fixed_ips: for fixed_ip in fixed_ips:
subnet_id = fixed_ip.get("subnet_id") subnet_id = fixed_ip.get("subnet_id")
ip_address = fixed_ip.get("ip_address") ip_address = fixed_ip.get("ip_address")
if not (subnet_id and ip_address): if not (subnet_id and ip_address):
raise exceptions.BadRequest( raise exceptions.BadRequest(
resource="fixed_ips", resource="fixed_ips",
msg="subnet_id and ip_address required") msg="subnet_id and ip_address required")
addresses.append(ipam_driver.allocate_ip_address(
context, net["id"], port_id, CONF.QUARK.ipam_reuse_after,
ip_address=ip_address))
else:
addresses.append(ipam_driver.allocate_ip_address( addresses.append(ipam_driver.allocate_ip_address(
context, net["id"], port_id, CONF.QUARK.ipam_reuse_after, context, net["id"], port_id, CONF.QUARK.ipam_reuse_after))
ip_address=ip_address))
else:
addresses.append(ipam_driver.allocate_ip_address(
context, net["id"], port_id, CONF.QUARK.ipam_reuse_after))
group_ids, security_groups = v.make_security_group_list( group_ids, security_groups = v.make_security_group_list(
context, port["port"].pop("security_groups", None)) context, port["port"].pop("security_groups", None))
mac = ipam_driver.allocate_mac_address(context, net["id"], port_id, mac = ipam_driver.allocate_mac_address(context, net["id"], port_id,
CONF.QUARK.ipam_reuse_after, CONF.QUARK.ipam_reuse_after,
mac_address=mac_address) mac_address=mac_address)
mac_address_string = str(netaddr.EUI(mac['address'], mac_address_string = str(netaddr.EUI(mac['address'],
dialect=netaddr.mac_unix)) dialect=netaddr.mac_unix))
address_pairs = [{'mac_address': mac_address_string, address_pairs = [{'mac_address': mac_address_string,
'ip_address': address.get('address_readable', '')} 'ip_address': address.get('address_readable', '')}
for address in addresses] for address in addresses]
net_driver = registry.DRIVER_REGISTRY.get_driver(net["network_plugin"]) net_driver = registry.DRIVER_REGISTRY.get_driver(net["network_plugin"])
backend_port = net_driver.create_port(context, net["id"], port_id=port_id, backend_port = net_driver.create_port(context, net["id"],
security_groups=group_ids, port_id=port_id,
allowed_pairs=address_pairs) security_groups=group_ids,
allowed_pairs=address_pairs)
port_attrs["network_id"] = net["id"] port_attrs["network_id"] = net["id"]
port_attrs["id"] = port_id port_attrs["id"] = port_id
port_attrs["security_groups"] = security_groups port_attrs["security_groups"] = security_groups
LOG.info("Including extra plugin attrs: %s" % backend_port) LOG.info("Including extra plugin attrs: %s" % backend_port)
port_attrs.update(backend_port) port_attrs.update(backend_port)
new_port = db_api.port_create( new_port = db_api.port_create(
context, addresses=addresses, mac_address=mac["address"], context, addresses=addresses, mac_address=mac["address"],
backend_key=backend_port["uuid"], **port_attrs) backend_key=backend_port["uuid"], **port_attrs)
# Include any driver specific bits # Include any driver specific bits
return v._make_port_dict(new_port) return v._make_port_dict(new_port)
@@ -121,46 +123,47 @@ def update_port(context, id, port):
neutron/api/v2/attributes.py. neutron/api/v2/attributes.py.
""" """
LOG.info("update_port %s for tenant %s" % (id, context.tenant_id)) LOG.info("update_port %s for tenant %s" % (id, context.tenant_id))
port_db = db_api.port_find(context, id=id, scope=db_api.ONE) with context.session.begin():
if not port_db: port_db = db_api.port_find(context, id=id, scope=db_api.ONE)
raise exceptions.PortNotFound(port_id=id) if not port_db:
raise exceptions.PortNotFound(port_id=id)
address_pairs = [] address_pairs = []
fixed_ips = port["port"].pop("fixed_ips", None) fixed_ips = port["port"].pop("fixed_ips", None)
if fixed_ips: if fixed_ips:
ipam_driver.deallocate_ip_address( ipam_driver.deallocate_ip_address(
context, port_db, ipam_reuse_after=CONF.QUARK.ipam_reuse_after) context, port_db, ipam_reuse_after=CONF.QUARK.ipam_reuse_after)
addresses = [] addresses = []
for fixed_ip in fixed_ips: for fixed_ip in fixed_ips:
subnet_id = fixed_ip.get("subnet_id") subnet_id = fixed_ip.get("subnet_id")
ip_address = fixed_ip.get("ip_address") ip_address = fixed_ip.get("ip_address")
if not (subnet_id and ip_address): if not (subnet_id and ip_address):
raise exceptions.BadRequest( raise exceptions.BadRequest(
resource="fixed_ips", resource="fixed_ips",
msg="subnet_id and ip_address required") msg="subnet_id and ip_address required")
# Note: we don't allow overlapping subnets, thus subnet_id is # Note: we don't allow overlapping subnets, thus subnet_id is
# ignored. # ignored.
addresses.append(ipam_driver.allocate_ip_address( addresses.append(ipam_driver.allocate_ip_address(
context, port_db["network_id"], id, context, port_db["network_id"], id,
CONF.QUARK.ipam_reuse_after, ip_address=ip_address)) CONF.QUARK.ipam_reuse_after, ip_address=ip_address))
port["port"]["addresses"] = addresses port["port"]["addresses"] = addresses
mac_address_string = str(netaddr.EUI(port_db.mac_address, mac_address_string = str(netaddr.EUI(port_db.mac_address,
dialect=netaddr.mac_unix)) dialect=netaddr.mac_unix))
address_pairs = [{'mac_address': mac_address_string, address_pairs = [{'mac_address': mac_address_string,
'ip_address': 'ip_address':
address.get('address_readable', '')} address.get('address_readable', '')}
for address in addresses] for address in addresses]
group_ids, security_groups = v.make_security_group_list( group_ids, security_groups = v.make_security_group_list(
context, port["port"].pop("security_groups", None)) context, port["port"].pop("security_groups", None))
net_driver = registry.DRIVER_REGISTRY.get_driver( net_driver = registry.DRIVER_REGISTRY.get_driver(
port_db.network["network_plugin"]) port_db.network["network_plugin"])
net_driver.update_port(context, port_id=port_db.backend_key, net_driver.update_port(context, port_id=port_db.backend_key,
security_groups=group_ids, security_groups=group_ids,
allowed_pairs=address_pairs) allowed_pairs=address_pairs)
port["port"]["security_groups"] = security_groups port["port"]["security_groups"] = security_groups
port = db_api.port_update(context, port_db, **port["port"]) port = db_api.port_update(context, port_db, **port["port"])
return v._make_port_dict(port) return v._make_port_dict(port)
@@ -170,46 +173,48 @@ def post_update_port(context, id, port):
raise exceptions.BadRequest(resource="ports", raise exceptions.BadRequest(resource="ports",
msg="Port body required") msg="Port body required")
port_db = db_api.port_find(context, id=id, scope=db_api.ONE) with context.session.begin():
if not port_db: port_db = db_api.port_find(context, id=id, scope=db_api.ONE)
raise exceptions.PortNotFound(port_id=id, net_id="") if not port_db:
raise exceptions.PortNotFound(port_id=id, net_id="")
port = port["port"] port = port["port"]
if "fixed_ips" in port and port["fixed_ips"]: if "fixed_ips" in port and port["fixed_ips"]:
for ip in port["fixed_ips"]: for ip in port["fixed_ips"]:
address = None address = None
if ip: if ip:
if "ip_id" in ip: if "ip_id" in ip:
ip_id = ip["ip_id"] ip_id = ip["ip_id"]
address = db_api.ip_address_find( address = db_api.ip_address_find(
context, id=ip_id, tenant_id=context.tenant_id, context, id=ip_id, tenant_id=context.tenant_id,
scope=db_api.ONE) scope=db_api.ONE)
elif "ip_address" in ip: elif "ip_address" in ip:
ip_address = ip["ip_address"] ip_address = ip["ip_address"]
net_address = netaddr.IPAddress(ip_address) net_address = netaddr.IPAddress(ip_address)
address = db_api.ip_address_find( address = db_api.ip_address_find(
context, ip_address=net_address, context, ip_address=net_address,
network_id=port_db["network_id"], network_id=port_db["network_id"],
tenant_id=context.tenant_id, scope=db_api.ONE) tenant_id=context.tenant_id, scope=db_api.ONE)
if not address: if not address:
address = ipam_driver.allocate_ip_address( address = ipam_driver.allocate_ip_address(
context, port_db["network_id"], id, context, port_db["network_id"], id,
CONF.QUARK.ipam_reuse_after, ip_address=ip_address) CONF.QUARK.ipam_reuse_after,
else: ip_address=ip_address)
address = ipam_driver.allocate_ip_address( else:
context, port_db["network_id"], id, address = ipam_driver.allocate_ip_address(
CONF.QUARK.ipam_reuse_after) context, port_db["network_id"], id,
CONF.QUARK.ipam_reuse_after)
address["deallocated"] = 0 address["deallocated"] = 0
already_contained = False already_contained = False
for port_address in port_db["ip_addresses"]: for port_address in port_db["ip_addresses"]:
if address["id"] == port_address["id"]: if address["id"] == port_address["id"]:
already_contained = True already_contained = True
break break
if not already_contained: if not already_contained:
port_db["ip_addresses"].append(address) port_db["ip_addresses"].append(address)
return v._make_port_dict(port_db) return v._make_port_dict(port_db)
@@ -296,15 +301,16 @@ def delete_port(context, id):
if not port: if not port:
raise exceptions.PortNotFound(net_id=id) raise exceptions.PortNotFound(net_id=id)
backend_key = port["backend_key"] with context.session.begin():
mac_address = netaddr.EUI(port["mac_address"]).value backend_key = port["backend_key"]
ipam_driver.deallocate_mac_address(context, mac_address) mac_address = netaddr.EUI(port["mac_address"]).value
ipam_driver.deallocate_ip_address( ipam_driver.deallocate_mac_address(context, mac_address)
context, port, ipam_reuse_after=CONF.QUARK.ipam_reuse_after) ipam_driver.deallocate_ip_address(
db_api.port_delete(context, port) context, port, ipam_reuse_after=CONF.QUARK.ipam_reuse_after)
net_driver = registry.DRIVER_REGISTRY.get_driver( db_api.port_delete(context, port)
port.network["network_plugin"]) net_driver = registry.DRIVER_REGISTRY.get_driver(
net_driver.delete_port(context, backend_key) port.network["network_plugin"])
net_driver.delete_port(context, backend_key)
def disassociate_port(context, id, ip_address_id): def disassociate_port(context, id, ip_address_id):
@@ -317,19 +323,20 @@ def disassociate_port(context, id, ip_address_id):
""" """
LOG.info("disassociate_port %s for tenant %s ip_address_id %s" % LOG.info("disassociate_port %s for tenant %s ip_address_id %s" %
(id, context.tenant_id, ip_address_id)) (id, context.tenant_id, ip_address_id))
port = db_api.port_find(context, id=id, ip_address_id=[ip_address_id], with context.session.begin():
scope=db_api.ONE) port = db_api.port_find(context, id=id, ip_address_id=[ip_address_id],
scope=db_api.ONE)
if not port: if not port:
raise exceptions.PortNotFound(port_id=id, net_id='') raise exceptions.PortNotFound(port_id=id, net_id='')
the_address = [address for address in port["ip_addresses"] the_address = [address for address in port["ip_addresses"]
if address["id"] == ip_address_id][0] if address["id"] == ip_address_id][0]
port["ip_addresses"] = [address for address in port["ip_addresses"] port["ip_addresses"] = [address for address in port["ip_addresses"]
if address.id != ip_address_id] if address.id != ip_address_id]
if len(the_address["ports"]) == 0: if len(the_address["ports"]) == 0:
the_address["deallocated"] = 1 the_address["deallocated"] = 1
return v._make_port_dict(port) return v._make_port_dict(port)


@@ -49,23 +49,24 @@ def create_route(context, route):
     LOG.info("create_route for tenant %s" % context.tenant_id)
     route = route["route"]
     subnet_id = route["subnet_id"]
-    subnet = db_api.subnet_find(context, id=subnet_id, scope=db_api.ONE)
-    if not subnet:
-        raise exceptions.SubnetNotFound(subnet_id=subnet_id)
-
-    # TODO(anyone): May want to denormalize the cidr values into columns
-    # to achieve single db lookup on conflict check
-    route_cidr = netaddr.IPNetwork(route["cidr"])
-    subnet_routes = db_api.route_find(context, subnet_id=subnet_id,
-                                      scope=db_api.ALL)
-    for sub_route in subnet_routes:
-        sub_route_cidr = netaddr.IPNetwork(sub_route["cidr"])
-        if sub_route_cidr.value == DEFAULT_ROUTE.value:
-            continue
-        if route_cidr in sub_route_cidr or sub_route_cidr in route_cidr:
-            raise quark_exceptions.RouteConflict(
-                route_id=sub_route["id"], cidr=str(route_cidr))
-    new_route = db_api.route_create(context, **route)
+    with context.session.begin():
+        subnet = db_api.subnet_find(context, id=subnet_id, scope=db_api.ONE)
+        if not subnet:
+            raise exceptions.SubnetNotFound(subnet_id=subnet_id)
+
+        # TODO(anyone): May want to denormalize the cidr values into columns
+        # to achieve single db lookup on conflict check
+        route_cidr = netaddr.IPNetwork(route["cidr"])
+        subnet_routes = db_api.route_find(context, subnet_id=subnet_id,
+                                          scope=db_api.ALL)
+        for sub_route in subnet_routes:
+            sub_route_cidr = netaddr.IPNetwork(sub_route["cidr"])
+            if sub_route_cidr.value == DEFAULT_ROUTE.value:
+                continue
+            if route_cidr in sub_route_cidr or sub_route_cidr in route_cidr:
+                raise quark_exceptions.RouteConflict(
+                    route_id=sub_route["id"], cidr=str(route_cidr))
+        new_route = db_api.route_create(context, **route)
     return v._make_route_dict(new_route)

@@ -74,7 +75,8 @@ def delete_route(context, id):
     # admin and only filter on tenant if they aren't. Correct
     # for all the above later
     LOG.info("delete_route %s for tenant %s" % (id, context.tenant_id))
-    route = db_api.route_find(context, id, scope=db_api.ONE)
-    if not route:
-        raise quark_exceptions.RouteNotFound(route_id=id)
-    db_api.route_delete(context, route)
+    with context.session.begin():
+        route = db_api.route_find(context, id, scope=db_api.ONE)
+        if not route:
+            raise quark_exceptions.RouteNotFound(route_id=id)
+        db_api.route_delete(context, route)


@@ -76,16 +76,17 @@ def create_security_group(context, security_group, net_driver):
raise sg_ext.SecurityGroupDefaultAlreadyExists() raise sg_ext.SecurityGroupDefaultAlreadyExists()
group_id = uuidutils.generate_uuid() group_id = uuidutils.generate_uuid()
net_driver.create_security_group( with context.session.begin():
context, net_driver.create_security_group(
group_name, context,
group_id=group_id, group_name,
**group) group_id=group_id,
**group)
group["id"] = group_id group["id"] = group_id
group["name"] = group_name group["name"] = group_name
group["tenant_id"] = context.tenant_id group["tenant_id"] = context.tenant_id
dbgroup = db_api.security_group_create(context, **group) dbgroup = db_api.security_group_create(context, **group)
return v._make_security_group_dict(dbgroup) return v._make_security_group_dict(dbgroup)
@@ -121,21 +122,23 @@ def _create_default_security_group(context, net_driver):
def create_security_group_rule(context, security_group_rule, net_driver): def create_security_group_rule(context, security_group_rule, net_driver):
LOG.info("create_security_group for tenant %s" % LOG.info("create_security_group for tenant %s" %
(context.tenant_id)) (context.tenant_id))
rule = _validate_security_group_rule(
context, security_group_rule["security_group_rule"])
rule["id"] = uuidutils.generate_uuid()
group_id = rule["security_group_id"] with context.session.begin():
group = db_api.security_group_find(context, id=group_id, rule = _validate_security_group_rule(
scope=db_api.ONE) context, security_group_rule["security_group_rule"])
if not group: rule["id"] = uuidutils.generate_uuid()
raise sg_ext.SecurityGroupNotFound(group_id=group_id)
quota.QUOTAS.limit_check( group_id = rule["security_group_id"]
context, context.tenant_id, group = db_api.security_group_find(context, id=group_id,
security_rules_per_group=len(group.get("rules", [])) + 1) scope=db_api.ONE)
if not group:
raise sg_ext.SecurityGroupNotFound(group_id=group_id)
net_driver.create_security_group_rule(context, group_id, rule) quota.QUOTAS.limit_check(
context, context.tenant_id,
security_rules_per_group=len(group.get("rules", [])) + 1)
net_driver.create_security_group_rule(context, group_id, rule)
return v._make_security_group_rule_dict( return v._make_security_group_rule_dict(
db_api.security_group_rule_create(context, **rule)) db_api.security_group_rule_create(context, **rule))
@@ -145,37 +148,39 @@ def delete_security_group(context, id, net_driver):
LOG.info("delete_security_group %s for tenant %s" % LOG.info("delete_security_group %s for tenant %s" %
(id, context.tenant_id)) (id, context.tenant_id))
group = db_api.security_group_find(context, id=id, scope=db_api.ONE) with context.session.begin():
group = db_api.security_group_find(context, id=id, scope=db_api.ONE)
#TODO(anyone): name and ports are lazy-loaded. Could be good op later #TODO(anyone): name and ports are lazy-loaded. Could be good op later
if not group: if not group:
raise sg_ext.SecurityGroupNotFound(group_id=id) raise sg_ext.SecurityGroupNotFound(group_id=id)
if id == DEFAULT_SG_UUID or group.name == "default": if id == DEFAULT_SG_UUID or group.name == "default":
raise sg_ext.SecurityGroupCannotRemoveDefault() raise sg_ext.SecurityGroupCannotRemoveDefault()
if group.ports: if group.ports:
raise sg_ext.SecurityGroupInUse(id=id) raise sg_ext.SecurityGroupInUse(id=id)
net_driver.delete_security_group(context, id) net_driver.delete_security_group(context, id)
db_api.security_group_delete(context, group) db_api.security_group_delete(context, group)
def delete_security_group_rule(context, id, net_driver): def delete_security_group_rule(context, id, net_driver):
LOG.info("delete_security_group %s for tenant %s" % LOG.info("delete_security_group %s for tenant %s" %
(id, context.tenant_id)) (id, context.tenant_id))
rule = db_api.security_group_rule_find(context, id=id, with context.session.begin():
rule = db_api.security_group_rule_find(context, id=id,
scope=db_api.ONE)
if not rule:
raise sg_ext.SecurityGroupRuleNotFound(group_id=id)
group = db_api.security_group_find(context, id=rule["group_id"],
scope=db_api.ONE) scope=db_api.ONE)
if not rule: if not group:
raise sg_ext.SecurityGroupRuleNotFound(group_id=id) raise sg_ext.SecurityGroupNotFound(id=id)
group = db_api.security_group_find(context, id=rule["group_id"], net_driver.delete_security_group_rule(
scope=db_api.ONE) context, group.id, v._make_security_group_rule_dict(rule))
if not group:
raise sg_ext.SecurityGroupNotFound(id=id)
net_driver.delete_security_group_rule( rule["id"] = id
context, group.id, v._make_security_group_rule_dict(rule)) db_api.security_group_rule_delete(context, rule)
rule["id"] = id
db_api.security_group_rule_delete(context, rule)
def get_security_group(context, id, fields=None): def get_security_group(context, id, fields=None):
@@ -219,8 +224,9 @@ def update_security_group(context, id, security_group, net_driver):
if id == DEFAULT_SG_UUID: if id == DEFAULT_SG_UUID:
raise sg_ext.SecurityGroupCannotUpdateDefault() raise sg_ext.SecurityGroupCannotUpdateDefault()
new_group = security_group["security_group"] new_group = security_group["security_group"]
group = db_api.security_group_find(context, id=id, scope=db_api.ONE) with context.session.begin():
net_driver.update_security_group(context, id, **new_group) group = db_api.security_group_find(context, id=id, scope=db_api.ONE)
net_driver.update_security_group(context, id, **new_group)
db_group = db_api.security_group_update(context, group, **new_group) db_group = db_api.security_group_update(context, group, **new_group)
return v._make_security_group_dict(db_group) return v._make_security_group_dict(db_group)


@@ -85,53 +85,54 @@ def create_subnet(context, subnet):
LOG.info("create_subnet for tenant %s" % context.tenant_id) LOG.info("create_subnet for tenant %s" % context.tenant_id)
net_id = subnet["subnet"]["network_id"] net_id = subnet["subnet"]["network_id"]
net = db_api.network_find(context, id=net_id, scope=db_api.ONE) with context.session.begin():
if not net: net = db_api.network_find(context, id=net_id, scope=db_api.ONE)
raise exceptions.NetworkNotFound(net_id=net_id) if not net:
raise exceptions.NetworkNotFound(net_id=net_id)
sub_attrs = subnet["subnet"] sub_attrs = subnet["subnet"]
_validate_subnet_cidr(context, net_id, sub_attrs["cidr"]) _validate_subnet_cidr(context, net_id, sub_attrs["cidr"])
cidr = netaddr.IPNetwork(sub_attrs["cidr"]) cidr = netaddr.IPNetwork(sub_attrs["cidr"])
gateway_ip = utils.pop_param(sub_attrs, "gateway_ip", str(cidr[1])) gateway_ip = utils.pop_param(sub_attrs, "gateway_ip", str(cidr[1]))
dns_ips = utils.pop_param(sub_attrs, "dns_nameservers", []) dns_ips = utils.pop_param(sub_attrs, "dns_nameservers", [])
host_routes = utils.pop_param(sub_attrs, "host_routes", []) host_routes = utils.pop_param(sub_attrs, "host_routes", [])
allocation_pools = utils.pop_param(sub_attrs, "allocation_pools", None) allocation_pools = utils.pop_param(sub_attrs, "allocation_pools", None)
sub_attrs["network"] = net sub_attrs["network"] = net
new_subnet = db_api.subnet_create(context, **sub_attrs) new_subnet = db_api.subnet_create(context, **sub_attrs)
default_route = None default_route = None
for route in host_routes: for route in host_routes:
netaddr_route = netaddr.IPNetwork(route["destination"]) netaddr_route = netaddr.IPNetwork(route["destination"])
if netaddr_route.value == routes.DEFAULT_ROUTE.value: if netaddr_route.value == routes.DEFAULT_ROUTE.value:
default_route = route default_route = route
gateway_ip = default_route["nexthop"] gateway_ip = default_route["nexthop"]
new_subnet["routes"].append(db_api.route_create( new_subnet["routes"].append(db_api.route_create(
context, cidr=route["destination"], gateway=route["nexthop"])) context, cidr=route["destination"], gateway=route["nexthop"]))
if default_route is None: if default_route is None:
new_subnet["routes"].append(db_api.route_create( new_subnet["routes"].append(db_api.route_create(
context, cidr=str(routes.DEFAULT_ROUTE), gateway=gateway_ip)) context, cidr=str(routes.DEFAULT_ROUTE), gateway=gateway_ip))
for dns_ip in dns_ips: for dns_ip in dns_ips:
new_subnet["dns_nameservers"].append(db_api.dns_create( new_subnet["dns_nameservers"].append(db_api.dns_create(
context, ip=netaddr.IPAddress(dns_ip))) context, ip=netaddr.IPAddress(dns_ip)))
if isinstance(allocation_pools, list): if isinstance(allocation_pools, list):
ranges = [] ranges = []
cidrset = netaddr.IPSet([netaddr.IPNetwork(new_subnet["cidr"])]) cidrset = netaddr.IPSet([netaddr.IPNetwork(new_subnet["cidr"])])
for p in allocation_pools: for p in allocation_pools:
cidrset -= netaddr.IPSet(netaddr.IPRange(p["start"], p["end"])) cidrset -= netaddr.IPSet(netaddr.IPRange(p["start"], p["end"]))
non_allocation_pools = v._pools_from_cidr(cidrset) non_allocation_pools = v._pools_from_cidr(cidrset)
for p in non_allocation_pools: for p in non_allocation_pools:
r = netaddr.IPRange(p["start"], p["end"]) r = netaddr.IPRange(p["start"], p["end"])
ranges.append(dict( ranges.append(dict(
length=len(r), length=len(r),
offset=int(r[0]) - int(cidr[0]))) offset=int(r[0]) - int(cidr[0])))
new_subnet["ip_policy"] = db_api.ip_policy_create(context, new_subnet["ip_policy"] = db_api.ip_policy_create(context,
exclude=ranges) exclude=ranges)
subnet_dict = v._make_subnet_dict(new_subnet, subnet_dict = v._make_subnet_dict(new_subnet,
default_route=routes.DEFAULT_ROUTE) default_route=routes.DEFAULT_ROUTE)
@@ -161,49 +162,50 @@ def update_subnet(context, id, subnet):
LOG.info("update_subnet %s for tenant %s" % LOG.info("update_subnet %s for tenant %s" %
(id, context.tenant_id)) (id, context.tenant_id))
subnet_db = db_api.subnet_find(context, id=id, scope=db_api.ONE) with context.session.begin():
if not subnet_db: subnet_db = db_api.subnet_find(context, id=id, scope=db_api.ONE)
raise exceptions.SubnetNotFound(id=id) if not subnet_db:
raise exceptions.SubnetNotFound(id=id)
s = subnet["subnet"] s = subnet["subnet"]
dns_ips = s.pop("dns_nameservers", []) dns_ips = s.pop("dns_nameservers", [])
host_routes = s.pop("host_routes", []) host_routes = s.pop("host_routes", [])
gateway_ip = s.pop("gateway_ip", None) gateway_ip = s.pop("gateway_ip", None)
if gateway_ip: if gateway_ip:
default_route = None default_route = None
for route in host_routes:
netaddr_route = netaddr.IPNetwork(route["destination"])
if netaddr_route.value == routes.DEFAULT_ROUTE.value:
default_route = route
break
if default_route is None:
route_model = db_api.route_find(
context, cidr=str(routes.DEFAULT_ROUTE), subnet_id=id,
scope=db_api.ONE)
if route_model:
db_api.route_update(context, route_model,
gateway=gateway_ip)
else:
db_api.route_create(context,
cidr=str(routes.DEFAULT_ROUTE),
gateway=gateway_ip, subnet_id=id)
if dns_ips:
subnet_db["dns_nameservers"] = []
for dns_ip in dns_ips:
subnet_db["dns_nameservers"].append(db_api.dns_create(
context,
ip=netaddr.IPAddress(dns_ip)))
if host_routes:
subnet_db["routes"] = []
for route in host_routes: for route in host_routes:
netaddr_route = netaddr.IPNetwork(route["destination"]) subnet_db["routes"].append(db_api.route_create(
if netaddr_route.value == routes.DEFAULT_ROUTE.value: context, cidr=route["destination"], gateway=route["nexthop"]))
default_route = route
break
if default_route is None:
route_model = db_api.route_find(
context, cidr=str(routes.DEFAULT_ROUTE), subnet_id=id,
scope=db_api.ONE)
if route_model:
db_api.route_update(context, route_model,
gateway=gateway_ip)
else:
db_api.route_create(context,
cidr=str(routes.DEFAULT_ROUTE),
gateway=gateway_ip, subnet_id=id)
if dns_ips: subnet = db_api.subnet_update(context, subnet_db, **s)
subnet_db["dns_nameservers"] = []
for dns_ip in dns_ips:
subnet_db["dns_nameservers"].append(db_api.dns_create(
context,
ip=netaddr.IPAddress(dns_ip)))
if host_routes:
subnet_db["routes"] = []
for route in host_routes:
subnet_db["routes"].append(db_api.route_create(
context, cidr=route["destination"], gateway=route["nexthop"]))
subnet = db_api.subnet_update(context, subnet_db, **s)
return v._make_subnet_dict(subnet, default_route=routes.DEFAULT_ROUTE) return v._make_subnet_dict(subnet, default_route=routes.DEFAULT_ROUTE)
@@ -292,22 +294,23 @@ def delete_subnet(context, id):
: param id: UUID representing the subnet to delete. : param id: UUID representing the subnet to delete.
""" """
LOG.info("delete_subnet %s for tenant %s" % (id, context.tenant_id)) LOG.info("delete_subnet %s for tenant %s" % (id, context.tenant_id))
subnet = db_api.subnet_find(context, id=id, scope=db_api.ONE) with context.session.begin():
if not subnet: subnet = db_api.subnet_find(context, id=id, scope=db_api.ONE)
raise exceptions.SubnetNotFound(subnet_id=id) if not subnet:
raise exceptions.SubnetNotFound(subnet_id=id)
payload = dict(tenant_id=subnet["tenant_id"], payload = dict(tenant_id=subnet["tenant_id"],
ip_block_id=subnet["id"], ip_block_id=subnet["id"],
created_at=subnet["created_at"], created_at=subnet["created_at"],
deleted_at=timeutils.utcnow()) deleted_at=timeutils.utcnow())
_delete_subnet(context, subnet) _delete_subnet(context, subnet)
notifier_api.notify(context, notifier_api.notify(context,
notifier_api.publisher_id("network"), notifier_api.publisher_id("network"),
"ip_block.delete", "ip_block.delete",
notifier_api.CONF.default_notification_level, notifier_api.CONF.default_notification_level,
payload) payload)
def diagnose_subnet(context, id, fields): def diagnose_subnet(context, id, fields):


@@ -87,6 +87,7 @@ def _make_subnet_dict(subnet, default_route=None, fields=None):
            "allocation_pools": _allocation_pools(subnet),
            "dns_nameservers": dns_nameservers or [],
            "cidr": subnet.get("cidr"),
+           "shared": STRATEGY.is_parent_network(net_id),
            "enable_dhcp": None}

 def _host_route(route):


@@ -15,6 +15,7 @@
 import contextlib
 import copy
+import time
 import uuid

 import mock

@@ -697,7 +698,17 @@ class TestQuarkDeleteSubnet(test_quark_plugin.TestQuarkPlugin):
 class TestSubnetsNotification(test_quark_plugin.TestQuarkPlugin):
     @contextlib.contextmanager
     def _stubs(self, s, deleted_at=None):
+        class FakeContext(object):
+            def __enter__(*args, **kwargs):
+                pass
+
+            def __exit__(*args, **kwargs):
+                pass
+
+        self.context.session.begin = FakeContext
+
         s["network"] = models.Network()
+        s["network"]["created_at"] = s["created_at"]
         subnet = models.Subnet(**s)
         db_mod = "quark.db.api"
         api_mod = "neutron.openstack.common.notifier.api"

@@ -706,13 +717,15 @@ class TestSubnetsNotification(test_quark_plugin.TestQuarkPlugin):
             mock.patch("%s.subnet_find" % db_mod),
             mock.patch("%s.network_find" % db_mod),
             mock.patch("%s.subnet_create" % db_mod),
+            mock.patch("%s.ip_policy_create" % db_mod),
             mock.patch("%s.subnet_delete" % db_mod),
             mock.patch("%s.notify" % api_mod),
             mock.patch("%s.utcnow" % time_mod)
-        ) as (sub_find, net_find, sub_create, sub_del, notify, time):
+        ) as (sub_find, net_find, sub_create, pol_cre, sub_del, notify,
+              time_func):
             sub_create.return_value = subnet
             sub_find.return_value = subnet
-            time.return_value = deleted_at
+            time_func.return_value = deleted_at
             yield notify

     def test_create_subnet_notification(self):

@@ -730,8 +743,10 @@ class TestSubnetsNotification(test_quark_plugin.TestQuarkPlugin):
                 created_at=s["created_at"]))

     def test_delete_subnet_notification(self):
-        s = dict(tenant_id=1, id=1, created_at="123")
-        with self._stubs(s, deleted_at="456") as notify:
+        now = time.strftime('%Y-%m-%d %H:%M:%S')
+        later = time.strftime('%Y-%m-%d %H:%M:%S')
+        s = dict(tenant_id=1, id=1, created_at=now)
+        with self._stubs(s, deleted_at=later) as notify:
             self.plugin.delete_subnet(self.context, 1)
             notify.assert_called_once_with(
                 self.context,

@@ -741,7 +756,7 @@ class TestSubnetsNotification(test_quark_plugin.TestQuarkPlugin):
                 dict(tenant_id=s["tenant_id"],
                      created_at=s["created_at"],
                      ip_block_id=s["id"],
-                     deleted_at="456"))
+                     deleted_at=later))

 class TestQuarkDiagnoseSubnets(test_quark_plugin.TestQuarkPlugin):


@@ -24,3 +24,15 @@ class TestBase(unittest2.TestCase):
     def setUp(self):
         super(TestBase, self).setUp()
         self.context = context.Context('fake', 'fake', is_admin=False)
+
+        class FakeContext(object):
+            def __new__(cls, *args, **kwargs):
+                return super(FakeContext, cls).__new__(cls)
+
+            def __enter__(*args, **kwargs):
+                pass
+
+            def __exit__(*args, **kwargs):
+                pass
+
+        self.context.session.begin = FakeContext


@@ -202,6 +202,7 @@ class QuarkNewIPAddressAllocation(QuarkIpamBaseTest):
         if not addresses:
             addresses = [None]
         db_mod = "quark.db.api"
+        self.context.session.add = mock.Mock()
         with contextlib.nested(
             mock.patch("%s.ip_address_find" % db_mod),
             mock.patch("%s.subnet_find_allocation_counts" % db_mod)

@@ -310,6 +311,7 @@ class QuarkIPAddressAllocateDeallocated(QuarkIpamBaseTest):
     @contextlib.contextmanager
     def _stubs(self, ip_find, subnet, address, addresses_found):
         db_mod = "quark.db.api"
+        self.context.session.add = mock.Mock()
         with contextlib.nested(
             mock.patch("%s.ip_address_find" % db_mod),
             mock.patch("%s.ip_address_update" % db_mod),

@@ -360,13 +362,17 @@ class QuarkIPAddressAllocateDeallocated(QuarkIpamBaseTest):
         This edge case occurs because users are allowed to select a specific IP
         address to create.
         """
+        network_mod = models.Network()
+        network_mod.update(dict(ip_policy=None))
         subnet = dict(id=1, ip_version=4, next_auto_assign_ip=0,
                       cidr="0.0.0.0/24", first_ip=0, last_ip=255,
-                      network=dict(ip_policy=None), ip_policy=None)
+                      network=network_mod, ip_policy=None)
         address0 = dict(id=1, address=0)
         addresses_found = [None, None]
+        subnet_mod = models.Subnet()
+        subnet_mod.update(subnet)
         with self._stubs(
-            False, subnet, address0, addresses_found
+            False, subnet_mod, address0, addresses_found
         ) as (choose_subnet):
             ipaddress = self.ipam.allocate_ip_address(self.context, 0, 0, 0)
             self.assertEqual(ipaddress["address"], 2)

@@ -380,6 +386,7 @@ class TestQuarkIpPoliciesIpAllocation(QuarkIpamBaseTest):
         if not addresses:
             addresses = [None]
         db_mod = "quark.db.api"
+        self.context.session.add = mock.Mock()
         with contextlib.nested(
             mock.patch("%s.ip_address_find" % db_mod),
             mock.patch("%s.subnet_find_allocation_counts" % db_mod)

@@ -491,6 +498,7 @@ class QuarkIPAddressAllocationNotifications(QuarkIpamBaseTest):
         db_mod = "quark.db.api"
         api_mod = "neutron.openstack.common.notifier.api"
         time_mod = "neutron.openstack.common.timeutils"
+        self.context.session.add = mock.Mock()
         with contextlib.nested(
             mock.patch("%s.ip_address_find" % db_mod),
             mock.patch("%s.ip_address_create" % db_mod),