From 4d97dba20b821a97a3b0a10ed25f4fd0cb38f5fc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C5=82awek=20Kap=C5=82o=C5=84ski?=
Date: Tue, 14 Nov 2017 21:23:26 +0000
Subject: [PATCH] Revert "Revert "objects: get, update and delete converted to Subnet OVO usage""

This reverts commit 1470baf02beb6e9f732129edc4c8979881ddea8f.

As Neutron uses its own objects registry, this commit should be safe to
merge again and it shouldn't break projects like midonet.

Change-Id: I51833eebb4b24207f395e1d4095b1809f191a480
---
 neutron/db/db_base_plugin_common.py            | 93 ++++++++++++-------
 neutron/db/db_base_plugin_v2.py                | 87 ++++++++---------
 neutron/db/ipam_backend_mixin.py               |  2 +-
 neutron/objects/subnet.py                      |  4 +-
 neutron/plugins/ml2/plugin.py                  |  4 +-
 .../tests/unit/db/test_db_base_plugin_v2.py    |  6 ++
 .../unit/db/test_ipam_pluggable_backend.py     | 10 +-
 neutron/tests/unit/objects/test_objects.py     |  2 +-
 .../unit/plugins/ml2/drivers/ext_test.py       | 21 ++++-
 9 files changed, 139 insertions(+), 90 deletions(-)

diff --git a/neutron/db/db_base_plugin_common.py b/neutron/db/db_base_plugin_common.py
index 9b3a1affc0d..0620596fbd7 100644
--- a/neutron/db/db_base_plugin_common.py
+++ b/neutron/db/db_base_plugin_common.py
@@ -35,6 +35,7 @@ from neutron.db import _utils as db_utils
 from neutron.db import api as db_api
 from neutron.db import common_db_mixin
 from neutron.db import models_v2
+from neutron.objects import base as base_obj
 from neutron.objects import ports as port_obj
 from neutron.objects import subnet as subnet_obj
 from neutron.objects import subnetpool as subnetpool_obj
@@ -132,25 +133,50 @@ class DbBasePluginCommon(common_db_mixin.CommonDbMixin):
                'tenant_id': subnet['tenant_id'],
                'network_id': subnet['network_id'],
                'ip_version': subnet['ip_version'],
-               'cidr': subnet['cidr'],
-               'subnetpool_id': subnet.get('subnetpool_id'),
-               'allocation_pools': [{'start': pool['first_ip'],
-                                     'end': pool['last_ip']}
-                                    for pool in subnet['allocation_pools']],
-               'gateway_ip': subnet['gateway_ip'],
+               'subnetpool_id': subnet['subnetpool_id'],
                'enable_dhcp': subnet['enable_dhcp'],
                'ipv6_ra_mode': subnet['ipv6_ra_mode'],
                'ipv6_address_mode': subnet['ipv6_address_mode'],
-               'dns_nameservers': [dns['address']
-                                   for dns in subnet['dns_nameservers']],
-               'host_routes': [{'destination': route['destination'],
-                                'nexthop': route['nexthop']}
-                               for route in subnet['routes']],
                }
-        # The shared attribute for a subnet is the same as its parent network
-        res['shared'] = self._is_network_shared(context, subnet.rbac_entries)
-        # Call auxiliary extend functions, if any
-        resource_extend.apply_funcs(subnet_def.COLLECTION_NAME, res, subnet)
+        res['gateway_ip'] = str(
+            subnet['gateway_ip']) if subnet['gateway_ip'] else None
+        # TODO(korzen) this method can get subnet as DB object or Subnet OVO,
+        # so temporary workaround will be to fill in the fields in separate
+        # ways. After converting all code pieces to use Subnet OVO, the latter
+        # 'else' can be deleted
+        if isinstance(subnet, subnet_obj.Subnet):
+            res['cidr'] = str(subnet.cidr)
+            res['allocation_pools'] = [{'start': str(pool.start),
+                                        'end': str(pool.end)}
+                                       for pool in subnet.allocation_pools]
+            res['host_routes'] = [{'destination': str(route.destination),
+                                   'nexthop': str(route.nexthop)}
+                                  for route in subnet.host_routes]
+            res['dns_nameservers'] = [str(dns.address)
+                                      for dns in subnet.dns_nameservers]
+            res['shared'] = subnet.shared
+            # Call auxiliary extend functions, if any
+            resource_extend.apply_funcs(subnet_def.COLLECTION_NAME,
+                                        res, subnet.db_obj)
+        else:
+            res['cidr'] = subnet['cidr']
+            res['allocation_pools'] = [{'start': pool['first_ip'],
+                                        'end': pool['last_ip']}
+                                       for pool in subnet['allocation_pools']]
+            res['host_routes'] = [{'destination': route['destination'],
+                                   'nexthop': route['nexthop']}
+                                  for route in subnet['routes']]
+            res['dns_nameservers'] = [dns['address']
+                                      for dns in subnet['dns_nameservers']]
+
+            # The shared attribute for a subnet is the same
+            # as its parent network
+            res['shared'] = self._is_network_shared(context,
+                                                    subnet.rbac_entries)
+            # Call auxiliary extend functions, if any
+            resource_extend.apply_funcs(subnet_def.COLLECTION_NAME,
+                                        res, subnet)
+
         return db_utils.resource_fields(res, fields)
 
     def _make_subnetpool_dict(self, subnetpool, fields=None):
@@ -200,12 +226,20 @@ class DbBasePluginCommon(common_db_mixin.CommonDbMixin):
         return network
 
     def _get_subnet(self, context, id):
+        # TODO(slaweq): remove this method when all will be switched to use OVO
+        # objects only
         try:
             subnet = model_query.get_by_id(context, models_v2.Subnet, id)
         except exc.NoResultFound:
            raise n_exc.SubnetNotFound(subnet_id=id)
         return subnet
 
+    def _get_subnet_object(self, context, id):
+        subnet = subnet_obj.Subnet.get_object(context, id=id)
+        if not subnet:
+            raise n_exc.SubnetNotFound(subnet_id=id)
+        return subnet
+
     def _get_subnetpool(self, context, id):
         subnetpool = subnetpool_obj.SubnetPool.get_object(
             context, id=id)
@@ -231,34 +265,21 @@ class DbBasePluginCommon(common_db_mixin.CommonDbMixin):
 
     @db_api.context_manager.reader
     def _get_subnets_by_network(self, context, network_id):
-        subnet_qry = context.session.query(models_v2.Subnet)
-        return subnet_qry.filter_by(network_id=network_id).all()
+        return subnet_obj.Subnet.get_objects(context, network_id=network_id)
 
     @db_api.context_manager.reader
     def _get_subnets_by_subnetpool(self, context, subnetpool_id):
-        subnet_qry = context.session.query(models_v2.Subnet)
-        return subnet_qry.filter_by(subnetpool_id=subnetpool_id).all()
-
-    @db_api.context_manager.reader
-    def _get_all_subnets(self, context):
-        # NOTE(salvatore-orlando): This query might end up putting
-        # a lot of stress on the db. Consider adding a cache layer
-        return context.session.query(models_v2.Subnet).all()
+        return subnet_obj.Subnet.get_objects(context,
+                                             subnetpool_id=subnetpool_id)
 
     def _get_subnets(self, context, filters=None, fields=None, sorts=None,
                      limit=None, marker=None, page_reverse=False):
-        marker_obj = db_utils.get_marker_obj(self, context, 'subnet',
-                                             limit, marker)
-        make_subnet_dict = functools.partial(self._make_subnet_dict,
-                                             context=context)
-        return model_query.get_collection(context, models_v2.Subnet,
-                                          make_subnet_dict,
-                                          filters=filters, fields=fields,
-                                          sorts=sorts,
-                                          limit=limit,
-                                          marker_obj=marker_obj,
-                                          page_reverse=page_reverse)
+        pager = base_obj.Pager(sorts, limit, page_reverse, marker)
+        filters = filters or {}
+        return subnet_obj.Subnet.get_objects(context, _pager=pager,
+                                             validate_filters=False,
+                                             **filters)
 
     def _make_network_dict(self, network, fields=None,
                            process_extensions=True, context=None):
diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py
index e22ea80248c..8f492263007 100644
--- a/neutron/db/db_base_plugin_v2.py
+++ b/neutron/db/db_base_plugin_v2.py
@@ -60,6 +60,7 @@ from neutron.ipam import subnet_alloc
 from neutron import neutron_plugin_base_v2
 from neutron.objects import base as base_obj
 from neutron.objects import ports as port_obj
+from neutron.objects import subnet as subnet_obj
 from neutron.objects import subnetpool as subnetpool_obj
 
 
@@ -271,9 +272,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
             context, models_v2.Port).filter(models_v2.Port.network_id == id)
         ports = ports.filter(not_(models_v2.Port.device_owner.startswith(
             constants.DEVICE_OWNER_NETWORK_PREFIX)))
-        subnets = model_query.query_with_hooks(
-            context, models_v2.Subnet).filter(
-                models_v2.Subnet.network_id == id)
+        subnets = subnet_obj.Subnet.get_objects(context, network_id=id)
         tenant_ids = set([port['tenant_id'] for port in ports] +
                          [subnet['tenant_id'] for subnet in subnets])
         # raise if multiple tenants found or if the only tenant found
@@ -450,7 +449,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
             # retry reference errors so we can check the port type and
             # cleanup if a network-owned port snuck in without failing
             for subnet in subnets:
-                self.delete_subnet(context, subnet['id'])
+                self._delete_subnet(context, subnet)
             with db_api.context_manager.writer.using(context):
                 network_db = self._get_network(context, id)
                 network = self._make_network_dict(network_db, context=context)
@@ -572,12 +571,13 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
             ipal = models_v2.IPAllocation
             alloc_qry = context.session.query(ipal)
             alloc_qry = alloc_qry.join("port", "routerport")
+            gateway_ip = str(cur_subnet['gateway_ip'])
             allocated = alloc_qry.filter(
-                ipal.ip_address == cur_subnet['gateway_ip'],
+                ipal.ip_address == gateway_ip,
                 ipal.subnet_id == cur_subnet['id']).first()
             if allocated and allocated['port_id']:
                 raise n_exc.GatewayIpInUse(
-                    ip_address=cur_subnet['gateway_ip'],
+                    ip_address=gateway_ip,
                     port_id=allocated['port_id'])
 
         if validators.is_attr_set(s.get('dns_nameservers')):
@@ -824,19 +824,22 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
         """
         s = subnet['subnet']
         new_cidr = s.get('cidr')
-        db_subnet = self._get_subnet(context, id)
-        orig = self._make_subnet_dict(db_subnet, fields=None, context=context)
+        subnet_obj = self._get_subnet_object(context, id)
+        orig = self._make_subnet_dict(subnet_obj, fields=None, context=context)
         # Fill 'ip_version' and 'allocation_pools' fields with the current
         # value since _validate_subnet() expects subnet spec has 'ip_version'
         # and 'allocation_pools' fields.
-        s['ip_version'] = db_subnet.ip_version
-        s['cidr'] = db_subnet.cidr
-        s['id'] = db_subnet.id
-        s['tenant_id'] = db_subnet.tenant_id
-        s['subnetpool_id'] = db_subnet.subnetpool_id
-        self._validate_subnet(context, s, cur_subnet=db_subnet)
-        db_pools = [netaddr.IPRange(p['first_ip'], p['last_ip'])
-                    for p in db_subnet.allocation_pools]
+        s['ip_version'] = subnet_obj.ip_version
+        # TODO(slaweq): convert cidr to str will not be necessary when we
+        # will switch to use subnet OVO in ipam module
+        s['cidr'] = str(subnet_obj.cidr)
+        s['id'] = subnet_obj.id
+        s['project_id'] = subnet_obj.project_id
+        s['tenant_id'] = subnet_obj.project_id
+        s['subnetpool_id'] = subnet_obj.subnetpool_id
+        self._validate_subnet(context, s, cur_subnet=subnet_obj)
+        db_pools = [netaddr.IPRange(p.start, p.end)
+                    for p in subnet_obj.allocation_pools]
 
         if new_cidr and ipv6_utils.is_ipv6_pd_enabled(s):
             # This is an ipv6 prefix delegation-enabled subnet being given an
@@ -855,8 +858,10 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
             s['allocation_pools'] = range_pools
 
         # If either gateway_ip or allocation_pools were specified
-        gateway_ip = s.get('gateway_ip', db_subnet.gateway_ip)
-        gateway_ip_changed = gateway_ip != db_subnet.gateway_ip
+        subnet_gateway = (subnet_obj.gateway_ip
+                          if subnet_obj.gateway_ip else None)
+        gateway_ip = s.get('gateway_ip', subnet_gateway)
+        gateway_ip_changed = gateway_ip != subnet_gateway
         if gateway_ip_changed or s.get('allocation_pools') is not None:
             pools = range_pools if range_pools is not None else db_pools
             if gateway_ip:
@@ -870,13 +875,6 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
         with db_api.context_manager.writer.using(context):
             subnet, changes = self.ipam.update_db_subnet(context, id, s,
                                                          db_pools)
-            # we expire here since ipam may have made changes to relationships
-            # that will be stale on any subsequent lookups while the subnet
-            # object is in the session otherwise.
-            # Check if subnet attached to session before expire.
-            if subnet in context.session:
-                context.session.expire(subnet)
-
         return self._make_subnet_dict(subnet, context=context), orig
 
     @property
@@ -980,7 +978,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
             # Do not allow a subnet to be deleted if a router is attached to it
             self._subnet_check_ip_allocations_internal_router_ports(
                     context, id)
-            subnet = self._get_subnet(context, id)
+            subnet = self._get_subnet_object(context, id)
             is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet)
             if not is_auto_addr_subnet:
                 # we only automatically remove IP addresses from user ports if
@@ -998,41 +996,47 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
         LOG.debug("Deleting subnet %s", id)
         # Make sure the subnet isn't used by other resources
        _check_subnet_not_used(context, id)
+        subnet = self._get_subnet_object(context, id)
         self._remove_subnet_ip_allocations_from_ports(context, id)
-        # retry integrity errors to catch ip allocation races
+        self._delete_subnet(context, subnet)
+
+    def _delete_subnet(self, context, subnet):
         with db_api.exc_to_retry(sql_exc.IntegrityError), \
                 db_api.context_manager.writer.using(context):
-            subnet_db = self._get_subnet(context, id)
-            subnet = self._make_subnet_dict(subnet_db, context=context)
             registry.notify(resources.SUBNET, events.PRECOMMIT_DELETE,
-                            self, context=context, subnet_id=id)
-            context.session.delete(subnet_db)
+                            self, context=context, subnet_id=subnet.id)
+            subnet.delete()
             # Delete related ipam subnet manually,
            # since there is no FK relationship
-            self.ipam.delete_subnet(context, id)
+            self.ipam.delete_subnet(context, subnet.id)
         registry.notify(resources.SUBNET, events.AFTER_DELETE,
-                        self, context=context, subnet=subnet)
+                        self, context=context, subnet=subnet.to_dict())
 
     @db_api.retry_if_session_inactive()
     def get_subnet(self, context, id, fields=None):
-        subnet = self._get_subnet(context, id)
-        return self._make_subnet_dict(subnet, fields, context=context)
+        subnet_obj = self._get_subnet_object(context, id)
+        return self._make_subnet_dict(subnet_obj, fields, context=context)
 
     @db_api.retry_if_session_inactive()
     def get_subnets(self, context, filters=None, fields=None,
                     sorts=None, limit=None, marker=None,
                     page_reverse=False):
-        return self._get_subnets(context, filters, fields, sorts, limit,
-                                 marker, page_reverse)
+        subnet_objs = self._get_subnets(context, filters, fields, sorts, limit,
+                                        marker, page_reverse)
+        return [
+            self._make_subnet_dict(subnet_object, fields, context)
+            for subnet_object in subnet_objs
+        ]
 
     @db_api.retry_if_session_inactive()
     def get_subnets_count(self, context, filters=None):
-        return model_query.get_collection_count(context, models_v2.Subnet,
-                                                filters=filters)
+        filters = filters or {}
+        return subnet_obj.Subnet.count(context, validate_filters=False,
+                                       **filters)
 
     @db_api.retry_if_session_inactive()
     def get_subnets_by_network(self, context, network_id):
-        return [self._make_subnet_dict(subnet_db) for subnet_db in
+        return [self._make_subnet_dict(subnet_obj) for subnet_obj in
                 self._get_subnets_by_network(context, network_id)]
 
     def _validate_address_scope_id(self, context, address_scope_id,
@@ -1202,8 +1206,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
     def delete_subnetpool(self, context, id):
         with db_api.context_manager.writer.using(context):
             subnetpool = self._get_subnetpool(context, id=id)
-            subnets = self._get_subnets_by_subnetpool(context, id)
-            if subnets:
+            if subnet_obj.Subnet.objects_exist(context, subnetpool_id=id):
                 reason = _("Subnet pool has existing allocations")
allocations") raise n_exc.SubnetPoolDeleteError(reason=reason) subnetpool.delete() diff --git a/neutron/db/ipam_backend_mixin.py b/neutron/db/ipam_backend_mixin.py index 425b163e7f6..8170e94e24d 100644 --- a/neutron/db/ipam_backend_mixin.py +++ b/neutron/db/ipam_backend_mixin.py @@ -235,7 +235,7 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): if cfg.CONF.allow_overlapping_ips: subnet_list = network.subnets else: - subnet_list = self._get_all_subnets(context) + subnet_list = self._get_subnets(context) for subnet in subnet_list: if ((netaddr.IPSet([subnet.cidr]) & new_subnet_ipset) and subnet.cidr != const.PROVISIONAL_IPV6_PD_PREFIX): diff --git a/neutron/objects/subnet.py b/neutron/objects/subnet.py index 9c1fb99447e..b21cbce281a 100644 --- a/neutron/objects/subnet.py +++ b/neutron/objects/subnet.py @@ -175,7 +175,9 @@ class Subnet(base.NeutronDbObject): 'name': obj_fields.StringField(nullable=True), 'network_id': common_types.UUIDField(), 'segment_id': common_types.UUIDField(nullable=True), - 'subnetpool_id': common_types.UUIDField(nullable=True), + # NOTE: subnetpool_id can be 'prefix_delegation' string + # when the IPv6 Prefix Delegation is enabled + 'subnetpool_id': obj_fields.StringField(nullable=True), 'ip_version': common_types.IPVersionEnumField(), 'cidr': common_types.IPNetworkField(), 'gateway_ip': obj_fields.IPAddressField(nullable=True), diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py index 6453fb137de..3619e797490 100644 --- a/neutron/plugins/ml2/plugin.py +++ b/neutron/plugins/ml2/plugin.py @@ -1100,8 +1100,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, @registry.receives(resources.SUBNET, [events.PRECOMMIT_DELETE]) def _subnet_delete_precommit_handler(self, rtype, event, trigger, context, subnet_id, **kwargs): - record = self._get_subnet(context, subnet_id) - subnet = self._make_subnet_dict(record, context=context) + subnet_obj = self._get_subnet_object(context, subnet_id) + subnet = self._make_subnet_dict(subnet_obj, context=context) network = self.get_network(context, subnet['network_id']) mech_context = driver_context.SubnetContext(self, context, subnet, network) diff --git a/neutron/tests/unit/db/test_db_base_plugin_v2.py b/neutron/tests/unit/db/test_db_base_plugin_v2.py index 76b9166f435..ce87e55ad94 100644 --- a/neutron/tests/unit/db/test_db_base_plugin_v2.py +++ b/neutron/tests/unit/db/test_db_base_plugin_v2.py @@ -630,6 +630,12 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase): tenant_id=None, service_types=None, set_context=False): + + cidr = netaddr.IPNetwork(cidr) if cidr else None + if (gateway_ip is not None and + gateway_ip != constants.ATTR_NOT_SPECIFIED): + gateway_ip = netaddr.IPAddress(gateway_ip) + with optional_ctx(network, self.network, set_context=set_context, tenant_id=tenant_id) as network_to_use: diff --git a/neutron/tests/unit/db/test_ipam_pluggable_backend.py b/neutron/tests/unit/db/test_ipam_pluggable_backend.py index 27d1dc34910..d95b386d530 100644 --- a/neutron/tests/unit/db/test_ipam_pluggable_backend.py +++ b/neutron/tests/unit/db/test_ipam_pluggable_backend.py @@ -714,6 +714,12 @@ class TestDbBasePluginIpam(test_db_base.NeutronDbPluginV2TestCase): subnet_factory = mock.Mock() context = self.admin_context + if 'cidr' in subnet: + subnet['cidr'] = netaddr.IPNetwork(subnet['cidr']) + if 'cidr' in expected_subnet: + expected_subnet['cidr'] = netaddr.IPNetwork( + expected_subnet['cidr']) + mocks = self._prepare_mocks_with_pool_mock( pool_mock, subnet_factory=subnet_factory) @@ 
@@ -731,7 +737,7 @@ class TestDbBasePluginIpam(test_db_base.NeutronDbPluginV2TestCase):
         old_pools = [{'start': '192.1.1.2', 'end': '192.1.1.254'}]
         context = self.admin_context
         subnet = {'id': uuidutils.generate_uuid(),
-                  'ip_version': '4',
+                  'ip_version': constants.IP_VERSION_4,
                   'cidr': '192.1.1.0/24',
                   'ipv6_address_mode': None,
                   'ipv6_ra_mode': None}
@@ -750,7 +756,7 @@ class TestDbBasePluginIpam(test_db_base.NeutronDbPluginV2TestCase):
         old_pools = [{'start': '192.1.1.2', 'end': '192.1.1.254'}]
         context = self.admin_context
         subnet = {'id': uuidutils.generate_uuid(),
-                  'ip_version': '4',
+                  'ip_version': constants.IP_VERSION_4,
                   'cidr': '192.1.1.0/24',
                   'ipv6_address_mode': None,
                   'ipv6_ra_mode': None}
diff --git a/neutron/tests/unit/objects/test_objects.py b/neutron/tests/unit/objects/test_objects.py
index 48150ee3b8a..c955c3e9102 100644
--- a/neutron/tests/unit/objects/test_objects.py
+++ b/neutron/tests/unit/objects/test_objects.py
@@ -93,7 +93,7 @@ object_data = {
     'SecurityGroupRule': '1.0-e9b8dace9d48b936c62ad40fe1f339d5',
     'SegmentHostMapping': '1.0-521597cf82ead26217c3bd10738f00f0',
     'ServiceProfile': '1.0-9beafc9e7d081b8258f3c5cb66ac5eed',
-    'Subnet': '1.0-9c19023a61b42d29fbf3766df380e5b7',
+    'Subnet': '1.0-927155c1fdd5a615cbcb981dda97bce4',
     'SubnetPool': '1.0-a0e03895d1a6e7b9d4ab7b0ca13c3867',
     'SubnetPoolPrefix': '1.0-13c15144135eb869faa4a76dc3ee3b6c',
     'SubnetServiceType': '1.0-05ae4cdb2a9026a697b143926a1add8c',
diff --git a/neutron/tests/unit/plugins/ml2/drivers/ext_test.py b/neutron/tests/unit/plugins/ml2/drivers/ext_test.py
index 6a561b5989b..3f6c2fb1ff0 100644
--- a/neutron/tests/unit/plugins/ml2/drivers/ext_test.py
+++ b/neutron/tests/unit/plugins/ml2/drivers/ext_test.py
@@ -26,6 +26,7 @@ from sqlalchemy import orm
 
 from neutron.api import extensions
 from neutron.db import models_v2
+from neutron.objects import subnet as subnet_obj
 from neutron.tests.unit.plugins.ml2 import extensions as test_extensions
 
 
@@ -59,13 +60,20 @@ class TestExtensionDriver(TestExtensionDriverBase):
         assert(isinstance(result, dict))
         assert(result['id'] is not None)
 
-    def _check_extend(self, session, result, db_entry,
-                      expected_db_entry_class):
+    def _check_extend(self, session, result, entry,
+                      expected_db_entry_class, expected_obj_entry_class=None):
+        # TODO(slaweq): After converting all code to use Subnet OVO,
+        # expected_db_entry_class can be removed as only OVO object
+        # should be expected here
         assert(isinstance(session, oslo_db.sqlalchemy.session.Session))
         assert(isinstance(result, dict))
         assert(result['id'] is not None)
-        assert(isinstance(db_entry, expected_db_entry_class))
-        assert(db_entry.id == result['id'])
+        if expected_obj_entry_class:
+            assert(isinstance(entry, expected_db_entry_class) or
+                   isinstance(entry, expected_obj_entry_class))
+        else:
+            assert(isinstance(entry, expected_db_entry_class))
+        assert(entry.id == result['id'])
 
     def _store_change(self, result, data, field):
         if field in data and data[field] != constants.ATTR_NOT_SPECIFIED:
@@ -102,7 +110,10 @@ class TestExtensionDriver(TestExtensionDriverBase):
             result['subnet_extension'] = self.val_by_id[result['id']]
 
     def extend_subnet_dict(self, session, subnet_db, result):
-        self._check_extend(session, result, subnet_db, models_v2.Subnet)
+        self._check_extend(
+            session, result, subnet_db,
+            expected_db_entry_class=models_v2.Subnet,
+            expected_obj_entry_class=subnet_obj.Subnet)
        result['subnet_extension'] = self.val_by_id.get(result['id'])
 
     def process_create_port(self, plugin_context, data, result):
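
The conversions above all follow the same access pattern: subnets are read and
deleted through the registry-backed neutron.objects.subnet.Subnet class instead
of querying models_v2.Subnet directly. The sketch below is illustrative only and
is not part of the diff; the standalone helper names and the exception handling
around them are assumptions, but the Subnet OVO calls (get_object, get_objects,
objects_exist, Pager) mirror the ones introduced in db_base_plugin_common.py and
db_base_plugin_v2.py in this change.

    # Illustrative sketch only -- not part of the patch.
    from neutron_lib import exceptions as n_exc

    from neutron.objects import base as base_obj
    from neutron.objects import subnet as subnet_obj


    def get_subnet_or_raise(context, subnet_id):
        # Same lookup as the new _get_subnet_object() helper.
        subnet = subnet_obj.Subnet.get_object(context, id=subnet_id)
        if not subnet:
            raise n_exc.SubnetNotFound(subnet_id=subnet_id)
        return subnet


    def list_subnets(context, sorts=None, limit=None, marker=None,
                     page_reverse=False, **filters):
        # Same pagination pattern as the converted _get_subnets().
        pager = base_obj.Pager(sorts, limit, page_reverse, marker)
        return subnet_obj.Subnet.get_objects(context, _pager=pager,
                                             validate_filters=False,
                                             **filters)


    def subnetpool_in_use(context, subnetpool_id):
        # Same existence check now used by delete_subnetpool().
        return subnet_obj.Subnet.objects_exist(context,
                                               subnetpool_id=subnetpool_id)

Because these calls return versioned objects rather than SQLAlchemy models,
fields such as cidr, gateway_ip and allocation pool boundaries come back as
netaddr types, which is why _make_subnet_dict() in the diff converts them with
str() before building the API response.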