NSX|P: Support policy DHCP

Add IPv4 support for Policy DHCP, depending on the NSX version and
configuration. This includes devstack support for configuration and
cleanup, and an admin utility for migrating from the MP implementation
to the Policy one.

IPv6 support will follow in a future patch.

Change-Id: I01bfb5bd530c63ca8b635bbebcac47659187077e
asarfaty 2019-12-23 15:14:20 +02:00
parent 38a5f8c11c
commit 79a638b550
15 changed files with 1679 additions and 114 deletions


@@ -77,6 +77,7 @@ class NSXClient(object):
         self.host = host
         self.username = username
         self.password = password
+        self.allow_passthrough = allow_passthrough
         self.neutron_db = (NeutronNsxDB(db_connection)
                            if db_connection else None)
@@ -84,6 +85,7 @@ class NSXClient(object):
             username=self.username,
             password=self.password,
             nsx_api_managers=[self.host],
+            allow_passthrough=allow_passthrough,
             # allow admin user to delete entities created
             # under openstack principal identity
             allow_overwrite_header=True)
@@ -184,6 +186,7 @@ class NSXClient(object):
         return segments

     def delete_network_nsx_dhcp_port(self, network_id):
+        # Delete dhcp port when using MP dhcp
         if not self.nsxlib:
             # no passthrough api
             return


@@ -646,6 +646,9 @@ NSX Policy Plugin

     nsxadmin -r routers -o update-nat-firewall-match --property firewall-match=external/internal

+- Migrate networks DHCP from MP to Policy (for NSX 3.0 upgrades)::
+
+    nsxadmin -r dhcp-binding -o migrate-to-policy --property dhcp-config=<id>

 Client Certificate
 ~~~~~~~~~~~~~~~~~~


@@ -299,8 +299,8 @@ Add those parameters in ``local.conf``::
    NSX_POLICY=<ip>
    NSX_USER=<username>
    NSX_PASSWORD=<password>
-   DHCP_PROFILE_UUID=<MP name or UUID of the DHCP profile>
-   METADATA_PROXY_UUID=<MP name or UUID of the metadata proxy>
+   DHCP_PROFILE_UUID=<MP or Policy name or UUID of the DHCP profile>
+   METADATA_PROXY_UUID=<MP or Policy name or UUID of the metadata proxy>
    DEFAULT_TIER0_ROUTER_UUID=<Policy name or ID of Tier0>
    DEFAULT_OVERLAY_TZ_UUID=<Policy name or ID of the overlay transport zone>


@@ -361,11 +361,6 @@ nsx_v3_and_p = [
                "that will be used to enable native metadata service. "
                "It needs to be created in NSX before starting Neutron "
                "with the NSX plugin.")),
-    cfg.StrOpt('dhcp_profile',
-               help=_("This is the name or UUID of the NSX DHCP Profile "
-                      "that will be used to enable native DHCP service. It "
-                      "needs to be created in NSX before starting Neutron "
-                      "with the NSX plugin")),
     cfg.StrOpt('native_metadata_route',
                default="169.254.169.254/31",
                help=_("The metadata route used for native metadata proxy "
@@ -410,6 +405,11 @@ nsx_v3_and_p = [
 ]

 nsx_v3_opts = nsx_v3_and_p + [
+    cfg.StrOpt('dhcp_profile',
+               help=_("This is the name or UUID of the NSX DHCP Profile "
+                      "that will be used to enable native DHCP service. It "
+                      "needs to be created in NSX before starting Neutron "
+                      "with the NSX plugin")),
     cfg.StrOpt('default_overlay_tz',
                help=_("This is the name or UUID of the default NSX overlay "
                       "transport zone that will be used for creating "
@@ -505,6 +505,12 @@ nsx_v3_opts = nsx_v3_and_p + [
 ]

 nsx_p_opts = nsx_v3_and_p + [
+    cfg.StrOpt('dhcp_profile',
+               help=_("This is the name or UUID of the NSX DHCP Profile, "
+                      "or the name or ID of the Policy DHCP server config "
+                      "that will be used to enable native DHCP service. It "
+                      "needs to be created in NSX before starting Neutron "
+                      "with the NSX plugin")),
     cfg.StrOpt('default_tier0_router',
                help=_("Name or UUID of the default tier0 router that will be "
                       "used for connecting to tier1 logical routers and "


@@ -35,9 +35,8 @@ class NsxV3AvailabilityZone(common_az.ConfiguredAvailabilityZone):
         # Should be implemented by children
         pass

-    def init_from_config_section(self, az_name):
+    def init_from_config_section(self, az_name, mandatory_dhcp=True):
         az_info = self.get_az_opts()

         if self._has_native_dhcp_metadata():
             # The optional parameters will get the global values if not
             # defined for this AZ
@@ -49,8 +48,9 @@ class NsxV3AvailabilityZone(common_az.ConfiguredAvailabilityZone):
                     reason=(_("metadata_proxy for availability zone %s "
                               "must be defined") % az_name))

+            # This is mandatory only if using MP dhcp
             self.dhcp_profile = az_info.get('dhcp_profile')
-            if not self.dhcp_profile:
+            if not self.dhcp_profile and mandatory_dhcp:
                 raise nsx_exc.NsxInvalidConfiguration(
                     opt_name="dhcp_profile",
                     opt_value='None',


@@ -159,9 +159,6 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
         self._native_dhcp_enabled = False

     def _init_native_metadata(self):
-        if not self.nsxlib:
-            return
         for az in self.get_azs_list():
             if not az._native_md_proxy_uuid:
                 LOG.error("Unable to retrieve Metadata Proxy %s for "
@@ -1635,7 +1632,7 @@
             # Configure existing ports to work with the new DHCP server
             try:
                 for port_data in existing_ports:
-                    self._add_dhcp_binding(context, port_data)
+                    self._add_port_mp_dhcp_binding(context, port_data)
             except Exception:
                 LOG.error('Unable to create DHCP bindings for existing ports '
                           'on subnet %s', subnet['id'])
@@ -1701,7 +1698,7 @@
                 ips.append(fixed_ip)
         return ips

-    def _add_dhcp_binding(self, context, port):
+    def _add_port_mp_dhcp_binding(self, context, port):
         if not utils.is_port_dhcp_configurable(port):
             return
         dhcp_service = nsx_db.get_nsx_service_binding(
@@ -1760,7 +1757,7 @@
                        'port': port['id'],
                        'server': dhcp_service_id})

-    def _delete_dhcp_binding(self, context, port):
+    def _delete_port_mp_dhcp_binding(self, context, port):
         # Do not check device_owner here because Nova may have already
         # deleted that before Neutron's port deletion.
         bindings = nsx_db.get_nsx_dhcp_bindings(context.session, port['id'])
@@ -1797,7 +1794,7 @@
                     ip_address == binding['ip_address']):
                 return binding

-    def _update_dhcp_binding(self, context, old_port, new_port):
+    def _update_port_mp_dhcp_binding(self, context, old_port, new_port):
         # First check if any IPv4 address in fixed_ips is changed.
         # Then update DHCP server setting or DHCP static binding
         # depending on the port type.
@@ -1808,9 +1805,9 @@
             # Note that the device_owner could be changed,
             # but still needs DHCP binding.
             if utils.is_port_dhcp_configurable(old_port):
-                self._delete_dhcp_binding(context, old_port)
+                self._delete_port_mp_dhcp_binding(context, old_port)
             else:
-                self._add_dhcp_binding(context, new_port)
+                self._add_port_mp_dhcp_binding(context, new_port)
             return

         # Collect IPv4 DHCP addresses from original and updated fixed_ips
@@ -2054,6 +2051,7 @@
         is_slaac = (subnet.get('ipv6_address_mode') == 'slaac')
         if enable_dhcp and not is_slaac:
             # No DHCPv6 support yet
+            # TODO(asarfaty): add ipv6 support for policy plugin
             msg = _("DHCPv6 is not supported")
             LOG.error(msg)
             raise n_exc.InvalidInput(error_message=msg)
@@ -2084,7 +2082,7 @@
             LOG.error(msg)
             raise n_exc.InvalidInput(error_message=msg)

-    def _create_subnet(self, context, subnet):
+    def _create_subnet_with_mp_dhcp(self, context, subnet):
         self._validate_number_of_subnet_static_routes(subnet)
         self._validate_host_routes_input(subnet)
         self._validate_subnet_ip_version(subnet['subnet'])
@@ -2304,7 +2302,7 @@
         return super(NsxPluginV3Base, self).get_subnets(
             context, filters, fields, sorts, limit, marker, page_reverse)

-    def delete_subnet(self, context, subnet_id):
+    def delete_subnet_with_mp_dhcp(self, context, subnet_id):
         # TODO(berlin): cancel public external subnet announcement
         if self._has_native_dhcp_metadata():
             # Ensure that subnet is not deleted if attached to router.
@@ -2328,7 +2326,7 @@
                     return
         super(NsxPluginV3Base, self).delete_subnet(context, subnet_id)

-    def _update_subnet(self, context, subnet_id, subnet):
+    def update_subnet_with_mp_dhcp(self, context, subnet_id, subnet):
         updated_subnet = None
         orig_subnet = self.get_subnet(context, subnet_id)
         self._validate_number_of_subnet_static_routes(subnet)
@@ -2336,12 +2334,7 @@
             subnet,
             orig_enable_dhcp=orig_subnet['enable_dhcp'],
             orig_host_routes=orig_subnet['host_routes'])

         network = self._get_network(context, orig_subnet['network_id'])
-        if (subnet['subnet'].get('ip_version') !=
-                orig_subnet.get('ip_version')):
-            self._validate_single_ipv6_subnet(
-                context, network, subnet['subnet'])

         if self._has_native_dhcp_metadata():
             enable_dhcp = self._subnet_with_native_dhcp(
@@ -2413,7 +2406,7 @@
                 if key != 'dns_nameservers':
                     kwargs['options'] = None

             if 'options' in kwargs:
-                sr, gw_ip = self.nsxlib.native_dhcp.build_static_routes(
+                sr, gw_ip = self._build_static_routes(
                     updated_subnet.get('gateway_ip'),
                     updated_subnet.get('cidr'),
                     updated_subnet.get('host_routes', []))
@@ -2509,6 +2502,28 @@
                 # we have nothing else to do but raise the exception.
                 raise

+    def _build_static_routes(self, gateway_ip, cidr, host_routes):
+        # The following code is based on _generate_opts_per_subnet() in
+        # neutron/agent/linux/dhcp.py. It prepares DHCP options for a subnet.
+        # Add route for directly connected network.
+        static_routes = [{'network': cidr, 'next_hop': '0.0.0.0'}]
+        # Copy routes from subnet host_routes attribute.
+        if host_routes:
+            for hr in host_routes:
+                if hr['destination'] == constants.IPv4_ANY:
+                    if not gateway_ip:
+                        gateway_ip = hr['nexthop']
+                else:
+                    static_routes.append({'network': hr['destination'],
+                                          'next_hop': hr['nexthop']})
+
+        # If gateway_ip is defined, add default route via this gateway.
+        if gateway_ip:
+            static_routes.append({'network': constants.IPv4_ANY,
+                                  'next_hop': gateway_ip})
+        return static_routes, gateway_ip
+
     def _get_dhcp_options(self, context, ip, extra_dhcp_opts, net_id,
                           subnet):
         # Always add option121.
@@ -2519,7 +2534,7 @@
                  {'network': '%s' % net_az.native_metadata_route,
                   'next_hop': ip}]}}
         if subnet:
-            sr, gateway_ip = self.nsxlib.native_dhcp.build_static_routes(
+            sr, gateway_ip = self._build_static_routes(
                 subnet.get('gateway_ip'), subnet.get('cidr'),
                 subnet.get('host_routes', []))
             options['option121']['static_routes'].extend(sr)
@@ -2642,6 +2657,9 @@
                 return False
         return True

+    def _has_dhcp_enabled_subnet(self, context, network):
+        return not self._has_no_dhcp_enabled_subnet(context, network)
+
     def _has_single_dhcp_enabled_subnet(self, context, network):
         # Check if there is only one DHCP-enabled subnet in the network.
         count = 0
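
Note: the new _build_static_routes() helper above replaces the former
nsxlib native_dhcp.build_static_routes() call. As an illustration only
(the subnet values below are invented, not from this commit), tracing the
helper for a 10.0.0.0/24 subnet gives:

    # Hypothetical input: gateway 10.0.0.1 and one explicit host route.
    sr, gw = plugin._build_static_routes(
        gateway_ip='10.0.0.1',
        cidr='10.0.0.0/24',
        host_routes=[{'destination': '8.8.8.0/24', 'nexthop': '10.0.0.2'}])
    # sr == [{'network': '10.0.0.0/24', 'next_hop': '0.0.0.0'},  # connected
    #        {'network': '8.8.8.0/24', 'next_hop': '10.0.0.2'},  # host route
    #        {'network': '0.0.0.0/0', 'next_hop': '10.0.0.1'}]   # default
    # gw == '10.0.0.1'   (constants.IPv4_ANY is '0.0.0.0/0')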


@@ -34,10 +34,6 @@ class NsxPAvailabilityZone(v3_az.NsxV3AvailabilityZone):
     def get_az_opts(self):
         return config.get_nsxp_az_opts(self.name)

-    def init_from_config_section(self, az_name):
-        super(NsxPAvailabilityZone, self).init_from_config_section(az_name)
-        #TODO(asarfaty): Add nsx-p specific configs here
-
     def init_defaults(self):
         # use the default configuration
         self.metadata_proxy = cfg.CONF.nsx_p.metadata_proxy
@@ -141,6 +137,24 @@ class NsxPAvailabilityZone(v3_az.NsxV3AvailabilityZone):
             auto_config=False, is_mandatory=False,
             search_scope=search_scope)

+        # Init dhcp config from policy or MP
+        self.use_policy_dhcp = False
+        if (nsxpolicy.feature_supported(
+                nsx_constants.FEATURE_NSX_POLICY_DHCP)):
+            try:
+                self._policy_dhcp_server_config = self._init_default_resource(
+                    nsxpolicy, nsxpolicy.dhcp_server_config, 'dhcp_profile',
+                    auto_config=False, is_mandatory=False,
+                    search_scope=search_scope)
+                if self._policy_dhcp_server_config:
+                    self.use_policy_dhcp = True
+            except Exception:
+                # Not found. try as MP profile
+                pass
+        self._native_dhcp_profile_uuid = None
+        if not self.use_policy_dhcp and nsxlib:
+            self._translate_dhcp_profile(nsxlib, search_scope=search_scope)
+
         self.use_policy_md = False
         if (nsxpolicy.feature_supported(
                 nsx_constants.FEATURE_NSX_POLICY_MDPROXY)):
@@ -167,12 +181,6 @@ class NsxPAvailabilityZone(v3_az.NsxV3AvailabilityZone):
             else:
                 self._native_md_proxy_uuid = None

-        # If passthrough api is supported, also initialize those NSX objects
-        if nsxlib:
-            self._translate_dhcp_profile(nsxlib, search_scope=search_scope)
-        else:
-            self._native_dhcp_profile_uuid = None
-
     def _get_edge_cluster_tzs(self, nsxpolicy, nsxlib, ec_uuid):
         ec_nodes = nsxpolicy.edge_cluster.get_edge_node_ids(ec_uuid)
         ec_tzs = []
@@ -228,7 +236,15 @@ class NsxPAvailabilityZone(v3_az.NsxV3AvailabilityZone):
                 self._default_tier0_router,
                 tier0_ec_uuid)

-        if self._native_dhcp_profile_uuid:
+        if self.use_policy_dhcp:
+            dhcp_ec_path = nsxpolicy.dhcp_server_config.get(
+                self._policy_dhcp_server_config).get('edge_cluster_path')
+            dhcp_ec = p_utils.path_to_id(dhcp_ec_path)
+            if dhcp_ec != tier0_ec_uuid:
+                self._validate_tz(nsxpolicy, nsxlib, 'DHCP server config',
+                                  self._policy_dhcp_server_config,
+                                  dhcp_ec)
+        elif self._native_dhcp_profile_uuid:
             dhcp_ec = nsxlib.native_dhcp_profile.get(
                 self._native_dhcp_profile_uuid).get('edge_cluster_id')
             if dhcp_ec != tier0_ec_uuid:


@@ -275,14 +275,13 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base):
             self._enable_ipv6_routing()

         # Validate other mandatory configuration
-        if cfg.CONF.nsx_p.allow_passthrough:
-            if not cfg.CONF.nsx_p.dhcp_profile:
-                raise cfg.RequiredOptError("dhcp_profile",
-                                           group=cfg.OptGroup('nsx_p'))
+        if not cfg.CONF.nsx_p.dhcp_profile:
+            raise cfg.RequiredOptError("dhcp_profile",
+                                       group=cfg.OptGroup('nsx_p'))

         if not cfg.CONF.nsx_p.metadata_proxy:
             raise cfg.RequiredOptError("metadata_proxy",
                                        group=cfg.OptGroup('nsx_p'))

         # If using tags to find the objects, make sure tag scope is configured
         if (cfg.CONF.nsx_p.init_objects_by_tags and
@@ -357,12 +356,18 @@
         return None, None

     def _init_dhcp_metadata(self):
-        if (cfg.CONF.dhcp_agent_notification and
-                cfg.CONF.nsx_p.allow_passthrough):
+        if cfg.CONF.dhcp_agent_notification:
             msg = _("Need to disable dhcp_agent_notification when "
                     "native DHCP & Metadata is enabled")
             raise nsx_exc.NsxPluginException(err_msg=msg)
-        self._init_native_dhcp()
+        default_az = self.get_default_az()
+        if default_az.use_policy_dhcp:
+            self.use_policy_dhcp = True
+        else:
+            self._init_native_dhcp()
+            self.use_policy_dhcp = False
         self._init_native_metadata()

     def init_availability_zones(self):
@@ -593,14 +598,17 @@
     def _setup_rpc(self):
         self.endpoints = [agents_db.AgentExtRpcCallback()]

+    def _net_nsx_name(self, network):
+        return utils.get_name_and_uuid(network['name'] or 'network',
+                                       network['id'])
+
     def _create_network_on_backend(self, context, net_data,
                                    transparent_vlan,
                                    provider_data, az):
         net_data['id'] = net_data.get('id') or uuidutils.generate_uuid()

         # update the network name to indicate the neutron id too.
-        net_name = utils.get_name_and_uuid(net_data['name'] or 'network',
-                                           net_data['id'])
+        net_name = self._net_nsx_name(net_data)
         tags = self.nsxpolicy.build_v3_tags_payload(
             net_data, resource_type='os-neutron-net-id',
             project_name=context.tenant_name)
@@ -803,13 +811,22 @@
         return created_net

     def delete_network(self, context, network_id):
-        if cfg.CONF.nsx_p.allow_passthrough:
-            self._delete_network_disable_dhcp(context, network_id)
-
-        is_nsx_net = self._network_is_nsx_net(context, network_id)
         is_external_net = self._network_is_external(context, network_id)
-        # First call DB operation for delete network as it will perform
+        if not is_external_net:
+            # First disable DHCP & delete its port
+            if self.use_policy_dhcp:
+                lock = 'nsxp_network_' + network_id
+                with locking.LockManager.get_lock(lock):
+                    network = self._get_network(context, network_id)
+                    if not self._has_active_port(context, network_id):
+                        self._disable_network_dhcp(context, network)
+            elif cfg.CONF.nsx_p.allow_passthrough:
+                self._delete_network_disable_dhcp(context, network_id)
+
+        is_nsx_net = self._network_is_nsx_net(context, network_id)
+        # Call DB operation for delete network as it will perform
         # checks on active ports
         self._retry_delete_network(context, network_id)
@@ -904,6 +921,7 @@
         try:
             self.nsxpolicy.segment.update(network_id, **kwargs)
         except nsx_lib_exc.ManagerError:
             LOG.exception("Unable to update NSX backend, rolling "
                           "back changes on neutron")
@@ -956,16 +974,374 @@
         self.nsxpolicy.tier1.update(router_id,
                                     ipv6_ndra_profile_id=profile_id)

+    def _validate_net_dhcp_edge_cluster(self, context, network, az):
+        """Validate that the dhcp server edge cluster match the one of
+        the network TZ
+        """
+        if not self.nsxlib:
+            # Cannot validate the TZ because the fabric apis are available
+            # only via the nsxlib
+            return
+
+        net_tz = self._get_net_tz(context, network['id'])
+        dhcp_ec_path = self.nsxpolicy.dhcp_server_config.get(
+            az._policy_dhcp_server_config).get('edge_cluster_path')
+        ec_id = p_utils.path_to_id(dhcp_ec_path)
+        ec_nodes = self.nsxlib.edge_cluster.get_transport_nodes(ec_id)
+        ec_tzs = []
+        for tn_uuid in ec_nodes:
+            ec_tzs.extend(self.nsxlib.transport_node.get_transport_zones(
+                tn_uuid))
+        if net_tz not in ec_tzs:
+            msg = (_('Network TZ %(tz)s does not match DHCP server '
+                     'edge cluster %(ec)s') %
+                   {'tz': net_tz, 'ec': ec_id})
+            LOG.error(msg)
+            raise n_exc.InvalidInput(error_message=msg)
+
+    def _create_subnet_dhcp_port(self, context, az, network, subnet):
+        port_data = {
+            "name": "",
+            "admin_state_up": True,
+            "device_id": network['id'],
+            "device_owner": const.DEVICE_OWNER_DHCP,
+            "network_id": network['id'],
+            "tenant_id": network["tenant_id"],
+            "mac_address": const.ATTR_NOT_SPECIFIED,
+            "fixed_ips": [{"subnet_id": subnet['id']}],
+            psec.PORTSECURITY: False
+        }
+        # Create the DHCP port (on neutron only) and update its port security
+        port = {'port': port_data}
+        neutron_port = super(NsxPolicyPlugin, self).create_port(context, port)
+        is_ens_tz_port = self._is_ens_tz_port(context, port_data)
+        self._create_port_preprocess_security(context, port, port_data,
+                                              neutron_port, is_ens_tz_port)
+        self._process_portbindings_create_and_update(
+            context, port_data, neutron_port)
+
+    def _delete_subnet_dhcp_port(self, context, net_id):
+        dhcp_port = self._get_sunbet_dhcp_port(context, net_id)
+        if dhcp_port:
+            self.delete_port(context, dhcp_port['id'],
+                             force_delete_dhcp=True)
+
+    def _get_sunbet_dhcp_port(self, context, net_id):
+        filters = {
+            'network_id': [net_id],
+            'device_owner': [const.DEVICE_OWNER_DHCP]
+        }
+        dhcp_ports = self.get_ports(context, filters=filters)
+        return dhcp_ports[0] if dhcp_ports else None
+
+    def _get_sunbet_dhcp_server_ip(self, context, net_id, dhcp_subnet_id):
+        dhcp_port = self._get_sunbet_dhcp_port(context, net_id)
+        if dhcp_port:
+            dhcp_server_ips = [fip['ip_address']
+                               for fip in dhcp_port['fixed_ips']
+                               if fip['subnet_id'] == dhcp_subnet_id]
+            if dhcp_server_ips:
+                return dhcp_server_ips[0]
+
+    def _is_dhcp_network(self, context, net_id):
+        dhcp_port = self._get_sunbet_dhcp_port(context, net_id)
+        return True if dhcp_port else False
+
+    def _get_segment_subnets(self, context, net_id, net_az=None, **kwargs):
+        """Get list of segmentSubnet objects to put on the segment
+
+        Including router interface subnets (for overlay networks) &
+        DHCP subnet (if using policy DHCP)
+        """
+        dhcp_subnet = None
+        if 'dhcp_subnet' in kwargs:
+            dhcp_subnet = kwargs['dhcp_subnet']
+        else:
+            # Get it from the network
+            if self.use_policy_dhcp:
+                # TODO(asarfaty): Add ipv6 support
+                network = self._get_network(context, net_id)
+                for subnet in network.subnets:
+                    if subnet.enable_dhcp and subnet.ip_version == 4:
+                        dhcp_subnet = self.get_subnet(context, subnet.id)
+                        break
+
+        router_subnets = None
+        if 'router_subnets' in kwargs:
+            router_subnets = kwargs['router_subnets']
+        else:
+            # Get it from the network, only if overlay
+            if self._is_overlay_network(context, net_id):
+                router_ids = self._get_network_router_ids(
+                    context.elevated(), net_id)
+                if router_ids:
+                    router_id = router_ids[0]
+                    router_subnets = self._load_router_subnet_cidrs_from_db(
+                        context.elevated(), router_id)
+
+        seg_subnets = []
+
+        dhcp_subnet_id = None
+        if dhcp_subnet:
+            dhcp_subnet_id = dhcp_subnet['id']
+            gw_addr = self._get_gateway_addr_from_subnet(dhcp_subnet)
+            cidr_prefix = int(dhcp_subnet['cidr'].split('/')[1])
+            dhcp_server_ip = self._get_sunbet_dhcp_server_ip(
+                context, net_id, dhcp_subnet_id)
+            dns_nameservers = dhcp_subnet['dns_nameservers']
+            if (not dns_nameservers or
+                not validators.is_attr_set(dns_nameservers)):
+                # Use preconfigured dns server
+                if not net_az:
+                    net_az = self.get_network_az_by_net_id(context, net_id)
+                dns_nameservers = net_az.nameservers
+            dhcp_config = policy_defs.SegmentDhcpConfig(
+                server_address="%s/%s" % (dhcp_server_ip, cidr_prefix),
+                dns_servers=dns_nameservers,
+                is_ipv6=False)  # TODO(asarfaty): add ipv6 support
+            seg_subnet = policy_defs.Subnet(gateway_address=gw_addr,
+                                            dhcp_ranges=[],
+                                            dhcp_config=dhcp_config)
+            seg_subnets.append(seg_subnet)
+
+        if router_subnets:
+            for rtr_subnet in router_subnets:
+                if rtr_subnet['id'] == dhcp_subnet_id:
+                    # Do not add the same subnet twice
+                    continue
+                if rtr_subnet['network_id'] == net_id:
+                    gw_addr = self._get_gateway_addr_from_subnet(rtr_subnet)
+                    seg_subnets.append(
+                        policy_defs.Subnet(gateway_address=gw_addr,
+                                           dhcp_ranges=[],
+                                           dhcp_config=None))
+
+        return seg_subnets
+
+    def _enable_subnet_dhcp(self, context, network, subnet, az):
+        # Allocate a neutron port for the DHCP server
+        self._create_subnet_dhcp_port(context, az, network, subnet)
+
+        # Update the DHCP server on the segment
+        net_id = network['id']
+        segment_id = self._get_network_nsx_segment_id(context, net_id)
+        seg_subnets = self._get_segment_subnets(
+            context, net_id, net_az=az, dhcp_subnet=subnet)
+        net_name = self._net_nsx_name(network)
+
+        # Update dhcp server config on the segment
+        self.nsxpolicy.segment.create_or_overwrite(
+            net_name,
+            segment_id=segment_id,
+            dhcp_server_config_id=az._policy_dhcp_server_config,
+            subnets=seg_subnets)
+
+    def _disable_network_dhcp(self, context, network):
+        net_id = network['id']
+
+        # Remove dhcp server config from the segment
+        segment_id = self._get_network_nsx_segment_id(
+            context, net_id)
+        seg_subnets = self._get_segment_subnets(
+            context, net_id, dhcp_subnet=None)
+        self.nsxpolicy.segment.update(
+            segment_id,
+            subnets=seg_subnets,
+            dhcp_server_config_id=None)
+
+        # Delete the neutron DHCP port (and its bindings)
+        self._delete_subnet_dhcp_port(context, net_id)
+
+    def _update_subnet_dhcp(self, context, network, subnet, az):
+        net_id = network['id']
+        segment_id = self._get_network_nsx_segment_id(context, net_id)
+        seg_subnets = self._get_segment_subnets(
+            context, net_id, net_az=az, dhcp_subnet=subnet)
+        net_name = self._net_nsx_name(network)
+        filters = {'network_id': [net_id]}
+        ports = self.get_ports(context, filters=filters)
+
+        self.nsxpolicy.segment.create_or_overwrite(
+            net_name, segment_id=segment_id,
+            dhcp_server_config_id=az._policy_dhcp_server_config,
+            subnets=seg_subnets)
+
+        # Update DHCP bindings for all the ports.
+        for port in ports:
+            self._add_or_overwrite_port_policy_dhcp_binding(
+                context, port, segment_id, subnet)
+
+    def _validate_net_type_with_dhcp(self, context, network):
+        ddi_support, ddi_type = self._is_ddi_supported_on_net_with_type(
+            context, network['id'], network=network)
+        if not ddi_support:
+            msg = _("Native DHCP is not supported for %(type)s "
+                    "network %(id)s") % {'id': network['id'],
+                                         'type': ddi_type}
+            LOG.error(msg)
+            raise n_exc.InvalidInput(error_message=msg)
+
     @nsx_plugin_common.api_replay_mode_wrapper
     def create_subnet(self, context, subnet):
-        return self._create_subnet(context, subnet)
+        if not self.use_policy_dhcp:
+            # Subnet with MP DHCP
+            return self._create_subnet_with_mp_dhcp(context, subnet)
+        self._validate_number_of_subnet_static_routes(subnet)
+        self._validate_host_routes_input(subnet)
+        self._validate_subnet_ip_version(subnet['subnet'])
+        net_id = subnet['subnet']['network_id']
+        network = self._get_network(context, net_id)
+        self._validate_single_ipv6_subnet(context, network, subnet['subnet'])
+        net_az = self.get_network_az_by_net_id(context, net_id)
+
+        # Allow manipulation of only 1 subnet of the same network at once
+        lock = 'nsxp_network_' + net_id
+        with locking.LockManager.get_lock(lock):
+            # DHCP validations (before creating the neutron subnet)
+            with_dhcp = False
+            if self._subnet_with_native_dhcp(subnet['subnet']):
+                with_dhcp = True
+                self._validate_external_subnet(context, net_id)
+                self._validate_net_dhcp_edge_cluster(context, network, net_az)
+                self._validate_net_type_with_dhcp(context, network)
+                if self._has_dhcp_enabled_subnet(context, network):
+                    msg = (_("Can not create more than one DHCP-enabled "
+                             "subnet in network %s") % net_id)
+                    LOG.error(msg)
+                    raise n_exc.InvalidInput(error_message=msg)
+
+            # Create the neutron subnet.
+            # Any failure from here and on will require rollback.
+            created_subnet = super(NsxPolicyPlugin, self).create_subnet(
+                context, subnet)
+            try:
+                # This can be called only after the super create
+                # since we need the subnet pool to be translated
+                # to allocation pools
+                self._validate_address_space(context, created_subnet)
+            except n_exc.InvalidInput:
+                # revert the subnet creation
+                with excutils.save_and_reraise_exception():
+                    super(NsxPolicyPlugin, self).delete_subnet(
+                        context, created_subnet['id'])
+
+            self._extension_manager.process_create_subnet(context,
+                subnet['subnet'], created_subnet)
+
+            if with_dhcp:
+                try:
+                    # Enable the network DHCP on the NSX
+                    self._enable_subnet_dhcp(
+                        context, network, created_subnet, net_az)
+                except (nsx_lib_exc.ManagerError, nsx_exc.NsxPluginException):
+                    # revert the subnet creation
+                    with excutils.save_and_reraise_exception():
+                        # Try to delete the DHCP port, and the neutron subnet
+                        self._delete_subnet_dhcp_port(context, net_id)
+                        super(NsxPolicyPlugin, self).delete_subnet(
+                            context, created_subnet['id'])
+
+        return created_subnet

     def delete_subnet(self, context, subnet_id):
-        # Call common V3 code to delete the subnet
+        if not self.use_policy_dhcp:
+            # Subnet with MP DHCP
+            return self.delete_subnet_with_mp_dhcp(context, subnet_id)
+        if self._has_native_dhcp_metadata():
+            # Ensure that subnet is not deleted if attached to router.
+            self._subnet_check_ip_allocations_internal_router_ports(
+                context, subnet_id)
+            subnet = self.get_subnet(context, subnet_id)
+            if self._subnet_with_native_dhcp(subnet):
+                lock = 'nsxp_network_' + subnet['network_id']
+                with locking.LockManager.get_lock(lock):
+                    # Check if it is the last DHCP-enabled subnet to delete.
+                    network = self._get_network(context, subnet['network_id'])
+                    if self._has_single_dhcp_enabled_subnet(context, network):
+                        try:
+                            self._disable_network_dhcp(context, network)
+                        except Exception as e:
+                            LOG.error("Failed to disable DHCP for "
+                                      "network %(id)s. Exception: %(e)s",
+                                      {'id': network['id'], 'e': e})
+                        # Continue for the neutron subnet deletion
+        # Delete neutron subnet
         super(NsxPolicyPlugin, self).delete_subnet(context, subnet_id)

     def update_subnet(self, context, subnet_id, subnet):
-        return self._update_subnet(context, subnet_id, subnet)
+        if not self.use_policy_dhcp:
+            # Subnet with MP DHCP
+            return self.update_subnet_with_mp_dhcp(context, subnet_id, subnet)
+
+        subnet_data = subnet['subnet']
+        updated_subnet = None
+        orig_subnet = self.get_subnet(context, subnet_id)
+        self._validate_number_of_subnet_static_routes(subnet)
+        self._validate_host_routes_input(
+            subnet,
+            orig_enable_dhcp=orig_subnet['enable_dhcp'],
+            orig_host_routes=orig_subnet['host_routes'])
+
+        net_id = orig_subnet['network_id']
+        network = self._get_network(context, net_id)
+        net_az = self.get_network_az_by_net_id(context, net_id)
+
+        enable_dhcp = self._subnet_with_native_dhcp(
+            subnet_data, orig_subnet=orig_subnet)
+        orig_enable_dhcp = self._subnet_with_native_dhcp(orig_subnet)
+
+        if enable_dhcp != orig_enable_dhcp:
+            # Update subnet with DHCP status change
+            self._validate_external_subnet(context, net_id)
+            lock = 'nsxp_network_' + net_id
+            with locking.LockManager.get_lock(lock):
+                if enable_dhcp:
+                    self._validate_net_type_with_dhcp(context, network)
+
+                    if self._has_dhcp_enabled_subnet(context, network):
+                        msg = (_("Can not create more than one DHCP-enabled "
+                                 "subnet in network %s") % net_id)
+                        LOG.error(msg)
+                        raise n_exc.InvalidInput(error_message=msg)
+
+                updated_subnet = super(NsxPolicyPlugin, self).update_subnet(
+                    context, subnet_id, subnet)
+                self._extension_manager.process_update_subnet(
+                    context, subnet_data, updated_subnet)
+
+                try:
+                    if enable_dhcp:
+                        self._enable_subnet_dhcp(context, network,
+                                                 updated_subnet, net_az)
+                    else:
+                        self._disable_network_dhcp(context, network)
+                except (nsx_lib_exc.ManagerError, nsx_exc.NsxPluginException):
+                    # revert the subnet update
+                    with excutils.save_and_reraise_exception():
+                        super(NsxPolicyPlugin, self).update_subnet(
+                            context, subnet_id, {'subnet': orig_subnet})
+        else:
+            # No dhcp changes - just call super update
+            updated_subnet = super(NsxPolicyPlugin, self).update_subnet(
+                context, subnet_id, subnet)
+            self._extension_manager.process_update_subnet(
+                context, subnet_data, updated_subnet)
+
+            # Check if needs to update DHCP related NSX resources
+            # (only if the subnet changed, but dhcp was already enabled)
+            if (enable_dhcp and orig_enable_dhcp and
+                ('dns_nameservers' in subnet_data or
+                 'gateway_ip' in subnet_data or
+                 'host_routes' in subnet_data)):
+                self._update_subnet_dhcp(context, network,
+                                         updated_subnet, net_az)
+
+        return updated_subnet

     def _build_port_address_bindings(self, context, port_data):
         psec_on, has_ip = self._determine_port_security_and_has_ip(context,
@@ -1066,9 +1442,7 @@
         vif_id = port_data['id']
         tags = self._build_port_tags(port_data)

-        if device_owner == const.DEVICE_OWNER_DHCP:
-            tag_resource_type = 'os-neutron-dport-id'
-        elif device_owner == l3_db.DEVICE_OWNER_ROUTER_INTF:
+        if device_owner == l3_db.DEVICE_OWNER_ROUTER_INTF:
             tag_resource_type = 'os-neutron-rport-id'
         else:
             tag_resource_type = NSX_P_PORT_RESOURCE_TYPE
@@ -1179,7 +1553,7 @@
         is_octavia_port = (device_owner == oct_const.DEVICE_OWNER_OCTAVIA)

         if is_external_net or is_router_interface or is_dhcp_port:
-            # DHCP is handled on MP level so far
+            # DHCP is handled on NSX level
             # Router is connected automatically in policy
             return False
@@ -1191,6 +1565,136 @@
         return True

+    def _add_or_overwrite_port_policy_dhcp_binding(
+        self, context, port, segment_id, dhcp_subnet=None):
+        if not utils.is_port_dhcp_configurable(port):
+            return
+        net_id = port['network_id']
+        for fixed_ip in self._filter_ipv4_dhcp_fixed_ips(
+            context, port['fixed_ips']):
+            # There will be only one ipv4 ip here
+            binding_id = port['id'] + '-ipv4'
+            name = 'IPv4 binding for port %s' % port['id']
+            ip = fixed_ip['ip_address']
+            hostname = 'host-%s' % ip.replace('.', '-')
+            if dhcp_subnet:
+                if fixed_ip['subnet_id'] != dhcp_subnet['id']:
+                    continue
+                subnet = dhcp_subnet
+            else:
+                subnet = self.get_subnet(context, fixed_ip['subnet_id'])
+            gateway_ip = subnet.get('gateway_ip')
+            options = self._get_dhcp_options(
+                context, ip, port.get(ext_edo.EXTRADHCPOPTS),
+                net_id, subnet)
+            self.nsxpolicy.segment_dhcp_static_bindings.create_or_overwrite_v4(
+                name, segment_id, binding_id=binding_id,
+                gateway_address=gateway_ip,
+                host_name=hostname,
+                ip_address=ip,
+                lease_time=cfg.CONF.nsx_p.dhcp_lease_time,
+                mac_address=port['mac_address'],
+                options=options)
+
+        # TODO(asarfaty): add ipv6 bindings (without options)
+
+    def _add_port_policy_dhcp_binding(self, context, port):
+        net_id = port['network_id']
+        if not self._is_dhcp_network(context, net_id):
+            return
+        segment_id = self._get_network_nsx_segment_id(context, net_id)
+        self._add_or_overwrite_port_policy_dhcp_binding(
+            context, port, segment_id)
+
+    def _delete_port_policy_dhcp_binding(self, context, port):
+        # Do not check device_owner here because Nova may have already
+        # deleted that before Neutron's port deletion.
+        net_id = port['network_id']
+        if not self._is_dhcp_network(context, net_id):
+            return
+        segment_id = self._get_network_nsx_segment_id(context, net_id)
+
+        v4_dhcp = v6_dhcp = False
+        for fixed_ip in port['fixed_ips']:
+            ip_addr = fixed_ip['ip_address']
+            if netaddr.IPAddress(ip_addr).version == 6:
+                v6_dhcp = True
+            else:
+                v4_dhcp = True
+        if v4_dhcp:
+            try:
+                bindingv4_id = port['id'] + '-ipv4'
+                self.nsxpolicy.segment_dhcp_static_bindings.delete(
+                    segment_id, bindingv4_id)
+            except nsx_lib_exc.ResourceNotFound:
+                pass
+
+        if v6_dhcp:
+            try:
+                bindingv6_id = port['id'] + '-ipv6'
+                self.nsxpolicy.segment_dhcp_static_bindings.delete(
+                    segment_id, bindingv6_id)
+            except nsx_lib_exc.ResourceNotFound:
+                pass
+
+    def _update_port_policy_dhcp_binding(self, context, old_port, new_port):
+        # First check if any IPv4 address in fixed_ips is changed.
+        # Then update DHCP server setting or DHCP static binding
+        # depending on the port type.
+        # Note that Neutron allows a port with multiple IPs in the
+        # same subnet. But backend DHCP server may not support that.
+        if (utils.is_port_dhcp_configurable(old_port) !=
+            utils.is_port_dhcp_configurable(new_port)):
+            # Note that the device_owner could be changed,
+            # but still needs DHCP binding.
+            if utils.is_port_dhcp_configurable(old_port):
+                self._delete_port_policy_dhcp_binding(context, old_port)
+            else:
+                self._add_port_policy_dhcp_binding(context, new_port)
+            return
+
+        # Collect IPv4 DHCP addresses from original and updated fixed_ips
+        # in the form of [(subnet_id, ip_address)].
+        old_fixed_ips = set([(fixed_ip['subnet_id'], fixed_ip['ip_address'])
+                             for fixed_ip in self._filter_ipv4_dhcp_fixed_ips(
+                                 context, old_port['fixed_ips'])])
+        new_fixed_ips = set([(fixed_ip['subnet_id'], fixed_ip['ip_address'])
+                             for fixed_ip in self._filter_ipv4_dhcp_fixed_ips(
+                                 context, new_port['fixed_ips'])])
+        # Find out the subnet/IP differences before and after the update.
+        ips_to_add = list(new_fixed_ips - old_fixed_ips)
+        ips_to_delete = list(old_fixed_ips - new_fixed_ips)
+        ip_change = (ips_to_add or ips_to_delete)
+
+        if (old_port["device_owner"] == const.DEVICE_OWNER_DHCP and
+            ip_change):
+            # Update backend DHCP server address if the IP address of a DHCP
+            # port is changed.
+            if len(new_fixed_ips) != 1:
+                msg = _("Can only configure one IP address on a DHCP server")
+                LOG.error(msg)
+                raise n_exc.InvalidInput(error_message=msg)
+            fixed_ip = list(new_fixed_ips)[0]
+            subnet_id = fixed_ip[0]
+            net_id = old_port['network_id']
+            network = self.get_network(context, net_id)
+            subnet = self.get_subnet(context, subnet_id)
+            net_az = self.get_network_az_by_net_id(context, net_id)
+            self._update_subnet_dhcp(context, network, subnet, net_az)
+        elif utils.is_port_dhcp_configurable(new_port):
+            dhcp_opts_changed = (old_port[ext_edo.EXTRADHCPOPTS] !=
+                                 new_port[ext_edo.EXTRADHCPOPTS])
+            if (ip_change or dhcp_opts_changed or
+                old_port['mac_address'] != new_port['mac_address']):
+                if new_fixed_ips:
+                    # Recreate the bindings of this port
+                    self._add_port_policy_dhcp_binding(context, new_port)
+                else:
+                    self._delete_port_policy_dhcp_binding(context, old_port)
+
     def create_port(self, context, port, l2gw_port_check=False):
         port_data = port['port']
         # validate the new port parameters
@@ -1257,7 +1761,7 @@
                 super(NsxPolicyPlugin, self).delete_port(
                     context, neutron_db['id'])

-        # Attach the policy to the port in the neutron DB
+        # Attach the QoS policy to the port in the neutron DB
         if qos_policy_id:
             qos_com_utils.update_port_policy_binding(context,
                                                      neutron_db['id'],
@@ -1270,16 +1774,18 @@
         self._remove_provider_security_groups_from_list(port_data)

         # Add Mac/IP binding to native DHCP server and neutron DB.
-        if cfg.CONF.nsx_p.allow_passthrough:
-            try:
-                self._add_dhcp_binding(context, port_data)
-            except nsx_lib_exc.ManagerError:
-                # Rollback create port
-                self.delete_port(context, port_data['id'],
-                                 force_delete_dhcp=True)
-                msg = _('Unable to create port. Please contact admin')
-                LOG.exception(msg)
-                raise nsx_exc.NsxPluginException(err_msg=msg)
+        try:
+            if self.use_policy_dhcp:
+                self._add_port_policy_dhcp_binding(context, port_data)
+            elif cfg.CONF.nsx_p.allow_passthrough:
+                self._add_port_mp_dhcp_binding(context, port_data)
+        except nsx_lib_exc.ManagerError:
+            # Rollback create port
+            self.delete_port(context, port_data['id'],
+                             force_delete_dhcp=True)
+            msg = _('Unable to create port. Please contact admin')
+            LOG.exception(msg)
+            raise nsx_exc.NsxPluginException(err_msg=msg)

         kwargs = {'context': context, 'port': neutron_db}
         registry.notify(resources.PORT, events.AFTER_CREATE, self, **kwargs)
@@ -1331,8 +1837,10 @@
             self.disassociate_floatingips(context, port_id)

         # Remove Mac/IP binding from native DHCP server and neutron DB.
-        if cfg.CONF.nsx_p.allow_passthrough:
-            self._delete_dhcp_binding(context, port_data)
+        if self.use_policy_dhcp:
+            self._delete_port_policy_dhcp_binding(context, port_data)
+        elif cfg.CONF.nsx_p.allow_passthrough:
+            self._delete_port_mp_dhcp_binding(context, port_data)

         super(NsxPolicyPlugin, self).delete_port(context, port_id)
@@ -1469,8 +1977,12 @@
                 port_id)

         # Update DHCP bindings.
-        if cfg.CONF.nsx_p.allow_passthrough:
-            self._update_dhcp_binding(context, original_port, updated_port)
+        if self.use_policy_dhcp:
+            self._update_port_policy_dhcp_binding(
+                context, original_port, updated_port)
+        elif cfg.CONF.nsx_p.allow_passthrough:
+            self._update_port_mp_dhcp_binding(
+                context, original_port, updated_port)

         # Make sure the port revision is updated
         if 'revision_number' in updated_port:
@@ -1564,7 +2076,7 @@
                 router_id,
                 nat_rule_id=self._get_snat_rule_id(subnet))

-    def _get_edge_cluster_path(self, tier0_uuid, router):
+    def _get_router_edge_cluster_path(self, tier0_uuid, router):
         # Take the AZ edge cluster if configured
         az = self._get_router_az_obj(router)
         if az and az._edge_cluster_uuid:
@@ -1662,7 +2174,7 @@
             err_msg = (_("Cannot create service router for %s without a "
                          "gateway") % router_id)
             raise n_exc.InvalidInput(error_message=err_msg)
-        edge_cluster_path = self._get_edge_cluster_path(
+        edge_cluster_path = self._get_router_edge_cluster_path(
             tier0_uuid, router)
         if edge_cluster_path:
             self.nsxpolicy.tier1.set_edge_cluster_path(
@@ -2004,8 +2516,9 @@
         return updated_router

     def _get_gateway_addr_from_subnet(self, subnet):
-        cidr_prefix = int(subnet['cidr'].split('/')[1])
-        return "%s/%s" % (subnet['gateway_ip'], cidr_prefix)
+        if subnet['gateway_ip'] and subnet['cidr']:
+            cidr_prefix = int(subnet['cidr'].split('/')[1])
+            return "%s/%s" % (subnet['gateway_ip'], cidr_prefix)

     @nsx_plugin_common.api_replay_mode_wrapper
     def add_router_interface(self, context, router_id, interface_info):
@@ -2064,15 +2577,8 @@
             context.elevated(), router_id)
         if overlay_net:
             # overlay interface
-            pol_subnets = []
-            for rtr_subnet in rtr_subnets:
-                # For dual stack, we allow one v4 and one v6
-                # subnet per network
-                if rtr_subnet['network_id'] == network_id:
-                    gw_addr = self._get_gateway_addr_from_subnet(
-                        rtr_subnet)
-                    pol_subnets.append(
-                        policy_defs.Subnet(gateway_address=gw_addr))
+            pol_subnets = self._get_segment_subnets(
+                context, network_id, interface_subnets=rtr_subnets)

             self.nsxpolicy.segment.update(segment_id,
                                           tier1_id=router_id,
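
A note on the static binding scheme the new policy-DHCP code introduces:
each DHCP-configurable port gets one IPv4 binding on its segment, with an
ID derived from the port ID, which is why create/update can use
create_or_overwrite_v4 and delete can target both address families blindly.
A minimal sketch (the port values below are made up, for illustration only):

    # Naming convention per _add_or_overwrite_port_policy_dhcp_binding()
    port = {'id': '3f2a...', 'mac_address': 'fa:16:3e:00:00:01'}  # hypothetical
    ip = '10.0.0.5'
    binding_id = port['id'] + '-ipv4'              # one binding per family
    host_name = 'host-%s' % ip.replace('.', '-')   # -> 'host-10-0-0-5'
    # _delete_port_policy_dhcp_binding() later deletes both '<id>-ipv4' and
    # '<id>-ipv6', ignoring ResourceNotFound for bindings that never existed.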


@@ -1222,16 +1222,15 @@ class NsxV3Plugin(nsx_plugin_common.NsxPluginV3Base,
     @nsx_plugin_common.api_replay_mode_wrapper
     def create_subnet(self, context, subnet):
-        return self._create_subnet(context, subnet)
+        return self._create_subnet_with_mp_dhcp(context, subnet)

     def delete_subnet(self, context, subnet_id):
         # Call common V3 code to delete the subnet
-        super(NsxV3Plugin, self).delete_subnet(context, subnet_id)
+        self.delete_subnet_with_mp_dhcp(context, subnet_id)

     def update_subnet(self, context, subnet_id, subnet):
-        updated_subnet = self._update_subnet(context,
-                                             subnet_id,
-                                             subnet)
+        updated_subnet = self.update_subnet_with_mp_dhcp(
+            context, subnet_id, subnet)
         if (cfg.CONF.nsx_v3.metadata_on_demand and
             not self._has_native_dhcp_metadata()):
             # If enable_dhcp is changed on a subnet attached to a router,
@@ -1659,7 +1658,7 @@
         # Add Mac/IP binding to native DHCP server and neutron DB.
         if cfg.CONF.nsx_v3.native_dhcp_metadata:
             try:
-                self._add_dhcp_binding(context, port_data)
+                self._add_port_mp_dhcp_binding(context, port_data)
             except nsx_lib_exc.ManagerError:
                 # Rollback create port
                 self.delete_port(context, port_data['id'],
@@ -1732,7 +1731,7 @@
         # Remove Mac/IP binding from native DHCP server and neutron DB.
         if cfg.CONF.nsx_v3.native_dhcp_metadata:
-            self._delete_dhcp_binding(context, port)
+            self._delete_port_mp_dhcp_binding(context, port)
         else:
             nsx_rpc.handle_port_metadata_access(self, context, port,
                                                 is_delete=True)
@@ -2016,7 +2015,8 @@
         # Update DHCP bindings.
         if cfg.CONF.nsx_v3.native_dhcp_metadata:
-            self._update_dhcp_binding(context, original_port, updated_port)
+            self._update_port_mp_dhcp_binding(
+                context, original_port, updated_port)

         # Make sure the port revision is updated
         if 'revision_number' in updated_port:


@@ -19,7 +19,7 @@ from oslo_log import log as logging
 from vmware_nsx.shell.admin.plugins.common import constants
 from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
 from vmware_nsx.shell.admin.plugins.nsxp.resources import utils as p_utils
-import vmware_nsx.shell.resources as shell
+from vmware_nsx.shell import resources as shell
 from vmware_nsxlib.v3 import nsx_constants

 LOG = logging.getLogger(__name__)
@@ -54,6 +54,67 @@ def list_networks(resource, event, trigger, **kwargs):
     return bool(mappings)

+@admin_utils.output_header
+def migrate_dhcp_to_policy(resource, event, trigger, **kwargs):
+    errmsg = ("Need to specify policy dhcp config id. Add "
+              "--property dhcp-config=<id>")
+    if not kwargs.get('property'):
+        LOG.error("%s", errmsg)
+        return
+    properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
+    dhcp_config_id = properties.get('dhcp-config')
+    if not dhcp_config_id:
+        LOG.error("%s", errmsg)
+        return
+
+    nsxpolicy = p_utils.get_connected_nsxpolicy()
+    if not nsxpolicy.feature_supported(
+        nsx_constants.FEATURE_NSX_POLICY_DHCP):
+        LOG.error("This utility is not available for NSX version %s",
+                  nsxpolicy.get_version())
+        return
+
+    try:
+        nsxpolicy.dhcp_server_config.get(dhcp_config_id)
+    except Exception:
+        LOG.error("%s", errmsg)
+        return
+
+    ctx = context.get_admin_context()
+    migrate_count = 0
+    with p_utils.NsxPolicyPluginWrapper() as plugin:
+        nets = plugin.get_networks(ctx)
+        for net in nets:
+            # skip non-dhcp networks
+            dhcp_port = plugin._get_sunbet_dhcp_port(ctx, net['id'])
+            if not dhcp_port:
+                LOG.info("Skipping network %s: No DHCP subnet found",
+                         net['id'])
+                continue
+            dhcp_subnet_id = [fip['subnet_id']
+                              for fip in dhcp_port['fixed_ips']][0]
+            az = plugin.get_network_az_by_net_id(ctx, net['id'])
+            az._policy_dhcp_server_config = dhcp_config_id
+            dhcp_subnet = plugin.get_subnet(ctx, dhcp_subnet_id)
+
+            # Verify that this network does not use policy DHCP already
+            segment_id = plugin._get_network_nsx_segment_id(ctx, net['id'])
+            segment = nsxpolicy.segment.get(segment_id)
+            if segment.get('dhcp_config_path'):
+                LOG.info("Skipping network %s: Already using policy DHCP",
+                         net['id'])
+                continue
+
+            LOG.info("Migrating network %s", net['id'])
+            # Disable MP DHCP
+            plugin._disable_native_dhcp(ctx, net['id'])
+            # Enable Policy DHCP
+            plugin._enable_subnet_dhcp(ctx, net, dhcp_subnet, az)
+            migrate_count = migrate_count + 1
+
+    LOG.info("Finished migrating %s networks", migrate_count)
+
 @admin_utils.output_header
 def update_admin_state(resource, event, trigger, **kwargs):
     """Upon upgrade to NSX3 update policy segments & ports
@@ -85,3 +146,7 @@ def update_admin_state(resource, event, trigger, **kwargs):
 registry.subscribe(update_admin_state,
                    constants.NETWORKS,
                    shell.Operations.NSX_UPDATE_STATE.value)
+
+registry.subscribe(migrate_dhcp_to_policy,
+                   constants.DHCP_BINDING,
+                   shell.Operations.MIGRATE_TO_POLICY.value)
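
Per the admin documentation updated earlier in this commit, the new handler
registered above is invoked as::

    nsxadmin -r dhcp-binding -o migrate-to-policy --property dhcp-config=<id>

where <id> is the Policy DHCP server config that the migrated segments
should use.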


@@ -263,6 +263,8 @@ nsxp_resources = {
     constants.NETWORKS: Resource(constants.NETWORKS,
                                  [Operations.LIST.value,
                                   Operations.NSX_UPDATE_STATE.value]),
+    constants.DHCP_BINDING: Resource(constants.DHCP_BINDING,
+                                     [Operations.MIGRATE_TO_POLICY.value]),
     constants.ROUTERS: Resource(constants.ROUTERS,
                                 [Operations.LIST.value,
                                  Operations.UPDATE_TIER0.value,


@@ -111,14 +111,6 @@ class NsxPAvailabilityZonesTestCase(base.BaseTestCase):
             nsx_az.NsxPAvailabilityZone,
             self.az_name)

-    def test_availability_zone_missing_dhcp_profile(self):
-        # Mandatory parameter
-        self._config_az(dhcp_profile=None)
-        self.assertRaises(
-            nsx_exc.NsxInvalidConfiguration,
-            nsx_az.NsxPAvailabilityZone,
-            self.az_name)
-
     def test_availability_zone_missing_md_route(self):
         self._config_az(native_metadata_route=None)
         az = nsx_az.NsxPAvailabilityZone(self.az_name)


@ -33,7 +33,9 @@ from vmware_nsx.extensions import advancedserviceproviders as as_providers
from vmware_nsx.plugins.nsx_p import availability_zones as nsx_az from vmware_nsx.plugins.nsx_p import availability_zones as nsx_az
from vmware_nsx.tests.unit.nsx_p import test_plugin from vmware_nsx.tests.unit.nsx_p import test_plugin
from vmware_nsxlib.v3 import core_resources from vmware_nsxlib.v3 import core_resources
from vmware_nsxlib.v3 import exceptions as nsxlib_exc
from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import nsx_constants
from vmware_nsxlib.v3.policy import core_resources as policy_resources
from vmware_nsxlib.v3 import resources as nsx_resources from vmware_nsxlib.v3 import resources as nsx_resources
@ -58,7 +60,7 @@ def set_az_in_config(name, metadata_proxy="metadata_proxy1",
class NsxNativeDhcpTestCase(test_plugin.NsxPPluginTestCaseMixin): class NsxNativeDhcpTestCase(test_plugin.NsxPPluginTestCaseMixin):
"""Test native dhcp config when using MP DHCP"""
def setUp(self): def setUp(self):
self._orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification self._orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification
cfg.CONF.set_override('dhcp_agent_notification', False) cfg.CONF.set_override('dhcp_agent_notification', False)
@ -67,14 +69,20 @@ class NsxNativeDhcpTestCase(test_plugin.NsxPPluginTestCaseMixin):
self.az_metadata_route = '3.3.3.3' self.az_metadata_route = '3.3.3.3'
set_az_in_config(self._az_name, set_az_in_config(self._az_name,
native_metadata_route=self.az_metadata_route) native_metadata_route=self.az_metadata_route)
self._patcher = mock.patch.object(core_resources.NsxLibDhcpProfile, self._patcher1 = mock.patch.object(core_resources.NsxLibDhcpProfile,
'get') 'get')
self._patcher.start() self._patcher1.start()
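# Make the policy DHCP server config lookup fail, so the plugin
# treats the configured profile as an MP one and keeps using MP DHCP.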
self._patcher2 = mock.patch.object(
policy_resources.NsxDhcpServerConfigApi,
'get', side_effect=nsxlib_exc.ResourceNotFound)
self._patcher2.start()
self._initialize_azs() self._initialize_azs()
self.plugin._init_dhcp_metadata() self.plugin._init_dhcp_metadata()
self.plugin.use_policy_dhcp = False
def tearDown(self): def tearDown(self):
self._patcher.stop() self._patcher1.stop()
self._patcher2.stop()
cfg.CONF.set_override('dhcp_agent_notification', cfg.CONF.set_override('dhcp_agent_notification',
self._orig_dhcp_agent_notification) self._orig_dhcp_agent_notification)
super(NsxNativeDhcpTestCase, self).tearDown() super(NsxNativeDhcpTestCase, self).tearDown()
@ -881,6 +889,7 @@ class NsxNativeDhcpTestCase(test_plugin.NsxPPluginTestCaseMixin):
class NsxNativeMetadataTestCase(test_plugin.NsxPPluginTestCaseMixin): class NsxNativeMetadataTestCase(test_plugin.NsxPPluginTestCaseMixin):
"""Test native metadata config when using MP MDProxy"""
def setUp(self): def setUp(self):
self._orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification self._orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification

View File

@ -0,0 +1,945 @@
# Copyright (c) 2015 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import netaddr
from oslo_config import cfg
from oslo_utils import uuidutils
from neutron.extensions import securitygroup as secgrp
from neutron_lib import constants
from neutron_lib import context
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from vmware_nsx.common import config
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.extensions import advancedserviceproviders as as_providers
from vmware_nsx.plugins.nsx_p import availability_zones as nsx_az
from vmware_nsx.tests.unit.nsx_p import test_plugin
from vmware_nsxlib.v3 import core_resources
from vmware_nsxlib.v3.policy import core_resources as nsx_resources
from vmware_nsxlib.v3 import utils as nsxlib_utils
def set_az_in_config(name, metadata_proxy="metadata_proxy1",
dhcp_server_config="dsc1",
native_metadata_route="2.2.2.2",
dns_domain='aaaa',
nameservers=['bbbb']):
group_name = 'az:%s' % name
cfg.CONF.set_override('availability_zones', [name], group="nsx_p")
config.register_nsxp_azs(cfg.CONF, [name])
cfg.CONF.set_override("metadata_proxy", metadata_proxy,
group=group_name)
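# Note: the AZ 'dhcp_profile' option is reused here to hold the
# DHCP server config when running with policy DHCP.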
cfg.CONF.set_override("dhcp_profile", dhcp_server_config,
group=group_name)
cfg.CONF.set_override("native_metadata_route", native_metadata_route,
group=group_name)
cfg.CONF.set_override("dns_domain", dns_domain,
group=group_name)
cfg.CONF.set_override("nameservers", nameservers,
group=group_name)
class NsxPolicyDhcpTestCase(test_plugin.NsxPPluginTestCaseMixin):
"""Test native dhcp config when using MP DHCP"""
#TODO(asarfaty): Add tests for DHCPv6
def setUp(self):
self._orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification
cfg.CONF.set_override('dhcp_agent_notification', False)
cfg.CONF.set_override('dhcp_profile', 'dsc1', 'nsx_p')
super(NsxPolicyDhcpTestCase, self).setUp()
self._az_name = 'zone1'
self.az_metadata_route = '3.3.3.3'
set_az_in_config(self._az_name,
native_metadata_route=self.az_metadata_route)
self._patcher = mock.patch.object(core_resources.NsxLibDhcpProfile,
'get')
self._patcher.start()
self._initialize_azs()
self.plugin._init_dhcp_metadata()
def tearDown(self):
self._patcher.stop()
cfg.CONF.set_override('dhcp_agent_notification',
self._orig_dhcp_agent_notification)
super(NsxPolicyDhcpTestCase, self).tearDown()
def _make_subnet_data(self,
name=None,
network_id=None,
cidr=None,
gateway_ip=None,
tenant_id=None,
allocation_pools=None,
enable_dhcp=True,
dns_nameservers=None,
ip_version=4,
host_routes=None,
shared=False):
return {'subnet': {
'name': name,
'network_id': network_id,
'cidr': cidr,
'gateway_ip': gateway_ip,
'tenant_id': tenant_id,
'allocation_pools': allocation_pools,
'ip_version': ip_version,
'enable_dhcp': enable_dhcp,
'dns_nameservers': dns_nameservers,
'host_routes': host_routes,
'shared': shared}}
def _bind_name(self, port):
return 'IPv4 binding for port %s' % port['port']['id']
def _verify_dhcp_service(self, network_id, tenant_id, enabled):
# Verify if DHCP service is enabled on a network.
port_res = self._list_ports('json', 200, network_id,
tenant_id=tenant_id,
device_owner=constants.DEVICE_OWNER_DHCP)
port_list = self.deserialize('json', port_res)
self.assertEqual(len(port_list['ports']) == 1, enabled)
def _verify_dhcp_binding(self, subnet, port_data, update_data,
assert_data):
# Verify if DHCP binding is updated.
with mock.patch('vmware_nsxlib.v3.policy.core_resources.'
'SegmentDhcpStaticBindingConfigApi.'
'create_or_overwrite_v4') as update_dhcp_binding:
device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None'
device_id = uuidutils.generate_uuid()
with self.port(subnet=subnet, device_owner=device_owner,
device_id=device_id, **port_data) as port:
binding_name = self._bind_name(port)
ip_address = port['port']['fixed_ips'][0]['ip_address']
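# Expected option121 static routes: two metadata routes, the
# local subnet route, and the default route via the gateway.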
options = {'option121': {'static_routes': [
{'network': '%s' %
cfg.CONF.nsx_p.native_metadata_route,
'next_hop': '0.0.0.0'},
{'network': '%s' %
cfg.CONF.nsx_p.native_metadata_route,
'next_hop': ip_address},
{'network': subnet['subnet']['cidr'],
'next_hop': '0.0.0.0'},
{'network': constants.IPv4_ANY,
'next_hop': subnet['subnet']['gateway_ip']}]}}
if 'extra_dhcp_opts' in port_data:
other_opts = []
options['others'] = []
for opt in port_data['extra_dhcp_opts']:
other_opts.append(
{'code': nsxlib_utils.get_dhcp_opt_code(
opt['opt_name']),
'values': [opt['opt_value']]})
options['others'] = other_opts
binding_data = {'mac_address': port['port']['mac_address'],
'ip_address': ip_address,
'gateway_address':
subnet['subnet']['gateway_ip'],
'host_name':
'host-%s' % ip_address.replace('.', '-'),
'lease_time': 86400,
'options': options}
# Verify the initial bindings call.
update_dhcp_binding.assert_called_once_with(
binding_name, subnet['subnet']['network_id'],
binding_id=port['port']['id'] + '-ipv4',
**binding_data)
update_dhcp_binding.reset_mock()
# Update the port with provided data.
self.plugin.update_port(
context.get_admin_context(), port['port']['id'],
update_data)
# Extend basic binding data with to-be-asserted data.
binding_data.update(assert_data)
# Verify the update call.
update_dhcp_binding.assert_called_once_with(
binding_name, subnet['subnet']['network_id'],
binding_id=port['port']['id'] + '-ipv4',
**binding_data)
def test_dhcp_service_with_create_network(self):
# Test if DHCP service is disabled on a network when it is created.
with self.network() as network:
self._verify_dhcp_service(network['network']['id'],
network['network']['tenant_id'], False)
def test_dhcp_service_with_delete_dhcp_network(self):
# Test if DHCP service is disabled when directly deleting a network
# with a DHCP-enabled subnet.
with self.network() as network:
with self.subnet(network=network, enable_dhcp=True):
self.plugin.delete_network(context.get_admin_context(),
network['network']['id'])
self._verify_dhcp_service(network['network']['id'],
network['network']['tenant_id'],
False)
def test_dhcp_service_with_create_non_dhcp_subnet(self):
# Test if DHCP service is disabled on a network when a DHCP-disabled
# subnet is created.
with self.network() as network:
with self.subnet(network=network, enable_dhcp=False):
self._verify_dhcp_service(network['network']['id'],
network['network']['tenant_id'],
False)
def test_dhcp_service_with_create_multiple_non_dhcp_subnets(self):
# Test if DHCP service is disabled on a network when multiple
# DHCP-disabled subnets are created.
with self.network() as network:
with self.subnet(network=network, cidr='10.0.0.0/24',
enable_dhcp=False):
with self.subnet(network=network, cidr='20.0.0.0/24',
enable_dhcp=False):
self._verify_dhcp_service(network['network']['id'],
network['network']['tenant_id'],
False)
def test_dhcp_service_with_create_dhcp_subnet(self):
# Test if DHCP service is enabled on a network when a DHCP-enabled
# subnet is created.
with self.network() as network:
with self.subnet(network=network, enable_dhcp=True):
self._verify_dhcp_service(network['network']['id'],
network['network']['tenant_id'],
True)
def test_dhcp_service_with_create_dhcp_subnet_bulk(self):
# Test if DHCP service is enabled on all networks after a
# create_subnet_bulk operation.
with self.network() as network1, self.network() as network2:
subnet1 = self._make_subnet_data(
network_id=network1['network']['id'], cidr='10.0.0.0/24',
tenant_id=network1['network']['tenant_id'])
subnet2 = self._make_subnet_data(
network_id=network2['network']['id'], cidr='20.0.0.0/24',
tenant_id=network2['network']['tenant_id'])
subnets = {'subnets': [subnet1, subnet2]}
with mock.patch.object(self.plugin, '_post_create_subnet'
) as post_create_subnet:
self.plugin.create_subnet_bulk(
context.get_admin_context(), subnets)
# Check if post_create function has been called for
# both subnets.
self.assertEqual(len(subnets['subnets']),
post_create_subnet.call_count)
def test_dhcp_service_with_create_dhcp_subnet_bulk_failure(self):
# Test that the user-provided rollback function is invoked when an
# exception occurs during a create_subnet_bulk operation.
with self.network() as network1, self.network() as network2:
subnet1 = self._make_subnet_data(
network_id=network1['network']['id'], cidr='10.0.0.0/24',
tenant_id=network1['network']['tenant_id'])
subnet2 = self._make_subnet_data(
network_id=network2['network']['id'], cidr='20.0.0.0/24',
tenant_id=network2['network']['tenant_id'])
subnets = {'subnets': [subnet1, subnet2]}
# Inject an exception on the second create_subnet call.
orig_create_subnet = self.plugin.create_subnet
with mock.patch.object(self.plugin,
'create_subnet') as create_subnet:
def side_effect(*args, **kwargs):
return self._fail_second_call(
create_subnet, orig_create_subnet, *args, **kwargs)
create_subnet.side_effect = side_effect
with mock.patch.object(self.plugin,
'_rollback_subnet') as rollback_subnet:
try:
self.plugin.create_subnet_bulk(
context.get_admin_context(), subnets)
except Exception:
pass
# Check if rollback function has been called for
# the subnet in the first network.
rollback_subnet.assert_called_once_with(mock.ANY, mock.ANY)
subnet_arg = rollback_subnet.call_args[0][0]
self.assertEqual(network1['network']['id'],
subnet_arg['network_id'])
def test_dhcp_service_with_create_multiple_dhcp_subnets(self):
# Test that multiple DHCP-enabled subnets cannot be created in a network.
with self.network() as network:
with self.subnet(network=network, cidr='10.0.0.0/24',
enable_dhcp=True):
subnet = {'subnet': {'network_id': network['network']['id'],
'cidr': '20.0.0.0/24',
'enable_dhcp': True}}
self.assertRaises(
n_exc.InvalidInput, self.plugin.create_subnet,
context.get_admin_context(), subnet)
def test_dhcp_service_with_delete_dhcp_subnet(self):
# Test if DHCP service is disabled on a network when a DHCP-enabled
# subnet is deleted.
with self.network() as network:
with self.subnet(network=network, enable_dhcp=True) as subnet:
self._verify_dhcp_service(network['network']['id'],
network['network']['tenant_id'],
True)
self.plugin.delete_subnet(context.get_admin_context(),
subnet['subnet']['id'])
self._verify_dhcp_service(network['network']['id'],
network['network']['tenant_id'],
False)
def test_dhcp_service_with_update_dhcp_subnet(self):
# Test if DHCP service is enabled on a network when a DHCP-disabled
# subnet is updated to DHCP-enabled.
with self.network() as network:
with self.subnet(network=network, enable_dhcp=False) as subnet:
self._verify_dhcp_service(network['network']['id'],
network['network']['tenant_id'],
False)
data = {'subnet': {'enable_dhcp': True}}
self.plugin.update_subnet(context.get_admin_context(),
subnet['subnet']['id'], data)
self._verify_dhcp_service(network['network']['id'],
network['network']['tenant_id'],
True)
def test_dhcp_service_with_update_multiple_dhcp_subnets(self):
# Test that a DHCP-disabled subnet cannot be updated to DHCP-enabled
# if a DHCP-enabled subnet already exists in the same network.
with self.network() as network:
with self.subnet(network=network, cidr='10.0.0.0/24',
enable_dhcp=True):
with self.subnet(network=network, cidr='20.0.0.0/24',
enable_dhcp=False) as subnet:
self._verify_dhcp_service(network['network']['id'],
network['network']['tenant_id'],
True)
data = {'subnet': {'enable_dhcp': True}}
self.assertRaises(
n_exc.InvalidInput, self.plugin.update_subnet,
context.get_admin_context(), subnet['subnet']['id'],
data)
def test_dhcp_service_with_update_dhcp_port(self):
# Test if DHCP server IP is updated when the corresponding DHCP port
# IP is changed.
with mock.patch('vmware_nsxlib.v3.policy.core_resources.'
'NsxPolicySegmentApi.'
'create_or_overwrite') as update_segment_dhcp:
with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet:
filters = {
'network_id': [subnet['subnet']['network_id']],
'device_owner': [constants.DEVICE_OWNER_DHCP]
}
dhcp_ports = self.plugin.get_ports(
context.get_admin_context(), filters=filters)
port = dhcp_ports[0]
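# Bump the DHCP port fixed IP by one and expect a segment update.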
old_ip = port['fixed_ips'][0]['ip_address']
new_ip = str(netaddr.IPAddress(old_ip) + 1)
data = {'port': {'fixed_ips': [
{'subnet_id': subnet['subnet']['id'],
'ip_address': new_ip}]}}
update_segment_dhcp.reset_mock()
self.plugin.update_port(context.get_admin_context(),
port['id'], data)
update_segment_dhcp.assert_called_once()
def test_dhcp_binding_with_create_port(self):
# Test if DHCP binding is added when a compute port is created.
with mock.patch('vmware_nsxlib.v3.policy.core_resources.'
'SegmentDhcpStaticBindingConfigApi.'
'create_or_overwrite_v4') as create_dhcp_binding:
with self.subnet(enable_dhcp=True) as subnet:
device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None'
device_id = uuidutils.generate_uuid()
with self.port(subnet=subnet, device_owner=device_owner,
device_id=device_id) as port:
binding_name = self._bind_name(port)
ip = port['port']['fixed_ips'][0]['ip_address']
hostname = 'host-%s' % ip.replace('.', '-')
options = {'option121': {'static_routes': [
{'network': '%s' %
cfg.CONF.nsx_p.native_metadata_route,
'next_hop': '0.0.0.0'},
{'network': '%s' %
cfg.CONF.nsx_p.native_metadata_route,
'next_hop': ip},
{'network': subnet['subnet']['cidr'],
'next_hop': '0.0.0.0'},
{'network': '0.0.0.0/0',
'next_hop': subnet['subnet']['gateway_ip']}]}}
create_dhcp_binding.assert_called_once_with(
binding_name, subnet['subnet']['network_id'],
binding_id=port['port']['id'] + '-ipv4',
mac_address=port['port']['mac_address'],
ip_address=ip,
host_name=hostname,
lease_time=cfg.CONF.nsx_p.dhcp_lease_time,
options=options,
gateway_address=subnet['subnet']['gateway_ip'])
def test_dhcp_binding_with_create_port_with_opts(self):
# Test if DHCP binding is added when a compute port is created
# with extra options.
opt_name = 'interface-mtu'
opt_code = 26
opt_val = '9000'
with mock.patch('vmware_nsxlib.v3.policy.core_resources.'
'SegmentDhcpStaticBindingConfigApi.'
'create_or_overwrite_v4') as create_dhcp_binding:
with self.subnet(enable_dhcp=True) as subnet:
device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None'
device_id = uuidutils.generate_uuid()
extra_dhcp_opts = [{'opt_name': opt_name,
'opt_value': opt_val}]
with self.port(subnet=subnet, device_owner=device_owner,
device_id=device_id,
extra_dhcp_opts=extra_dhcp_opts,
arg_list=('extra_dhcp_opts',)) as port:
binding_name = self._bind_name(port)
ip = port['port']['fixed_ips'][0]['ip_address']
hostname = 'host-%s' % ip.replace('.', '-')
options = {'option121': {'static_routes': [
{'network': '%s' %
cfg.CONF.nsx_p.native_metadata_route,
'next_hop': '0.0.0.0'},
{'network': '%s' %
cfg.CONF.nsx_p.native_metadata_route,
'next_hop': ip},
{'network': subnet['subnet']['cidr'],
'next_hop': '0.0.0.0'},
{'network': '0.0.0.0/0',
'next_hop': subnet['subnet']['gateway_ip']}]},
'others': [{'code': opt_code, 'values': [opt_val]}]}
create_dhcp_binding.assert_called_once_with(
binding_name, subnet['subnet']['network_id'],
binding_id=port['port']['id'] + '-ipv4',
mac_address=port['port']['mac_address'],
ip_address=ip,
host_name=hostname,
lease_time=cfg.CONF.nsx_p.dhcp_lease_time,
options=options,
gateway_address=subnet['subnet']['gateway_ip'])
def test_dhcp_binding_with_create_port_with_opts121(self):
# Test if DHCP binding is added when a compute port is created
# with extra option121.
with mock.patch('vmware_nsxlib.v3.policy.core_resources.'
'SegmentDhcpStaticBindingConfigApi.'
'create_or_overwrite_v4') as create_dhcp_binding:
with self.subnet(enable_dhcp=True) as subnet:
device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None'
device_id = uuidutils.generate_uuid()
extra_dhcp_opts = [{'opt_name': 'classless-static-route',
'opt_value': '1.0.0.0/24,1.2.3.4'}]
with self.port(subnet=subnet, device_owner=device_owner,
device_id=device_id,
extra_dhcp_opts=extra_dhcp_opts,
arg_list=('extra_dhcp_opts',)) as port:
ip = port['port']['fixed_ips'][0]['ip_address']
binding_name = self._bind_name(port)
hostname = 'host-%s' % ip.replace('.', '-')
options = {'option121': {'static_routes': [
{'network': '%s' %
cfg.CONF.nsx_p.native_metadata_route,
'next_hop': '0.0.0.0'},
{'network': '%s' %
cfg.CONF.nsx_p.native_metadata_route,
'next_hop': ip},
{'network': subnet['subnet']['cidr'],
'next_hop': '0.0.0.0'},
{'network': '0.0.0.0/0',
'next_hop': subnet['subnet']['gateway_ip']},
{'network': '1.0.0.0/24',
'next_hop': '1.2.3.4'}]}}
create_dhcp_binding.assert_called_once_with(
binding_name, subnet['subnet']['network_id'],
binding_id=port['port']['id'] + '-ipv4',
mac_address=port['port']['mac_address'],
ip_address=ip,
host_name=hostname,
lease_time=cfg.CONF.nsx_p.dhcp_lease_time,
options=options,
gateway_address=subnet['subnet']['gateway_ip'])
def test_dhcp_binding_with_create_port_with_bad_opts(self):
with self.subnet(enable_dhcp=True) as subnet:
device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None'
device_id = uuidutils.generate_uuid()
ctx = context.get_admin_context()
# Use illegal opt-name
extra_dhcp_opts = [{'opt_name': 'Dummy',
'opt_value': 'Dummy'}]
data = {'port': {
'name': 'dummy',
'network_id': subnet['subnet']['network_id'],
'tenant_id': subnet['subnet']['tenant_id'],
'device_owner': device_owner,
'device_id': device_id,
'extra_dhcp_opts': extra_dhcp_opts,
'admin_state_up': True,
'fixed_ips': [],
'mac_address': '00:00:00:00:00:01',
}}
self.assertRaises(n_exc.InvalidInput,
self.plugin.create_port, ctx, data)
# Use illegal option121 value
extra_dhcp_opts = [{'opt_name': 'classless-static-route',
'opt_value': '1.0.0.0/24,5.5.5.5,cc'}]
data['port']['extra_dhcp_opts'] = extra_dhcp_opts
self.assertRaises(n_exc.InvalidInput,
self.plugin.create_port, ctx, data)
def test_dhcp_binding_with_delete_port(self):
# Test if DHCP binding is removed when the associated compute port
# is deleted.
with mock.patch('vmware_nsxlib.v3.policy.core_resources.'
'SegmentDhcpStaticBindingConfigApi.'
'delete') as delete_dhcp_binding:
with self.subnet(enable_dhcp=True) as subnet:
device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None'
device_id = uuidutils.generate_uuid()
with self.port(subnet=subnet, device_owner=device_owner,
device_id=device_id) as port:
self.plugin.delete_port(
context.get_admin_context(), port['port']['id'])
delete_dhcp_binding.assert_called_with(
port['port']['network_id'],
port['port']['id'] + '-ipv4')
def test_dhcp_binding_with_update_port_delete_ip(self):
# Test if DHCP binding is deleted when the IP of the associated
# compute port is deleted.
with mock.patch('vmware_nsxlib.v3.policy.core_resources.'
'SegmentDhcpStaticBindingConfigApi.'
'delete') as delete_dhcp_binding:
with self.subnet(enable_dhcp=True) as subnet:
device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None'
device_id = uuidutils.generate_uuid()
with self.port(subnet=subnet, device_owner=device_owner,
device_id=device_id) as port:
data = {'port': {'fixed_ips': [],
'admin_state_up': False,
secgrp.SECURITYGROUPS: []}}
self.plugin.update_port(
context.get_admin_context(), port['port']['id'], data)
delete_dhcp_binding.assert_called_with(
port['port']['network_id'],
port['port']['id'] + '-ipv4')
def test_dhcp_binding_with_update_port_ip(self):
# Test if DHCP binding is updated when the IP of the associated
# compute port is changed.
with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet:
port_data = {'fixed_ips': [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.3'}]}
new_ip = '10.0.0.4'
update_data = {'port': {'fixed_ips': [
{'subnet_id': subnet['subnet']['id'], 'ip_address': new_ip}]}}
assert_data = {'host_name': 'host-%s' % new_ip.replace('.', '-'),
'ip_address': new_ip,
'options': {'option121': {'static_routes': [
{'network': '%s' %
cfg.CONF.nsx_p.native_metadata_route,
'next_hop': '0.0.0.0'},
{'network': '%s' %
cfg.CONF.nsx_p.native_metadata_route,
'next_hop': new_ip},
{'network': subnet['subnet']['cidr'],
'next_hop': '0.0.0.0'},
{'network': constants.IPv4_ANY,
'next_hop': subnet['subnet']['gateway_ip']}]}}}
self._verify_dhcp_binding(subnet, port_data, update_data,
assert_data)
def test_dhcp_binding_with_update_port_mac(self):
# Test if DHCP binding is updated when the MAC of the associated
# compute port is changed.
with self.subnet(enable_dhcp=True) as subnet:
port_data = {'mac_address': '11:22:33:44:55:66'}
new_mac = '22:33:44:55:66:77'
update_data = {'port': {'mac_address': new_mac}}
assert_data = {'mac_address': new_mac,
'options': {'option121': {'static_routes': [
{'network': '%s' %
cfg.CONF.nsx_p.native_metadata_route,
'next_hop': '0.0.0.0'},
{'network': '%s' %
cfg.CONF.nsx_p.native_metadata_route,
'next_hop': mock.ANY},
{'network': subnet['subnet']['cidr'],
'next_hop': '0.0.0.0'},
{'network': constants.IPv4_ANY,
'next_hop': subnet['subnet']['gateway_ip']}]}}}
self._verify_dhcp_binding(subnet, port_data, update_data,
assert_data)
def test_dhcp_binding_with_update_port_mac_ip(self):
# Test if DHCP binding is updated when the IP and MAC of the associated
# compute port are changed at the same time.
with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet:
port_data = {'mac_address': '11:22:33:44:55:66',
'fixed_ips': [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.3'}]}
new_mac = '22:33:44:55:66:77'
new_ip = '10.0.0.4'
update_data = {'port': {'mac_address': new_mac, 'fixed_ips': [
{'subnet_id': subnet['subnet']['id'], 'ip_address': new_ip}]}}
assert_data = {'host_name': 'host-%s' % new_ip.replace('.', '-'),
'mac_address': new_mac,
'ip_address': new_ip,
'options': {'option121': {'static_routes': [
{'network': '%s' %
cfg.CONF.nsx_p.native_metadata_route,
'next_hop': '0.0.0.0'},
{'network': '%s' %
cfg.CONF.nsx_p.native_metadata_route,
'next_hop': new_ip},
{'network': subnet['subnet']['cidr'],
'next_hop': '0.0.0.0'},
{'network': constants.IPv4_ANY,
'next_hop': subnet['subnet']['gateway_ip']}]}}}
self._verify_dhcp_binding(subnet, port_data, update_data,
assert_data)
def test_update_port_with_update_dhcp_opt(self):
# Test updating extra-dhcp-opts via port update.
with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet:
mac_address = '11:22:33:44:55:66'
ip_addr = '10.0.0.3'
port_data = {'arg_list': ('extra_dhcp_opts',),
'mac_address': mac_address,
'fixed_ips': [{'subnet_id': subnet['subnet']['id'],
'ip_address': ip_addr}],
'extra_dhcp_opts': [
{'opt_name': 'interface-mtu',
'opt_value': '9000'}]}
update_data = {'port': {'extra_dhcp_opts': [
{'opt_name': 'interface-mtu',
'opt_value': '9002'}]}}
assert_data = {'mac_address': mac_address,
'ip_address': ip_addr,
'options': {'option121': {'static_routes': [
{'network': '%s' %
cfg.CONF.nsx_p.native_metadata_route,
'next_hop': '0.0.0.0'},
{'network': '%s' %
cfg.CONF.nsx_p.native_metadata_route,
'next_hop': ip_addr},
{'network': subnet['subnet']['cidr'],
'next_hop': '0.0.0.0'},
{'network': constants.IPv4_ANY,
'next_hop': subnet['subnet']['gateway_ip']}]},
'others': [{'code': 26, 'values': ['9002']}]}}
self._verify_dhcp_binding(subnet, port_data, update_data,
assert_data)
def test_update_port_with_adding_dhcp_opt(self):
# Test adding extra-dhcp-opts via port update.
with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet:
mac_address = '11:22:33:44:55:66'
ip_addr = '10.0.0.3'
port_data = {'arg_list': ('extra_dhcp_opts',),
'mac_address': mac_address,
'fixed_ips': [{'subnet_id': subnet['subnet']['id'],
'ip_address': ip_addr}],
'extra_dhcp_opts': [
{'opt_name': 'nis-domain',
'opt_value': 'abc'}]}
update_data = {'port': {'extra_dhcp_opts': [
{'opt_name': 'interface-mtu',
'opt_value': '9002'}]}}
assert_data = {'mac_address': mac_address,
'ip_address': ip_addr,
'options': {'option121': {'static_routes': [
{'network': '%s' %
cfg.CONF.nsx_p.native_metadata_route,
'next_hop': '0.0.0.0'},
{'network': '%s' %
cfg.CONF.nsx_p.native_metadata_route,
'next_hop': ip_addr},
{'network': subnet['subnet']['cidr'],
'next_hop': '0.0.0.0'},
{'network': constants.IPv4_ANY,
'next_hop': subnet['subnet']['gateway_ip']}]},
'others': [{'code': 26, 'values': ['9002']},
{'code': 40, 'values': ['abc']}]}}
self._verify_dhcp_binding(subnet, port_data, update_data,
assert_data)
def test_update_port_with_deleting_dhcp_opt(self):
# Test deleting extra-dhcp-opts via port update.
with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet:
mac_address = '11:22:33:44:55:66'
ip_addr = '10.0.0.3'
port_data = {'arg_list': ('extra_dhcp_opts',),
'mac_address': mac_address,
'fixed_ips': [{'subnet_id': subnet['subnet']['id'],
'ip_address': ip_addr}],
'extra_dhcp_opts': [
{'opt_name': 'interface-mtu',
'opt_value': '9002'},
{'opt_name': 'nis-domain',
'opt_value': 'abc'}]}
update_data = {'port': {'extra_dhcp_opts': [
{'opt_name': 'interface-mtu',
'opt_value': None}]}}
assert_data = {'mac_address': mac_address,
'ip_address': ip_addr,
'options': {'option121': {'static_routes': [
{'network': '%s' %
cfg.CONF.nsx_p.native_metadata_route,
'next_hop': '0.0.0.0'},
{'network': '%s' %
cfg.CONF.nsx_p.native_metadata_route,
'next_hop': ip_addr},
{'network': subnet['subnet']['cidr'],
'next_hop': '0.0.0.0'},
{'network': constants.IPv4_ANY,
'next_hop': subnet['subnet']['gateway_ip']}]},
'others': [{'code': 40, 'values': ['abc']}]}}
self._verify_dhcp_binding(subnet, port_data, update_data,
assert_data)
def test_dhcp_binding_with_update_port_name(self):
# Test if DHCP binding is not updated when the name of the associated
# compute port is changed.
with mock.patch('vmware_nsxlib.v3.policy.core_resources.'
'SegmentDhcpStaticBindingConfigApi.'
'create_or_overwrite_v4') as update_dhcp_binding:
with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet:
device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None'
device_id = uuidutils.generate_uuid()
with self.port(subnet=subnet, device_owner=device_owner,
device_id=device_id, name='abc') as port:
data = {'port': {'name': 'xyz'}}
update_dhcp_binding.reset_mock()
self.plugin.update_port(
context.get_admin_context(), port['port']['id'], data)
update_dhcp_binding.assert_not_called()
def test_create_network_with_bad_az_hint(self):
p = directory.get_plugin()
ctx = context.get_admin_context()
data = {'network': {
'name': 'test-az',
'tenant_id': self._tenant_id,
'port_security_enabled': False,
'admin_state_up': True,
'shared': False,
'availability_zone_hints': ['bad_hint']
}}
self.assertRaises(n_exc.NeutronException,
p.create_network,
ctx, data)
def test_create_network_with_az_hint(self):
p = directory.get_plugin()
ctx = context.get_admin_context()
data = {'network': {
'name': 'test-az',
'tenant_id': self._tenant_id,
'port_security_enabled': False,
'admin_state_up': True,
'shared': False,
'availability_zone_hints': [self._az_name]
}}
# network creation should succeed
net = p.create_network(ctx, data)
self.assertEqual([self._az_name],
net['availability_zone_hints'])
self.assertEqual([self._az_name],
net['availability_zones'])
def test_create_network_with_no_az_hint(self):
p = directory.get_plugin()
ctx = context.get_admin_context()
data = {'network': {
'name': 'test-az',
'tenant_id': self._tenant_id,
'port_security_enabled': False,
'admin_state_up': True,
'shared': False
}}
# network creation should succeed
net = p.create_network(ctx, data)
self.assertEqual([],
net['availability_zone_hints'])
self.assertEqual([nsx_az.DEFAULT_NAME],
net['availability_zones'])
def test_dhcp_service_with_create_az_network(self):
# Test if DHCP service is disabled on an AZ network when it is created.
with self.network(availability_zone_hints=[self._az_name],
arg_list=('availability_zone_hints',)) as network:
self._verify_dhcp_service(network['network']['id'],
network['network']['tenant_id'], False)
def test_dhcp_binding_with_create_az_port(self):
# Test if DHCP binding is added when a compute port is created
# on a network in a specific availability zone.
with mock.patch('vmware_nsxlib.v3.policy.core_resources.'
'SegmentDhcpStaticBindingConfigApi.'
'create_or_overwrite_v4') as create_dhcp_binding:
with self.network(
availability_zone_hints=[self._az_name],
arg_list=('availability_zone_hints',)) as network:
with self.subnet(enable_dhcp=True, network=network) as subnet:
device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X'
device_id = uuidutils.generate_uuid()
with self.port(subnet=subnet, device_owner=device_owner,
device_id=device_id) as port:
binding_name = self._bind_name(port)
ip = port['port']['fixed_ips'][0]['ip_address']
hostname = 'host-%s' % ip.replace('.', '-')
options = {'option121': {'static_routes': [
{'network': '%s' %
self.az_metadata_route,
'next_hop': '0.0.0.0'},
{'network': '%s' %
self.az_metadata_route,
'next_hop': ip},
{'network': subnet['subnet']['cidr'],
'next_hop': '0.0.0.0'},
{'network': '0.0.0.0/0',
'next_hop': subnet['subnet']['gateway_ip']}]}}
create_dhcp_binding.assert_called_once_with(
binding_name, subnet['subnet']['network_id'],
binding_id=port['port']['id'] + '-ipv4',
mac_address=port['port']['mac_address'],
ip_address=ip,
host_name=hostname,
lease_time=cfg.CONF.nsx_p.dhcp_lease_time,
options=options,
gateway_address=subnet['subnet']['gateway_ip'])
def test_create_subnet_with_dhcp_port(self):
with self.subnet(enable_dhcp=True) as subnet:
# find the dhcp port and verify it has port security disabled
ports = self.plugin.get_ports(
context.get_admin_context())
self.assertEqual(1, len(ports))
self.assertEqual('network:dhcp', ports[0]['device_owner'])
self.assertEqual(subnet['subnet']['network_id'],
ports[0]['network_id'])
self.assertEqual(False, ports[0]['port_security_enabled'])
class NsxPolicyMetadataTestCase(test_plugin.NsxPPluginTestCaseMixin):
"""Test native metadata config when using MP MDProxy"""
def setUp(self):
self._orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification
cfg.CONF.set_override('dhcp_agent_notification', False)
super(NsxPolicyMetadataTestCase, self).setUp()
self._az_name = 'zone1'
self._az_metadata_proxy = 'dummy'
set_az_in_config(self._az_name, metadata_proxy=self._az_metadata_proxy)
self._patcher = mock.patch.object(core_resources.NsxLibMetadataProxy,
'get')
self._patcher.start()
self._initialize_azs()
self.plugin._init_dhcp_metadata()
def tearDown(self):
self._patcher.stop()
cfg.CONF.set_override('dhcp_agent_notification',
self._orig_dhcp_agent_notification)
super(NsxPolicyMetadataTestCase, self).tearDown()
def test_metadata_proxy_configuration(self):
# Test if dhcp_agent_notification and metadata_proxy are
# configured correctly.
orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification
cfg.CONF.set_override('dhcp_agent_notification', True)
self.assertRaises(nsx_exc.NsxPluginException,
self.plugin._init_dhcp_metadata)
cfg.CONF.set_override('dhcp_agent_notification',
orig_dhcp_agent_notification)
orig_metadata_proxy_uuid = cfg.CONF.nsx_p.metadata_proxy
cfg.CONF.set_override('metadata_proxy', '', 'nsx_p')
self.assertRaises(cfg.RequiredOptError,
self.plugin._init_default_config)
cfg.CONF.set_override('metadata_proxy', orig_metadata_proxy_uuid,
'nsx_p')
def test_metadata_proxy_with_create_network(self):
# Test if native metadata proxy is enabled on a network when it is
# created (using policy MDProxy).
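# Force the default AZ to use the policy metadata proxy.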
self.plugin._availability_zones_data._default_az.use_policy_md = True
with mock.patch.object(nsx_resources.NsxPolicySegmentApi,
'create_or_overwrite') as create:
with self.network() as network:
create.assert_called_once_with(
mock.ANY,
segment_id=network['network']['id'],
description=mock.ANY,
vlan_ids=mock.ANY,
transport_zone_id=mock.ANY,
tags=mock.ANY,
metadata_proxy_id=test_plugin.NSX_MD_PROXY_ID)
def test_metadata_proxy_with_create_az_network(self):
# Test if native metadata proxy is enabled on a network when it is
# created (using policy MDProxy).
azs = self.plugin._availability_zones_data.availability_zones
azs[self._az_name].use_policy_md = True
with mock.patch.object(nsx_resources.NsxPolicySegmentApi,
'create_or_overwrite') as create:
with self.network(
availability_zone_hints=[self._az_name],
arg_list=('availability_zone_hints',)) as network:
create.assert_called_once_with(
mock.ANY,
segment_id=network['network']['id'],
description=mock.ANY,
vlan_ids=mock.ANY,
transport_zone_id=mock.ANY,
tags=mock.ANY,
metadata_proxy_id='dummy')
def test_metadata_proxy_with_get_subnets(self):
# Test if get_subnets() handles advanced-service-provider extension,
# which is used when processing metadata requests.
self.plugin._availability_zones_data._default_az.use_policy_md = True
with self.network() as n1, self.network() as n2:
with self.subnet(network=n1, enable_dhcp=False) as s1, \
self.subnet(network=n2, enable_dhcp=False) as s2:
# Get all the subnets.
subnets = self._list('subnets')['subnets']
self.assertEqual(len(subnets), 2)
self.assertEqual(set([s['id'] for s in subnets]),
set([s1['subnet']['id'], s2['subnet']['id']]))
lswitch_id = uuidutils.generate_uuid()
neutron_id = n1['network']['id']
segment_path = '/infra/segments/%s' % neutron_id
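# The policy search below is mocked to map the realized logical
# switch back to the neutron network's segment path.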
# Get only the subnets associated with a particular advanced
# service provider (i.e. logical switch).
with mock.patch('vmware_nsxlib.v3.policy.NsxPolicyLib.'
'search_resource_by_realized_id',
return_value=[segment_path]):
subnets = self._list('subnets', query_params='%s=%s' %
(as_providers.ADV_SERVICE_PROVIDERS,
lswitch_id))['subnets']
self.assertEqual(len(subnets), 1)
self.assertEqual(subnets[0]['id'], s1['subnet']['id'])

View File

@ -1533,7 +1533,7 @@ class TestPortsV2(common_v3.NsxV3SubnetMixin,
def test_port_failure_rollback_dhcp_exception(self): def test_port_failure_rollback_dhcp_exception(self):
self._enable_native_dhcp_md() self._enable_native_dhcp_md()
self.plugin = directory.get_plugin() self.plugin = directory.get_plugin()
with mock.patch.object(self.plugin, '_add_dhcp_binding', with mock.patch.object(self.plugin, '_add_port_mp_dhcp_binding',
side_effect=nsxlib_exc.ManagerError): side_effect=nsxlib_exc.ManagerError):
self.port() self.port()
ctx = context.get_admin_context() ctx = context.get_admin_context()