Refactor DvsManager code

The DVS manager code has its own dvs_id, although many of the APIs should
use different dvs-ids in different cases.
This patch creates a separate class for a DVS manager with an assigned dvs-id
and keeps the original class free of a specific DVS assignment; the new class
is used by the DVS plugin.

In addition, it creates a VC-manager class to replace the old DVS manager.
This class inherits from different VC-related classes: DvsManager, VMManager
and ClusterManager.
This way those APIs can be consumed separately.

This patch also required some refactoring of the NSX-v QoS code, as it did not
support multiple DVSes correctly.

Change-Id: Iab2159585795207d7a6236c34b91b860cb13cc7c
This commit is contained in:
Adit Sarfaty 2017-02-14 11:47:49 +02:00
parent 261bf07ff3
commit d9545e0e75
11 changed files with 221 additions and 181 deletions

View File

@ -209,6 +209,13 @@ def get_nsx_switch_ids(session, neutron_id):
neutron_id=neutron_id)]
def get_nsx_network_mappings(session, neutron_id):
# This function returns a list of NSX switch identifiers because of
# the possibility of chained logical switches
return session.query(nsx_models.NeutronNsxNetworkMapping).filter_by(
neutron_id=neutron_id).all()
def get_nsx_switch_id_for_dvs(session, neutron_id, dvs_id):
"""Retrieve the NSX switch ID for a given DVS ID and neutron network."""
try:

View File

@ -23,34 +23,28 @@ from vmware_nsx.dvs import dvs_utils
LOG = logging.getLogger(__name__)
PORTGROUP_PREFIX = 'dvportgroup'
API_FIND_ALL_BY_UUID = 'FindAllByUuid'
# QoS related constants
QOS_IN_DIRECTION = 'incomingPackets'
QOS_AGENT_NAME = 'dvfilter-generic-vmware'
API_FIND_ALL_BY_UUID = 'FindAllByUuid'
DSCP_RULE_DESCRIPTION = 'Openstack Dscp Marking RUle'
class DvsManager(object):
"""Management class for dvs related tasks."""
class SingleDvsManager(object):
"""Management class for dvs related tasks for the dvs plugin
def __init__(self, dvs_id=None):
"""Initializer.
For the globally configured dvs.
the moref of the configured DVS will be learnt. This will be used in
the operations supported by the manager.
"""
def __init__(self):
self._dvs = DvsManager()
self._dvs_moref = self._get_dvs_moref_by_name(
self._dvs.get_vc_session(),
dvs_utils.dvs_name_get())
A global session with the VC will be established. In addition to this
the moref of the configured DVS will be learnt. This will be used in
the operations supported by the manager.
NOTE: the DVS port group name will be the Neutron network UUID.
"""
self._session = dvs_utils.dvs_create_session()
# In the future we may decide to support more than one DVS
if dvs_id is None:
self._dvs_moref = self._get_dvs_moref(self._session,
dvs_utils.dvs_name_get())
else:
self._dvs_moref = vim_util.get_moref(dvs_id,
'VmwareDistributedVirtualSwitch')
def _get_dvs_moref(self, session, dvs_name):
def _get_dvs_moref_by_name(self, session, dvs_name):
"""Get the moref of the configured DVS."""
results = session.invoke_api(vim_util,
'get_objects',
@ -66,6 +60,44 @@ class DvsManager(object):
results = vim_util.continue_retrieval(session.vim, results)
raise nsx_exc.DvsNotFound(dvs=dvs_name)
def add_port_group(self, net_id, vlan_tag=None):
return self._dvs.add_port_group(self._dvs_moref, net_id,
vlan_tag=vlan_tag)
def delete_port_group(self, net_id):
return self._dvs.delete_port_group(self._dvs_moref, net_id)
def get_port_group_info(self, net_id):
return self._dvs.get_port_group_info(self._dvs_moref, net_id)
def net_id_to_moref(self, net_id):
return self._dvs._net_id_to_moref(self._dvs_moref, net_id)
class VCManagerBase(object):
"""Base class for all VC related classes, to initialize the session"""
def __init__(self):
"""Initializer.
A global session with the VC will be established.
NOTE: the DVS port group name will be the Neutron network UUID.
"""
self._session = dvs_utils.dvs_create_session()
def get_vc_session(self):
return self._session
class DvsManager(VCManagerBase):
"""Management class for dvs related tasks
The dvs-id is not a class member, ince multiple dvs-es can be supported.
"""
def _get_dvs_moref_by_id(self, dvs_id):
return vim_util.get_moref(dvs_id, 'VmwareDistributedVirtualSwitch')
def _get_port_group_spec(self, net_id, vlan_tag):
"""Gets the port groups spec for net_id and vlan_tag."""
client_factory = self._session.vim.client.factory
@ -83,12 +115,12 @@ class DvsManager(object):
pg_spec.defaultPortConfig = config
return pg_spec
def add_port_group(self, net_id, vlan_tag=None):
def add_port_group(self, dvs_moref, net_id, vlan_tag=None):
"""Add a new port group to the configured DVS."""
pg_spec = self._get_port_group_spec(net_id, vlan_tag)
task = self._session.invoke_api(self._session.vim,
'CreateDVPortgroup_Task',
self._dvs_moref,
dvs_moref,
spec=pg_spec)
try:
# NOTE(garyk): cache the returned moref
@ -102,16 +134,17 @@ class DvsManager(object):
LOG.info(_LI("%(net_id)s with tag %(vlan_tag)s created on %(dvs)s."),
{'net_id': net_id,
'vlan_tag': vlan_tag,
'dvs': self._dvs_moref.value})
'dvs': dvs_moref.value})
def _net_id_to_moref(self, net_id):
# DEBUG ADIT used only by the DVS plugin
def _net_id_to_moref(self, dvs_moref, net_id):
"""Gets the moref for the specific neutron network."""
# NOTE(garyk): return this from a cache if not found then invoke
# code below.
port_groups = self._session.invoke_api(vim_util,
'get_object_properties',
self._session.vim,
self._dvs_moref,
dvs_moref,
['portgroup'])
if len(port_groups) and hasattr(port_groups[0], 'propSet'):
for prop in port_groups[0].propSet:
@ -244,7 +277,7 @@ class DvsManager(object):
# Update the dvs port groups config for a vxlan/vlan network
# update the spec using a callback and user data
def update_port_groups_config(self, net_id, net_moref,
def update_port_groups_config(self, dvs_id, net_id, net_moref,
spec_update_calback, spec_update_data):
is_vlan = self._is_vlan_network_by_moref(net_moref)
if is_vlan:
@ -252,7 +285,9 @@ class DvsManager(object):
spec_update_calback,
spec_update_data)
else:
return self._update_vxlan_port_groups_config(net_id,
dvs_moref = self._get_dvs_moref_by_id(dvs_id)
return self._update_vxlan_port_groups_config(dvs_moref,
net_id,
net_moref,
spec_update_calback,
spec_update_data)
@ -261,6 +296,7 @@ class DvsManager(object):
# Searching the port groups for a partial match to the network id & moref
# update the spec using a callback and user data
def _update_vxlan_port_groups_config(self,
dvs_moref,
net_id,
net_moref,
spec_update_calback,
@ -268,7 +304,7 @@ class DvsManager(object):
port_groups = self._session.invoke_api(vim_util,
'get_object_properties',
self._session.vim,
self._dvs_moref,
dvs_moref,
['portgroup'])
found = False
if len(port_groups) and hasattr(port_groups[0], 'propSet'):
@ -303,9 +339,9 @@ class DvsManager(object):
spec_update_calback,
spec_update_data)
def delete_port_group(self, net_id):
def delete_port_group(self, dvs_moref, net_id):
"""Delete a specific port group."""
moref = self._net_id_to_moref(net_id)
moref = self._net_id_to_moref(dvs_moref, net_id)
task = self._session.invoke_api(self._session.vim,
'Destroy_Task',
moref)
@ -318,10 +354,11 @@ class DvsManager(object):
net_id)
LOG.info(_LI("%(net_id)s delete from %(dvs)s."),
{'net_id': net_id,
'dvs': self._dvs_moref.value})
'dvs': dvs_moref.value})
def get_portgroup_info(self, pg_moref):
def get_port_group_info(self, dvs_moref, net_id):
"""Get portgroup information."""
pg_moref = self._net_id_to_moref(dvs_moref, net_id)
# Expand the properties to collect on need basis.
properties = ['name']
pg_info = self._session.invoke_api(vim_util,
@ -330,6 +367,45 @@ class DvsManager(object):
pg_moref, properties)
return pg_info
def _get_dvs_moref_from_teaming_data(self, teaming_data):
"""Get the moref dvs that belongs to the teaming data"""
if 'switchObj' in teaming_data:
if 'objectId' in teaming_data['switchObj']:
dvs_id = teaming_data['switchObj']['objectId']
return vim_util.get_moref(
dvs_id, 'VmwareDistributedVirtualSwitch')
def update_port_group_spec_teaming(self, pg_spec, teaming_data):
mapping = {'FAILOVER_ORDER': 'failover_explicit',
'ETHER_CHANNEL': 'loadbalance_ip',
'LACP_ACTIVE': 'loadbalance_ip',
'LACP_PASSIVE': 'loadbalance_ip',
'LACP_V2': 'loadbalance_ip',
'LOADBALANCE_SRCID': 'loadbalance_srcid',
'LOADBALANCE_SRCMAC': 'loadbalance_srcmac',
'LOADBALANCE_LOADBASED': 'loadbalance_loadbased'}
dvs_moref = self._get_dvs_moref_from_teaming_data(teaming_data)
port_conf = pg_spec.defaultPortConfig
policy = port_conf.uplinkTeamingPolicy
policy.inherited = False
policy.policy.inherited = False
policy.policy.value = mapping[teaming_data['teamingPolicy']]
policy.uplinkPortOrder.inherited = False
ports = teaming_data['failoverUplinkPortNames']
policy.uplinkPortOrder.activeUplinkPort = ports
# The standby port will be those not configure as active ones
uplinks = self._session.invoke_api(vim_util,
"get_object_property",
self._session.vim,
dvs_moref,
"config.uplinkPortPolicy")
standby = list(set(uplinks.uplinkPortName) - set(ports))
policy.uplinkPortOrder.standbyUplinkPort = standby
class VMManager(VCManagerBase):
"""Management class for VMs related VC tasks."""
def get_vm_moref_obj(self, instance_uuid):
"""Get reference to the VM.
The method will make use of FindAllByUuid to get the VM reference.
@ -448,45 +524,9 @@ class DvsManager(object):
"config.hardware.device")
return hardware_devices
def _get_dvs_moref_from_teaming_data(self, teaming_data):
"""Get the moref dvs that belongs to the teaming data
If not found: return the default one
"""
dvs_moref = self._dvs_moref
if 'switchObj' in teaming_data:
if 'objectId' in teaming_data['switchObj']:
dvs_id = teaming_data['switchObj']['objectId']
dvs_moref = vim_util.get_moref(
dvs_id, 'VmwareDistributedVirtualSwitch')
return dvs_moref
def update_port_group_spec_teaming(self, pg_spec, teaming_data):
mapping = {'FAILOVER_ORDER': 'failover_explicit',
'ETHER_CHANNEL': 'loadbalance_ip',
'LACP_ACTIVE': 'loadbalance_ip',
'LACP_PASSIVE': 'loadbalance_ip',
'LACP_V2': 'loadbalance_ip',
'LOADBALANCE_SRCID': 'loadbalance_srcid',
'LOADBALANCE_SRCMAC': 'loadbalance_srcmac',
'LOADBALANCE_LOADBASED': 'loadbalance_loadbased'}
dvs_moref = self._get_dvs_moref_from_teaming_data(teaming_data)
port_conf = pg_spec.defaultPortConfig
policy = port_conf.uplinkTeamingPolicy
policy.inherited = False
policy.policy.inherited = False
policy.policy.value = mapping[teaming_data['teamingPolicy']]
policy.uplinkPortOrder.inherited = False
ports = teaming_data['failoverUplinkPortNames']
policy.uplinkPortOrder.activeUplinkPort = ports
# The standby port will be those not configure as active ones
uplinks = self._session.invoke_api(vim_util,
"get_object_property",
self._session.vim,
dvs_moref,
"config.uplinkPortPolicy")
standby = list(set(uplinks.uplinkPortName) - set(ports))
policy.uplinkPortOrder.standbyUplinkPort = standby
class ClusterManager(VCManagerBase):
"""Management class for Cluster related VC tasks."""
def _reconfigure_cluster(self, session, cluster, config_spec):
"""Reconfigure a cluster in vcenter"""
@ -736,3 +776,8 @@ class DvsManager(object):
config_spec.rulesSpec = ruleSpec
if groupSpec or ruleSpec:
self._reconfigure_cluster(session, cluster, config_spec)
class VCManager(DvsManager, VMManager, ClusterManager):
"""Management class for all vc related tasks."""
pass

View File

@ -93,7 +93,7 @@ class NsxDvsV2(addr_pair_db.AllowedAddressPairsMixin,
neutron_extensions.append_api_extensions_path(
[vmware_nsx.NSX_EXT_PATH])
self.cfg_group = 'dvs' # group name for dvs section in nsx.ini
self._dvs = dvs.DvsManager()
self._dvs = dvs.SingleDvsManager()
# Common driver code
self.base_binding_dict = {
@ -152,13 +152,13 @@ class NsxDvsV2(addr_pair_db.AllowedAddressPairsMixin,
net_id = None
if net_data.get(pnet.NETWORK_TYPE) == c_utils.NetworkTypes.PORTGROUP:
net_id = net_data.get(pnet.PHYSICAL_NETWORK)
dvpg_moref = self._dvs._net_id_to_moref(net_id)
pg_info = self._dvs.get_portgroup_info(dvpg_moref)
pg_info = self._dvs.get_port_group_info(net_id)
if pg_info.get('name') != net_data.get('name'):
err_msg = (_("Portgroup name %(dvpg)s must match network "
"name %(network)s") % {'dvpg': pg_info.get('name'),
'network': net_data.get('name')})
raise n_exc.InvalidInput(error_message=err_msg)
dvpg_moref = self._dvs.net_id_to_moref(net_id)
dvs_id = dvpg_moref.value
else:
dvs_id = self._dvs_get_id(net_data)

View File

@ -217,11 +217,10 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
pbin.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases}}
# This needs to be set prior to binding callbacks
self.dvs_id = cfg.CONF.nsxv.dvs_id
if cfg.CONF.nsxv.use_dvs_features:
self._dvs = dvs.DvsManager(dvs_id=self.dvs_id)
self._vcm = dvs.VCManager()
else:
self._dvs = None
self._vcm = None
# Create the client to interface with the NSX-v
_nsx_v_callbacks = edge_utils.NsxVCallbacks(self)
self.nsx_v = vcns_driver.VcnsDriver(_nsx_v_callbacks)
@ -807,14 +806,15 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
if validators.is_attr_set(network.get(mpnet.SEGMENTS)):
return True
def _delete_backend_network(self, moref):
def _delete_backend_network(self, moref, dvs_id=None):
"""Deletes the backend NSX network.
This can either be a VXLAN or a VLAN network. The type is determined
by the prefix of the moref.
The dvs_id is relevant only if it is a vlan network
"""
if moref.startswith(PORTGROUP_PREFIX):
self.nsx_v.delete_port_group(self.dvs_id, moref)
self.nsx_v.delete_port_group(dvs_id, moref)
else:
self.nsx_v.delete_virtual_wire(moref)
@ -831,12 +831,12 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
net_data['id'])
def _update_network_teaming(self, dvs_id, net_id, net_moref):
if self._dvs:
if self._vcm:
h, switch = self.nsx_v.vcns.get_vdn_switch(dvs_id)
try:
self._dvs.update_port_groups_config(
net_id, net_moref,
self._dvs.update_port_group_spec_teaming,
self._vcm.update_port_groups_config(
dvs_id, net_id, net_moref,
self._vcm.update_port_group_spec_teaming,
switch)
except Exception as e:
LOG.error(_LE('Unable to update teaming information for '
@ -1108,8 +1108,10 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# Delete VLAN networks on other DVSes if it
# fails to be created on one DVS and reraise
# the original exception.
for net_moref in net_morefs:
self._delete_backend_network(net_moref)
for dvsmoref, netmoref in six.iteritems(
dvs_pg_mappings):
self._delete_backend_network(
netmoref, dvsmoref)
dvs_pg_mappings[dvs_id] = net_moref
net_morefs.append(net_moref)
dvs_net_ids.append(self._get_vlan_network_name(
@ -1207,9 +1209,12 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
not predefined):
self.nsx_v.vcns.delete_spoofguard_policy(sg_policy_id)
# Ensure that an predefined portgroup will not be deleted
if network_type != c_utils.NsxVNetworkTypes.PORTGROUP:
if network_type == c_utils.NsxVNetworkTypes.VXLAN:
for net_moref in net_morefs:
self._delete_backend_network(net_moref)
elif network_type != c_utils.NsxVNetworkTypes.PORTGROUP:
for dvsmrf, netmrf in six.iteritems(dvs_pg_mappings):
self._delete_backend_network(netmrf, dvsmrf)
LOG.exception(_LE('Failed to create network'))
# If init is incomplete calling _update_qos_network() will result a
@ -1219,7 +1224,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# Therefore we skip this code during init.
if backend_network and self.init_is_complete:
# Update the QOS restrictions of the backend network
self._update_network_qos(context, net_data, dvs_net_ids, net_moref)
self._update_qos_on_created_network(context, net_data)
new_net[qos_consts.QOS_POLICY_ID] = (
qos_com_utils.get_network_policy_id(context, new_net['id']))
@ -1229,26 +1234,37 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
self._apply_dict_extend_functions('networks', new_net, net_model)
return new_net
def _update_network_qos(self, context, net_data, dvs_net_ids, net_moref):
def _update_qos_on_created_network(self, context, net_data):
if validators.is_attr_set(net_data.get(qos_consts.QOS_POLICY_ID)):
# Translate the QoS rule data into Nsx values
qos_data = qos_utils.NsxVQosRule(
context=context,
qos_policy_id=net_data[qos_consts.QOS_POLICY_ID])
# update the qos data on the dvs
for dvs_net_id in dvs_net_ids:
self._dvs.update_port_groups_config(
dvs_net_id,
net_moref,
self._dvs.update_port_group_spec_qos, qos_data)
# update the BWM data on the backend
qos_policy_id = net_data[qos_consts.QOS_POLICY_ID]
self._update_qos_on_backend_network(
context, net_data['id'], qos_policy_id)
# attach the policy to the network in the neutron DB
qos_com_utils.update_network_policy_binding(
context,
net_data['id'],
net_data[qos_consts.QOS_POLICY_ID])
def _update_qos_on_backend_network(self, context, net_id, qos_policy_id):
# Translate the QoS rule data into Nsx values
qos_data = qos_utils.NsxVQosRule(
context=context, qos_policy_id=qos_policy_id)
# default dvs for this network
az = self.get_network_az(context, net_id)
az_dvs_id = az.dvs_id
# get the network moref/s from the db
net_mappings = nsx_db.get_nsx_network_mappings(
context.session, net_id)
for mapping in net_mappings:
# update the qos restrictions of the network
self._vcm.update_port_groups_config(
mapping.dvs_id or az_dvs_id,
net_id, mapping.nsx_id,
self._vcm.update_port_group_spec_qos, qos_data)
def _cleanup_dhcp_edge_before_deletion(self, context, net_id):
if self.metadata_proxy_handler:
# Find if this is the last network which is bound
@ -1298,7 +1314,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
return False
def delete_network(self, context, id):
mappings = nsx_db.get_nsx_switch_ids(context.session, id)
mappings = nsx_db.get_nsx_network_mappings(context.session, id)
bindings = nsxv_db.get_network_bindings(context.session, id)
if cfg.CONF.nsxv.spoofguard_enabled:
sg_policy_id = nsxv_db.get_spoofguard_policy_id(
@ -1340,7 +1356,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
if (bindings and
bindings[0].binding_type == c_utils.NsxVNetworkTypes.PORTGROUP):
if cfg.CONF.nsxv.spoofguard_enabled and sg_policy_id:
if self._is_neutron_spoofguard_policy(id, mappings[0],
if self._is_neutron_spoofguard_policy(id, mappings[0].nsx_id,
sg_policy_id):
self.nsx_v.vcns.delete_spoofguard_policy(sg_policy_id)
return
@ -1353,7 +1369,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
self.nsx_v.vcns.delete_spoofguard_policy(sg_policy_id)
edge_utils.check_network_in_use_at_backend(context, id)
for mapping in mappings:
self._delete_backend_network(mapping)
self._delete_backend_network(
mapping.nsx_id, mapping.dvs_id)
def _extend_get_network_dict_provider(self, context, net):
self._extend_network_dict_provider(context, net)
@ -1422,7 +1439,6 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
- dvs_list_changed True/False
- dvs_pg_mappings - mapping of the new elements dvs->moref
"""
net_morefs = []
dvs_pg_mappings = {}
current_dvs_ids = set(self._get_dvs_ids(
@ -1446,8 +1462,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# Delete VLAN networks on other DVSes if it
# fails to be created on one DVS and reraise
# the original exception.
for net_moref in net_morefs:
self._delete_backend_network(net_moref)
for dvsmoref, netmoref in six.iteritems(dvs_pg_mappings):
self._delete_backend_network(netmoref, dvsmoref)
dvs_pg_mappings[dvs_id] = net_moref
return True, dvs_pg_mappings
@ -1545,18 +1561,11 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
qos_policy_id = (net_attrs[qos_consts.QOS_POLICY_ID]
if qos_consts.QOS_POLICY_ID in net_attrs
else orig_net.get(qos_consts.QOS_POLICY_ID))
qos_data = qos_utils.NsxVQosRule(
context=context, qos_policy_id=qos_policy_id)
self._update_qos_on_backend_network(context, id, qos_policy_id)
# get the network moref/s from the db
for moref in net_morefs:
# update the qos restrictions of the network
self._dvs.update_port_groups_config(
id, moref, self._dvs.update_port_group_spec_qos, qos_data)
# attach the policy to the network in neutron DB
qos_com_utils.update_network_policy_binding(
context, id, qos_policy_id)
# attach the policy to the network in neutron DB
qos_com_utils.update_network_policy_binding(
context, id, qos_policy_id)
net_res[qos_consts.QOS_POLICY_ID] = (
qos_com_utils.get_network_policy_id(context, id))
@ -1691,13 +1700,13 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
and not p[psec.PORTSECURITY]])
def _add_vm_to_exclude_list(self, context, device_id, port_id):
if (self._dvs and
if (self._vcm and
cfg.CONF.nsxv.use_exclude_list):
# first time for this vm (we expect the count to be 1 already
# because the DB was already updated)
if (self._count_no_sec_ports_for_device_id(
context, device_id) <= 1):
vm_moref = self._dvs.get_vm_moref(device_id)
vm_moref = self._vcm.get_vm_moref(device_id)
if vm_moref is not None:
try:
LOG.info(_LI("Add VM %(dev)s to exclude list on "
@ -1718,14 +1727,14 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
def _remove_vm_from_exclude_list(self, context, device_id, port_id,
expected_count=0):
if (self._dvs and
if (self._vcm and
cfg.CONF.nsxv.use_exclude_list):
# No ports left in DB (expected count is 0 or 1 depending
# on whether the DB was already updated),
# So we can remove it from the backend exclude list
if (self._count_no_sec_ports_for_device_id(
context, device_id) <= expected_count):
vm_moref = self._dvs.get_vm_moref(device_id)
vm_moref = self._vcm.get_vm_moref(device_id)
if vm_moref is not None:
try:
LOG.info(_LI("Remove VM %(dev)s from exclude list on "
@ -4031,7 +4040,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
def _handle_qos_notification(self, context, resource_type,
qos_policys, event_type):
qos_utils.handle_qos_notification(qos_policys, event_type, self._dvs)
qos_utils.handle_qos_notification(qos_policys, event_type, self)
def get_az_by_hint(self, hint):
az = self._availability_zones_data.get_availability_zone(hint)

View File

@ -2629,9 +2629,9 @@ class NsxVCallbacks(object):
def __init__(self, plugin):
self.plugin = plugin
if cfg.CONF.nsxv.use_dvs_features:
self._dvs = dvs.DvsManager(dvs_id=cfg.CONF.nsxv.dvs_id)
self._vcm = dvs.VCManager()
else:
self._dvs = None
self._vcm = None
def complete_edge_creation(
self, context, edge_id, name, router_id, dist, deploy_successful,
@ -2655,12 +2655,12 @@ class NsxVCallbacks(object):
context.session, router_id,
status=plugin_const.ACTIVE)
if (not dist and
self._dvs and availability_zone and
self._vcm and availability_zone and
availability_zone.edge_ha and
availability_zone.edge_host_groups):
with locking.LockManager.get_lock('nsx-vc-drs-update'):
update_edge_host_groups(self.plugin.nsx_v.vcns, edge_id,
self._dvs, availability_zone)
self._vcm, availability_zone)
else:
LOG.error(_LE("Failed to deploy Edge for router %s"), name)
if router_db:

View File

@ -24,8 +24,6 @@ from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_log import log as logging
from vmware_nsx.db import db as nsx_db
LOG = logging.getLogger(__name__)
@ -89,7 +87,7 @@ class NsxVQosRule(object):
return self
def handle_qos_notification(policies_list, event_type, dvs):
def handle_qos_notification(policies_list, event_type, core_plugin):
# Check if QoS policy rule was created/deleted/updated
# Only if the policy rule was updated, we need to update the dvs
if event_type != callbacks_events.UPDATED:
@ -97,26 +95,17 @@ def handle_qos_notification(policies_list, event_type, dvs):
for policy_obj in policies_list:
if hasattr(policy_obj, "rules"):
handle_qos_policy_notification(policy_obj, dvs)
handle_qos_policy_notification(policy_obj, core_plugin)
def handle_qos_policy_notification(policy_obj, dvs):
def handle_qos_policy_notification(policy_obj, core_plugin):
# Reload the policy as admin so we will have a context
context = n_context.get_admin_context()
admin_policy = qos_policy.QosPolicy.get_object(
context, id=policy_obj.id)
# get all the bound networks of this policy
networks = admin_policy.get_bound_networks()
qos_rule = NsxVQosRule(context=context,
qos_policy_id=policy_obj.id)
for net_id in networks:
# update the new bw limitations for this network
net_morefs = nsx_db.get_nsx_switch_ids(context.session, net_id)
for moref in net_morefs:
# update the qos restrictions of the network
dvs.update_port_groups_config(
net_id,
moref,
dvs.update_port_group_spec_qos,
qos_rule)
core_plugin._update_qos_on_backend_network(
context, net_id, policy_obj.id)

View File

@ -194,12 +194,12 @@ def list_missing_ports(resource, event, trigger, **kwargs):
LOG.info(_LI("All internal ports verified on the NSX manager"))
def get_vm_network_device(dvs_mng, vm_moref, mac_address):
def get_vm_network_device(vm_mng, vm_moref, mac_address):
"""Return the network device with MAC 'mac_address'.
This code was inspired by Nova vif.get_network_device
"""
hardware_devices = dvs_mng.get_vm_interfaces_info(vm_moref)
hardware_devices = vm_mng.get_vm_interfaces_info(vm_moref)
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
@ -217,7 +217,7 @@ def migrate_compute_ports_vms(resource, event, trigger, **kwargs):
"""
# Connect to the DVS manager, using the configuration parameters
try:
dvs_mng = dvs.DvsManager()
vm_mng = dvs.VMManager()
except Exception as e:
LOG.error(_LE("Cannot connect to the DVS: Please update the [dvs] "
"section in the nsx.ini file: %s"), e)
@ -233,8 +233,8 @@ def migrate_compute_ports_vms(resource, event, trigger, **kwargs):
device_id = port.get('device_id')
# get the vm moref & spec from the DVS
vm_moref = dvs_mng.get_vm_moref_obj(device_id)
vm_spec = dvs_mng.get_vm_spec(vm_moref)
vm_moref = vm_mng.get_vm_moref_obj(device_id)
vm_spec = vm_mng.get_vm_spec(vm_moref)
# Go over the VM interfaces and check if it should be updated
update_spec = False
@ -250,7 +250,7 @@ def migrate_compute_ports_vms(resource, event, trigger, **kwargs):
continue
# find the old interface by it's mac and delete it
device = get_vm_network_device(dvs_mng, vm_moref, port['mac_address'])
device = get_vm_network_device(vm_mng, vm_moref, port['mac_address'])
if device is None:
LOG.warning(_LW("No device with MAC address %s exists on the VM"),
port['mac_address'])
@ -258,13 +258,13 @@ def migrate_compute_ports_vms(resource, event, trigger, **kwargs):
device_type = device.__class__.__name__
LOG.info(_LI("Detaching old interface from VM %s"), device_id)
dvs_mng.detach_vm_interface(vm_moref, device)
vm_mng.detach_vm_interface(vm_moref, device)
# add the new interface as OpaqueNetwork
LOG.info(_LI("Attaching new interface to VM %s"), device_id)
nsx_net_id = get_network_nsx_id(admin_cxt.session, port['network_id'])
dvs_mng.attach_vm_interface(vm_moref, port['id'], port['mac_address'],
nsx_net_id, device_type)
vm_mng.attach_vm_interface(vm_moref, port['id'], port['mac_address'],
nsx_net_id, device_type)
registry.subscribe(list_missing_ports,

View File

@ -49,12 +49,12 @@ class DvsTestCase(base.BaseTestCase):
@mock.patch.object(dvs_utils, 'dvs_create_session',
return_value=fake_session())
@mock.patch.object(dvs.DvsManager, '_get_dvs_moref',
@mock.patch.object(dvs.SingleDvsManager, '_get_dvs_moref_by_name',
return_value=mock.MagicMock())
def setUp(self, mock_moref, mock_session):
super(DvsTestCase, self).setUp()
cfg.CONF.set_override('dvs_name', 'fake_dvs', group='dvs')
self._dvs = dvs.DvsManager()
self._dvs = dvs.SingleDvsManager()
self.assertEqual(mock_moref.return_value, self._dvs._dvs_moref)
mock_moref.assert_called_once_with(mock_session.return_value,
'fake_dvs')
@ -63,7 +63,7 @@ class DvsTestCase(base.BaseTestCase):
return_value=fake_session())
def test_dvs_not_found(self, mock_session):
self.assertRaises(nsx_exc.DvsNotFound,
dvs.DvsManager)
dvs.SingleDvsManager)
@mock.patch.object(dvs.DvsManager, '_get_port_group_spec',
return_value='fake-spec')
@ -75,7 +75,7 @@ class DvsTestCase(base.BaseTestCase):
return_value='fake-spec')
def test_add_port_group_with_exception(self, fake_get_spec):
with (
mock.patch.object(self._dvs._session, 'wait_for_task',
mock.patch.object(self._dvs._dvs._session, 'wait_for_task',
side_effect=exp.NeutronException())
):
self.assertRaises(exp.NeutronException,
@ -87,19 +87,19 @@ class DvsTestCase(base.BaseTestCase):
return_value='fake-moref')
def test_delete_port_group(self, fake_get_moref):
self._dvs.delete_port_group('fake-uuid')
fake_get_moref.assert_called_once_with('fake-uuid')
fake_get_moref.assert_called_once_with(mock.ANY, 'fake-uuid')
@mock.patch.object(dvs.DvsManager, '_net_id_to_moref',
return_value='fake-moref')
def test_delete_port_group_with_exception(self, fake_get_moref):
with (
mock.patch.object(self._dvs._session, 'wait_for_task',
mock.patch.object(self._dvs._dvs._session, 'wait_for_task',
side_effect=exp.NeutronException())
):
self.assertRaises(exp.NeutronException,
self._dvs.delete_port_group,
'fake-uuid')
fake_get_moref.assert_called_once_with('fake-uuid')
fake_get_moref.assert_called_once_with(mock.ANY, 'fake-uuid')
@mock.patch.object(dvs.DvsManager, '_update_vxlan_port_groups_config')
@mock.patch.object(dvs.DvsManager, '_get_port_group_spec',
@ -111,13 +111,10 @@ class DvsTestCase(base.BaseTestCase):
net_id = 'vxlan-uuid'
vlan = 7
self._dvs.add_port_group(net_id, vlan)
moref = self._dvs._net_id_to_moref(net_id)
fake_get_moref.assert_called_once_with(net_id)
self._dvs.net_id_to_moref(net_id)
fake_get_moref.assert_called_once_with(mock.ANY, net_id)
fake_get_spec.assert_called_once_with(net_id, vlan)
self._dvs.update_port_groups_config(net_id, moref, None, None)
fake_update_vxlan.assert_called_once_with(net_id, moref, None, None)
@mock.patch.object(dvs.DvsManager, '_update_net_port_groups_config')
@mock.patch.object(dvs.DvsManager, '_get_port_group_spec',
return_value='fake-spec')
@ -128,19 +125,16 @@ class DvsTestCase(base.BaseTestCase):
net_id = 'flat-uuid'
vlan = 7
self._dvs.add_port_group(net_id, vlan)
moref = self._dvs._net_id_to_moref(net_id)
fake_get_moref.assert_called_once_with(net_id)
self._dvs.net_id_to_moref(net_id)
fake_get_moref.assert_called_once_with(mock.ANY, net_id)
fake_get_spec.assert_called_once_with(net_id, vlan)
self._dvs.update_port_groups_config(net_id, moref, None, None)
fake_update_net.assert_called_once_with(moref, None, None)
class NeutronSimpleDvsTest(test_plugin.NeutronDbPluginV2TestCase):
@mock.patch.object(dvs_utils, 'dvs_create_session',
return_value=fake_session())
@mock.patch.object(dvs.DvsManager, '_get_dvs_moref',
@mock.patch.object(dvs.SingleDvsManager, '_get_dvs_moref_by_name',
return_value=mock.MagicMock())
def setUp(self, mock_moref, mock_session,
plugin=PLUGIN_NAME,
@ -197,7 +191,7 @@ class NeutronSimpleDvsTest(test_plugin.NeutronDbPluginV2TestCase):
def test_create_and_delete_dvs_network_flat(self):
self._create_and_delete_dvs_network()
@mock.patch.object(dvs.DvsManager, 'get_portgroup_info')
@mock.patch.object(dvs.DvsManager, 'get_port_group_info')
@mock.patch.object(dvs.DvsManager, '_net_id_to_moref')
def test_create_and_delete_dvs_network_portgroup(self, fake_get_moref,
fake_pg_info):
@ -206,7 +200,7 @@ class NeutronSimpleDvsTest(test_plugin.NeutronDbPluginV2TestCase):
self.assertTrue(fake_get_moref.call_count)
self.assertTrue(fake_pg_info.call_count)
@mock.patch.object(dvs.DvsManager, 'get_portgroup_info')
@mock.patch.object(dvs.DvsManager, 'get_port_group_info')
@mock.patch.object(dvs.DvsManager, '_net_id_to_moref')
def test_create_and_delete_dvs_network_portgroup_vlan(self,
fake_get_moref,
@ -303,7 +297,7 @@ class NeutronSimpleDvsTest(test_plugin.NeutronDbPluginV2TestCase):
{'network': {'port_security_enabled': True}})
self.assertEqual(True, updated_net['port_security_enabled'])
@mock.patch.object(dvs.DvsManager, 'get_portgroup_info')
@mock.patch.object(dvs.DvsManager, 'get_port_group_info')
@mock.patch.object(dvs.DvsManager, '_net_id_to_moref')
def test_create_and_delete_portgroup_network_invalid_name(self,
fake_get_moref,

View File

@ -188,8 +188,7 @@ class NsxVPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
cfg.CONF.set_default('use_dvs_features', True, 'nsxv')
plugin = directory.get_plugin()
with mock.patch.object(dvs_utils, 'dvs_create_session'):
with mock.patch.object(dvs.DvsManager, '_get_dvs_moref'):
plugin._dvs = dvs.DvsManager()
plugin._vcm = dvs.VCManager()
return plugin
def test_get_vlan_network_name(self):
@ -3888,7 +3887,7 @@ class TestNSXPortSecurity(test_psec.TestPortSecurity,
"""
plugin = self._get_core_plugin_with_dvs()
vm_moref = 'dummy_moref'
with mock.patch.object(plugin._dvs, 'get_vm_moref',
with mock.patch.object(plugin._vcm, 'get_vm_moref',
return_value=vm_moref):
with mock.patch.object(
plugin.nsx_v.vcns,
@ -3908,7 +3907,7 @@ class TestNSXPortSecurity(test_psec.TestPortSecurity,
"""
plugin = self._get_core_plugin_with_dvs()
vm_moref = 'dummy_moref'
with mock.patch.object(plugin._dvs, 'get_vm_moref',
with mock.patch.object(plugin._vcm, 'get_vm_moref',
return_value=vm_moref):
with mock.patch.object(
plugin.nsx_v.vcns,
@ -3928,7 +3927,7 @@ class TestNSXPortSecurity(test_psec.TestPortSecurity,
"""
plugin = self._get_core_plugin_with_dvs()
vm_moref = 'dummy_moref'
with mock.patch.object(plugin._dvs, 'get_vm_moref',
with mock.patch.object(plugin._vcm, 'get_vm_moref',
return_value=vm_moref):
with mock.patch.object(
plugin.nsx_v.vcns,
@ -4056,7 +4055,7 @@ class TestNSXPortSecurity(test_psec.TestPortSecurity,
plugin = self._get_core_plugin_with_dvs()
vm_moref = 'dummy_moref'
data = {'port': {'port_security_enabled': enable_port_security}}
with mock.patch.object(plugin._dvs, 'get_vm_moref',
with mock.patch.object(plugin._vcm, 'get_vm_moref',
return_value=vm_moref):
if enable_port_security:
with mock.patch.object(

View File

@ -21,7 +21,6 @@ from networking_l2gw.db.l2gateway import l2gateway_db
from neutron_lib import exceptions as n_exc
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.db import nsxv_db
from vmware_nsx.dvs import dvs
from vmware_nsx.dvs import dvs_utils
from vmware_nsx.services.l2gateway.nsx_v import driver as nsx_v_driver
from vmware_nsx.tests.unit.nsx_v import test_plugin
@ -180,7 +179,6 @@ class TestL2gatewayDriver(base.BaseTestCase):
class TestL2GatewayDriverRouter(test_plugin.NsxVPluginV2TestCase):
@mock.patch.object(dvs_utils, 'dvs_create_session')
@mock.patch.object(dvs.DvsManager, '_get_dvs_moref')
def setUp(self, *mocks):
# init the nsxv plugin, edge manager and fake vcns
super(TestL2GatewayDriverRouter, self).setUp(plugin=CORE_PLUGIN,

View File

@ -38,7 +38,6 @@ class TestQosNsxVNotification(test_plugin.NsxVPluginV2TestCase,
base.BaseQosTestCase):
@mock.patch.object(dvs_utils, 'dvs_create_session')
@mock.patch.object(dvs.DvsManager, '_get_dvs_moref')
def setUp(self, *mocks):
# init the nsx-v plugin for testing with DVS
self._init_dvs_config()