NSX|v: Support more than 2 hostgroups

If edge_ha=True and ha_placement_random=True, the user can configure
more than 2 edge_host_groups globally or per availability zone.
In this case, 2 of those will be randomly selected for each deployed
edge.
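
For example, with illustrative host group names, a global configuration
such as the following becomes valid:

    [nsxv]
    edge_ha = True
    ha_placement_random = True
    edge_host_groups = HG-1, HG-2, HG-3, HG-4

Each deployed edge then gets its active/backup appliances pinned to 2
host groups picked at random from the list.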

Change-Id: Iaa673e5acf78ecdf8cbc942499cce70a2fe0546c
Adit Sarfaty 2017-04-30 15:11:58 +03:00
parent f02e3307be
commit 35d13ead0f
4 changed files with 62 additions and 43 deletions


@@ -484,8 +484,8 @@ nsxv_opts = [
                 default=[],
                 help=_('(Optional) If edge HA is used then this will ensure '
                        'that active/backup edges are placed in the listed '
-                       'host groups. 2 predefined host groups need to be '
-                       'configured.')),
+                       'host groups. At least 2 predefined host groups need '
+                       'to be configured.')),
     cfg.StrOpt('external_network',
                help=_('(Required) Network ID for physical network '
                       'connectivity')),
@@ -709,8 +709,8 @@ nsxv_az_opts = [
                 default=[],
                 help=_('(Optional) If edge HA is used then this will ensure '
                        'that active/backup edges are placed in the listed '
-                       'host groups. 2 predefined host groups need to be '
-                       'configured.')),
+                       'host groups. At least 2 predefined host groups need '
+                       'to be configured.')),
     cfg.StrOpt('datacenter_moid',
                help=_('(Optional) Identifying the ID of datacenter to deploy '
                       'NSX Edges')),
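
As the nsxv_az_opts hunk above suggests, the same option can also be set
per availability zone; a sketch, assuming an availability zone named az1
and illustrative host group names:

    [az:az1]
    edge_host_groups = AZ1-HG-1, AZ1-HG-2, AZ1-HG-3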


@@ -161,7 +161,6 @@ class DvsManager(VCManagerBase):
                    'vlan_tag': vlan_tag,
                    'dvs': dvs_moref.value})
 
-    # DEBUG ADIT used only by the DVS plugin
     def _net_id_to_moref(self, dvs_moref, net_id):
         """Gets the moref for the specific neutron network."""
         # NOTE(garyk): return this from a cache if not found then invoke
@@ -564,9 +563,9 @@ class ClusterManager(VCManagerBase):
             reconfig_task = session.invoke_api(
                 session.vim, "ReconfigureComputeResource_Task",
                 cluster, spec=config_spec, modify=True)
+            session.wait_for_task(reconfig_task)
         except Exception as excep:
             LOG.exception('Failed to reconfigure cluster %s', excep)
-        session.wait_for_task(reconfig_task)
 
     def _create_vm_group_spec(self, client_factory, name, vm_refs,
                               group=None):
@@ -605,7 +604,7 @@ class ClusterManager(VCManagerBase):
         rules_spec.info = rules_info
         return rules_spec
 
-    def get_configured_vms(self, resource_id):
+    def get_configured_vms(self, resource_id, n_host_groups=2):
         session = self._session
         resource = vim_util.get_moref(resource_id, 'ResourcePool')
         # TODO(garyk): cache the cluster details
@@ -616,7 +615,7 @@ class ClusterManager(VCManagerBase):
             vim_util, "get_object_property", self._session.vim, cluster,
             "configurationEx")
         configured_vms = []
-        for index in range(2):
+        for index in range(n_host_groups):
             vm_group = None
             entry_id = index + 1
             groups = []
@@ -632,8 +631,9 @@ class ClusterManager(VCManagerBase):
         return configured_vms
 
     def update_cluster_edge_failover(self, resource_id, vm_moids,
-                                     edge_id, host_group_names):
+                                     host_group_names):
         """Updates cluster for vm placement using DRS"""
+        # DEBUG ADIT edge-id is never used
         session = self._session
         resource = vim_util.get_moref(resource_id, 'ResourcePool')
         # TODO(garyk): cache the cluster details
@@ -643,12 +643,20 @@ class ClusterManager(VCManagerBase):
         cluster_config = session.invoke_api(
             vim_util, "get_object_property", self._session.vim, cluster,
             "configurationEx")
-        vms = [vim_util.get_moref(vm_moid, 'VirtualMachine') for
-               vm_moid in vm_moids]
+        vms = [vim_util.get_moref(vm_moid, 'VirtualMachine')
+               if vm_moid else None
+               for vm_moid in vm_moids]
         client_factory = session.vim.client.factory
         config_spec = client_factory.create('ns0:ClusterConfigSpecEx')
+        num_host_groups = len(host_group_names)
+        rules = []
+        if hasattr(cluster_config, 'rule'):
+            rules = cluster_config.rule
         for index, vm in enumerate(vms, start=1):
+            if not vm:
+                continue
             vmGroup = None
             groups = []
             if hasattr(cluster_config, 'group'):
@@ -664,9 +672,6 @@ class ClusterManager(VCManagerBase):
                 [vm], vmGroup)
             config_spec.groupSpec.append(groupSpec)
             config_rule = None
-            rules = []
-            if hasattr(cluster_config, 'rule'):
-                rules = cluster_config.rule
             # Create the config rule if it does not exist
             for rule in rules:
                 if 'neutron-rule-%s' % index == rule.name:
@@ -691,11 +696,11 @@ class ClusterManager(VCManagerBase):
         cluster_config = session.invoke_api(
             vim_util, "get_object_property", self._session.vim, cluster,
             "configurationEx")
+        groups = []
+        if hasattr(cluster_config, 'group'):
+            groups = cluster_config.group
         for host_group_name in host_group_names:
             found = False
-            groups = []
-            if hasattr(cluster_config, 'group'):
-                groups = cluster_config.group
             for group in groups:
                 if host_group_name == group.name:
                     found = True
@@ -703,15 +708,16 @@ class ClusterManager(VCManagerBase):
             if not found:
                 LOG.error("%s does not exist", host_group_name)
                 raise exceptions.NotFound()
         update_cluster = False
+        num_host_groups = len(host_group_names)
+        rules = []
+        if hasattr(cluster_config, 'rule'):
+            rules = cluster_config.rule
         # Ensure that the VM groups are created
-        for index in range(2):
+        for index in range(num_host_groups):
             entry_id = index + 1
             vmGroup = None
-            groups = []
-            if hasattr(cluster_config, 'group'):
-                groups = cluster_config.group
             for group in groups:
                 if 'neutron-group-%s' % entry_id == group.name:
                     vmGroup = group
@@ -725,9 +731,6 @@ class ClusterManager(VCManagerBase):
                 update_cluster = True
             config_rule = None
-            rules = []
-            if hasattr(cluster_config, 'rule'):
-                rules = cluster_config.rule
             # Create the config rule if it does not exist
             for rule in rules:
                 if 'neutron-rule-%s' % entry_id == rule.name:
@@ -744,8 +747,7 @@ class ClusterManager(VCManagerBase):
         try:
             self._reconfigure_cluster(session, cluster, config_spec)
         except Exception as e:
-            LOG.error('Unable to update cluster for host groups %s',
-                      e)
+            LOG.error('Unable to update cluster for host groups %s', e)
 
     def _delete_vm_group_spec(self, client_factory, name):
         group_spec = client_factory.create('ns0:ClusterGroupSpec')
@@ -768,7 +770,7 @@ class ClusterManager(VCManagerBase):
         rules_spec.info = rules_info
         return rules_spec
 
-    def cluster_host_group_cleanup(self, resource_id):
+    def cluster_host_group_cleanup(self, resource_id, n_host_groups=2):
         session = self._session
         resource = vim_util.get_moref(resource_id, 'ResourcePool')
         # TODO(garyk): cache the cluster details
@@ -780,21 +782,22 @@ class ClusterManager(VCManagerBase):
         cluster_config = session.invoke_api(
             vim_util, "get_object_property", self._session.vim, cluster,
             "configurationEx")
+        groups = []
+        if hasattr(cluster_config, 'group'):
+            groups = cluster_config.group
+        rules = []
+        if hasattr(cluster_config, 'rule'):
+            rules = cluster_config.rule
         groupSpec = []
         ruleSpec = []
-        for index in range(2):
+        for index in range(n_host_groups):
             entry_id = index + 1
-            groups = []
-            if hasattr(cluster_config, 'group'):
-                groups = cluster_config.group
             for group in groups:
                 if 'neutron-group-%s' % entry_id == group.name:
                     groupSpec.append(self._delete_vm_group_spec(
                         client_factory, group.name))
-            rules = []
-            if hasattr(cluster_config, 'rule'):
-                rules = cluster_config.rule
-            # Create the config rule if it does not exist
+            # Delete the config rule if it exists
             for rule in rules:
                 if 'neutron-rule-%s' % entry_id == rule.name:
                     ruleSpec.append(self._delete_cluster_rules_spec(
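
The ClusterManager changes above all revolve around per-host-group DRS
objects that are keyed by index; a minimal standalone sketch of the
naming scheme, derived from the '%s' patterns in this diff (the count
is illustrative):

    # one VM group and one affinity rule per configured host group;
    # before this change the count was hard-coded to 2
    n_host_groups = 3
    for index in range(n_host_groups):
        entry_id = index + 1
        vm_group_name = 'neutron-group-%s' % entry_id  # ClusterVmGroup name
        rule_name = 'neutron-rule-%s' % entry_id       # VM/host affinity rule
        print(vm_group_name, rule_name)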


@@ -4193,6 +4193,18 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
         azs = self.get_azs_list()
         for az in azs:
             if az.edge_host_groups and az.edge_ha:
+                if len(az.edge_host_groups) < 2:
+                    error = _("edge_host_groups must have at least 2 "
+                              "names")
+                    raise nsx_exc.NsxPluginException(err_msg=error)
+                if (not az.ha_placement_random and
+                    len(az.edge_host_groups) > 2):
+                    LOG.warning("Availability zone %(az)s has %(count)s "
+                                "hostgroups. Only the first 2 will be "
+                                "used until ha_placement_random is "
+                                "enabled",
+                                {'az': az.name,
+                                 'count': len(az.edge_host_groups)})
                 self._vcm.validate_host_groups(az.resource_pool,
                                                az.edge_host_groups)
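
Taken together, the new checks boil down to a simple rule; a paraphrase
in plain Python (not the plugin code, names illustrative):

    def check_edge_host_groups(host_groups, ha_placement_random):
        # fewer than 2 names can never host an active/backup pair
        if len(host_groups) < 2:
            raise ValueError('edge_host_groups must have at least 2 names')
        # extra names are only consumed once random placement is enabled
        if len(host_groups) > 2 and not ha_placement_random:
            print('warning: only the first 2 host groups will be used')

    check_edge_host_groups(['HG-1', 'HG-2', 'HG-3'],
                           ha_placement_random=False)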


@@ -723,7 +723,6 @@ class EdgeManager(object):
                 context, edge_id, lrouter['name'], lrouter['id'], dist,
                 True, availability_zone=availability_zone,
                 deploy_metadata=deploy_metadata)
-
         try:
             self.nsxv_manager.rename_edge(edge_id, name)
         except nsxapi_exc.VcnsApiException as e:
@@ -2603,22 +2602,26 @@ def update_edge_host_groups(vcns, edge_id, dvs, availability_zone,
            for appliance in appliances['appliances']]
     if validate:
         configured_vms = dvs.get_configured_vms(
-            availability_zone.resource_pool)
+            availability_zone.resource_pool,
+            len(availability_zone.edge_host_groups))
         for vm in vms:
             if vm in configured_vms:
                 LOG.info('Edge %s already configured', edge_id)
                 return
-    LOG.info('Create DRS groups for %(vms)s on edge %(edge_id)s',
-             {'vms': vms, 'edge_id': edge_id})
     # Ensure random distribution of the VMs
     if availability_zone.ha_placement_random:
+        if len(vms) < len(availability_zone.edge_host_groups):
+            # add some empty vms to the list, so it will randomize between
+            # all host groups
+            vms.extend([None] * (len(availability_zone.edge_host_groups) -
+                                 len(vms)))
         random.shuffle(vms)
     try:
+        LOG.info('Create DRS groups for '
+                 '%(vms)s on edge %(edge_id)s',
+                 {'vms': vms,
+                  'edge_id': edge_id})
         dvs.update_cluster_edge_failover(
             availability_zone.resource_pool,
-            vms, edge_id, availability_zone.edge_host_groups)
+            vms, availability_zone.edge_host_groups)
     except Exception as e:
         LOG.error('Unable to create DRS groups for '
                   '%(vms)s on edge %(edge_id)s. Error: %(e)s',
@@ -2632,7 +2635,8 @@ def clean_host_groups(dvs, availability_zone):
             LOG.info('Cleaning up host groups for AZ %s',
                      availability_zone.name)
             dvs.cluster_host_group_cleanup(
-                availability_zone.resource_pool)
+                availability_zone.resource_pool,
+                len(availability_zone.edge_host_groups))
         except Exception as e:
             LOG.error('Unable to cleanup. Error: %s', e)
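
The random-placement logic in update_edge_host_groups is worth spelling
out on its own; a minimal sketch, with illustrative VM and host group
names (the real code skips the None entries when it builds the DRS
groups):

    import random

    def pick_host_groups(vms, host_groups):
        slots = list(vms)
        if len(slots) < len(host_groups):
            # pad with None so every host group has an equal chance
            # of receiving one of the edge VMs
            slots.extend([None] * (len(host_groups) - len(slots)))
        random.shuffle(slots)
        # position i of the shuffled list maps to host_groups[i];
        # a None slot means that host group gets no VM for this edge
        return {hg: vm for hg, vm in zip(host_groups, slots)}

    print(pick_host_groups(['vm-1', 'vm-2'],
                           ['HG-1', 'HG-2', 'HG-3', 'HG-4']))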