NSX|V: Fix host groups for DRS HA for AZ

For the fire cell anti-affinity to work as designed, we need to use
different DRS groups & rules per host group, since those host groups can
be different for different availability zones.

Change-Id: I092f5c228489a3a0d73f060380f1a1a6c526fb00
(cherry picked from commit cda47aa304)
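
To illustrate the naming change, here is a minimal sketch (the host group
names below are invented for illustration, not taken from this commit).
The old names were derived from an index alone, so every availability zone
produced the same DRS group and rule names; the new helpers embed the host
group name, keeping the names unique per zone:

    def old_group_name(index):
        # Pre-fix naming: depends only on the index, so two availability
        # zones with different host groups both get 'neutron-group-1'.
        return 'neutron-group-%s' % index

    def new_group_name(index, host_group_names):
        # Post-fix naming: embeds the host group name, so each
        # availability zone gets its own DRS group/rule names.
        return 'neutron-group-%s-%s' % (index, host_group_names[index - 1])

    az1 = ['az1-edge-hg1', 'az1-edge-hg2']   # hypothetical host groups
    az2 = ['az2-edge-hg1', 'az2-edge-hg2']

    print(old_group_name(1))       # neutron-group-1 (identical for both AZs)
    print(new_group_name(1, az1))  # neutron-group-1-az1-edge-hg1
    print(new_group_name(1, az2))  # neutron-group-1-az2-edge-hg1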
Author: Adit Sarfaty
Date:   2018-08-16 13:08:39 +03:00
parent b8f1a793a5
commit 9eceea4d3d

3 changed files with 29 additions and 19 deletions


@@ -673,7 +673,14 @@ class ClusterManager(VCManagerBase):
         rules_spec.info = rules_info
         return rules_spec
 
-    def get_configured_vms(self, resource_id, n_host_groups=2):
+    def _group_name(self, index, host_group_names):
+        return 'neutron-group-%s-%s' % (index, host_group_names[index - 1])
+
+    def _rule_name(self, index, host_group_names):
+        return 'neutron-rule-%s-%s' % (index, host_group_names[index - 1])
+
+    def get_configured_vms(self, resource_id, host_group_names):
+        n_host_groups = len(host_group_names)
         session = self._session
         resource = vim_util.get_moref(resource_id, 'ResourcePool')
         # TODO(garyk): cache the cluster details
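
A hypothetical call against the new signature (the resource pool moref and
host group names are invented for illustration); callers now pass the host
group name list itself, and the count is derived internally:

    host_groups = ['edge-host-group-1', 'edge-host-group-2']
    # cluster_mgr is assumed to be a ClusterManager instance.
    vms = cluster_mgr.get_configured_vms('resgroup-9', host_groups)
    # Internally n_host_groups = len(host_groups) == 2, and group lookups
    # match names like 'neutron-group-1-edge-host-group-1'.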
@@ -691,7 +698,7 @@ class ClusterManager(VCManagerBase):
             if hasattr(cluster_config, 'group'):
                 groups = cluster_config.group
                 for group in groups:
-                    if 'neutron-group-%s' % entry_id == group.name:
+                    if self._group_name(entry_id, host_group_names) == group.name:
                         vm_group = group
                         break
             if vm_group and hasattr(vm_group, 'vm'):
@@ -730,25 +737,25 @@ class ClusterManager(VCManagerBase):
             if hasattr(cluster_config, 'group'):
                 groups = cluster_config.group
                 for group in groups:
-                    if 'neutron-group-%s' % index == group.name:
+                    if self._group_name(index, host_group_names) == group.name:
                         vmGroup = group
                         break
             # Create/update the VM group
             groupSpec = self._create_vm_group_spec(
                 client_factory,
-                'neutron-group-%s' % index,
+                self._group_name(index, host_group_names),
                 [vm], vmGroup)
             config_spec.groupSpec.append(groupSpec)
             config_rule = None
             # Create the config rule if it does not exist
             for rule in rules:
-                if 'neutron-rule-%s' % index == rule.name:
+                if self._rule_name(index, host_group_names) == rule.name:
                     config_rule = rule
                     break
             if config_rule is None and index <= num_host_groups:
                 ruleSpec = self._create_cluster_rules_spec(
-                    client_factory, 'neutron-rule-%s' % index,
-                    'neutron-group-%s' % index,
+                    client_factory, self._rule_name(index, host_group_names),
+                    self._group_name(index, host_group_names),
                     host_group_names[index - 1])
                 config_spec.rulesSpec.append(ruleSpec)
             self._reconfigure_cluster(session, cluster, config_spec)
@@ -787,13 +794,13 @@ class ClusterManager(VCManagerBase):
             entry_id = index + 1
             vmGroup = None
             for group in groups:
-                if 'neutron-group-%s' % entry_id == group.name:
+                if self._group_name(entry_id, host_group_names) == group.name:
                     vmGroup = group
                     break
             if vmGroup is None:
                 groupSpec = self._create_vm_group_spec(
                     client_factory,
-                    'neutron-group-%s' % entry_id,
+                    self._group_name(entry_id, host_group_names),
                     [], vmGroup)
                 config_spec.groupSpec.append(groupSpec)
                 update_cluster = True
@@ -801,13 +808,14 @@ class ClusterManager(VCManagerBase):
             config_rule = None
             # Create the config rule if it does not exist
             for rule in rules:
-                if 'neutron-rule-%s' % entry_id == rule.name:
+                if self._rule_name(entry_id, host_group_names) == rule.name:
                     config_rule = rule
                     break
             if config_rule is None and index < num_host_groups:
                 ruleSpec = self._create_cluster_rules_spec(
-                    client_factory, 'neutron-rule-%s' % entry_id,
-                    'neutron-group-%s' % entry_id,
+                    client_factory, self._rule_name(entry_id,
+                                                    host_group_names),
+                    self._group_name(entry_id, host_group_names),
                     host_group_names[index - 1])
                 config_spec.rulesSpec.append(ruleSpec)
                 update_cluster = True
@@ -838,7 +846,8 @@ class ClusterManager(VCManagerBase):
         rules_spec.info = rules_info
         return rules_spec
 
-    def cluster_host_group_cleanup(self, resource_id, n_host_groups=2):
+    def cluster_host_group_cleanup(self, resource_id, host_group_names):
+        n_host_groups = len(host_group_names)
         session = self._session
         resource = vim_util.get_moref(resource_id, 'ResourcePool')
         # TODO(garyk): cache the cluster details
@@ -862,12 +871,12 @@ class ClusterManager(VCManagerBase):
         for index in range(n_host_groups):
             entry_id = index + 1
             for group in groups:
-                if 'neutron-group-%s' % entry_id == group.name:
+                if self._group_name(entry_id, host_group_names) == group.name:
                     groupSpec.append(self._delete_vm_group_spec(
                         client_factory, group.name))
             # Delete the config rule if it exists
             for rule in rules:
-                if 'neutron-rule-%s' % entry_id == rule.name:
+                if self._rule_name(entry_id, host_group_names) == rule.name:
                     ruleSpec.append(self._delete_cluster_rules_spec(
                         client_factory, rule))


@@ -2523,7 +2523,7 @@ def update_edge_host_groups(vcns, edge_id, dvs, availability_zone,
     if validate:
         configured_vms = dvs.get_configured_vms(
             availability_zone.resource_pool,
-            len(availability_zone.edge_host_groups))
+            availability_zone.edge_host_groups)
         for vm in vms:
             if vm in configured_vms:
                 LOG.info('Edge %s already configured', edge_id)
@@ -2556,7 +2556,7 @@ def clean_host_groups(dvs, availability_zone):
                  availability_zone.name)
         dvs.cluster_host_group_cleanup(
             availability_zone.resource_pool,
-            len(availability_zone.edge_host_groups))
+            availability_zone.edge_host_groups)
     except Exception as e:
         LOG.error('Unable to cleanup. Error: %s', e)
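
The same signature change applies at both call sites above: where callers
previously passed len(availability_zone.edge_host_groups), they now pass
the list itself so the ClusterManager helpers can embed the host group
names. A before/after sketch, assuming availability_zone carries the AZ's
edge_host_groups list:

    # Before: only the count crossed the API boundary.
    dvs.cluster_host_group_cleanup(
        availability_zone.resource_pool,
        len(availability_zone.edge_host_groups))

    # After: the names themselves cross, enabling per-AZ group/rule names.
    dvs.cluster_host_group_cleanup(
        availability_zone.resource_pool,
        availability_zone.edge_host_groups)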


@@ -508,8 +508,9 @@ def _update_host_group_for_edge(nsxv, cluster_mng, edge_id, edge):
                 cluster_mng, az,
                 validate=True)
         else:
-            LOG.error("%s does not have HA enabled or no host "
-                      "groups defined. Skipping %s.", az_name, edge_id)
+            LOG.error("Availability zone:%s does not have HA enabled or "
+                      "no host groups defined. Skipping %s.",
+                      az_name, edge_id)
     except Exception as e:
         LOG.error("Failed to update edge %(id)s - %(e)s",
                   {'id': edge['id'],