Fixes host stats for VMwareVCDriver

Host stats for the VCDriver should report the aggregate resources of the
cluster rather than those of a single host in the cluster.

Fixes: bug #1190515
Change-Id: I37e46995c5da2e3052e8178098afee7c8061bb3c
Author: Sabari Kumar Murugesan
Date:   2013-06-13 01:30:47 -07:00
parent 9e4caae11d
commit 92983257bb
5 changed files with 91 additions and 50 deletions
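
In short, the VC driver stops describing one arbitrary host and starts describing the cluster as a whole. The comparison below is only illustrative; the numbers are the ones the updated unit test asserts against the fake two-host cluster, not values from a real deployment.

    # Before/after view of selected get_available_resource() fields,
    # using the values asserted in test_get_available_resource.
    before = {'vcpus': 16,                    # CPU threads of a single host
              'memory_mb': 1024,              # that host's memorySize
              'hypervisor_type': 'VMware ESXi',
              'hypervisor_version': '5.0.0'}
    after = {'vcpus': 32,                     # CPU threads summed over every host
             'memory_mb': 1000,               # memory limit of the root resource pool
             'hypervisor_type': 'VMware vCenter Server',  # from the vCenter AboutInfo
             'hypervisor_version': '5.1.0'}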

File: test_vmwareapi.py (VMware API driver tests)

@@ -31,6 +31,7 @@ from nova.compute import task_states
 from nova import context
 from nova import db
 from nova import exception
+from nova.openstack.common import jsonutils
 from nova import test
 import nova.tests.image.fake
 from nova.tests import matchers
@@ -114,6 +115,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
         self.flags(host_ip='test_url',
                    host_username='test_username',
                    host_password='test_pass',
+                   cluster_name='test_cluster',
                    use_linked_clone=False, group='vmware')
         self.flags(vnc_enabled=False)
         self.user_id = 'fake'
@@ -904,14 +906,19 @@ class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase):
     def test_get_available_resource(self):
         stats = self.conn.get_available_resource(self.node_name)
-        self.assertEquals(stats['vcpus'], 16)
+        cpu_info = {"model": ["Intel(R) Xeon(R)", "Intel(R) Xeon(R)"],
+                    "vendor": ["Intel", "Intel"],
+                    "topology": {"cores": 16,
+                                 "threads": 32}}
+        self.assertEquals(stats['vcpus'], 32)
         self.assertEquals(stats['local_gb'], 1024)
         self.assertEquals(stats['local_gb_used'], 1024 - 500)
-        self.assertEquals(stats['memory_mb'], 1024)
-        self.assertEquals(stats['memory_mb_used'], 1024 - 524)
-        self.assertEquals(stats['hypervisor_type'], 'VMware ESXi')
-        self.assertEquals(stats['hypervisor_version'], '5.0.0')
+        self.assertEquals(stats['memory_mb'], 1000)
+        self.assertEquals(stats['memory_mb_used'], 500)
+        self.assertEquals(stats['hypervisor_type'], 'VMware vCenter Server')
+        self.assertEquals(stats['hypervisor_version'], '5.1.0')
         self.assertEquals(stats['hypervisor_hostname'], self.node_name)
+        self.assertEquals(stats['cpu_info'], jsonutils.dumps(cpu_info))
         self.assertEquals(stats['supported_instances'],
                 '[["i686", "vmware", "hvm"], ["x86_64", "vmware", "hvm"]]')

File: fake.py (fake VMware API objects used by the vmwareapi tests)

@@ -60,6 +60,7 @@ def reset():
     create_network()
     create_host_network_system()
     create_host()
+    create_host()
     create_datacenter()
     create_datastore()
     create_res_pool()
@@ -351,10 +352,9 @@ class Network(ManagedObject):
 class ResourcePool(ManagedObject):
     """Resource Pool class."""
-    def __init__(self, name="test-rpool", value="resgroup-test"):
+    def __init__(self, name="test_ResPool", value="resgroup-test"):
         super(ResourcePool, self).__init__("rp")
-        self.set("name", name)
+        self.set("name", "test_ResPool")
         summary = DataObject()
         runtime = DataObject()
         config = DataObject()
@@ -377,6 +377,7 @@ class ResourcePool(ManagedObject):
         config.memoryAllocation = memoryAllocation
         config.cpuAllocation = cpuAllocation
         self.set("summary", summary)
+        self.set("summary.runtime.memory", memory)
         self.set("config", config)
         parent = ManagedObjectReference(value=value,
                                         name=name)
@@ -414,17 +415,12 @@ class ClusterComputeResource(ManagedObject):
         summary.numEffectiveHosts = 0
         summary.totalMemory = 0
         summary.effectiveMemory = 0
+        summary.effectiveCpu = 10000
         self.set("summary", summary)
-        self.set("summary.effectiveCpu", 10000)
-    def _add_resource_pool(self, r_pool):
+    def _add_root_resource_pool(self, r_pool):
         if r_pool:
-            r_pools = self.get("resourcePool")
-            if r_pools is None:
-                r_pools = DataObject()
-                r_pools.ManagedObjectReference = []
-                self.set("resourcePool", r_pools)
-            r_pools.ManagedObjectReference.append(r_pool)
+            self.set("resourcePool", r_pool)
     def _add_host(self, host_sys):
         if host_sys:
@@ -544,6 +540,7 @@ class HostSystem(ManagedObject):
         self.set("capability.maxHostSupportedVcpus", 600)
         self.set("summary.runtime.inMaintenanceMode", False)
         self.set("runtime.connectionState", "connected")
+        self.set("summary.hardware", hardware)
         self.set("config.network.pnic", net_info_pnic)
         self.set("connected", connected)
@@ -672,8 +669,9 @@ def create_network():
 def create_cluster(name):
     cluster = ClusterComputeResource(name=name)
     cluster._add_host(_get_object_refs("HostSystem")[0])
+    cluster._add_host(_get_object_refs("HostSystem")[1])
     cluster._add_datastore(_get_object_refs("Datastore")[0])
-    cluster._add_resource_pool(_get_object_refs("ResourcePool")[0])
+    cluster._add_root_resource_pool(_get_object_refs("ResourcePool")[0])
     _create_object('ClusterComputeResource', cluster)
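
Two details of the fake are worth calling out. In the vSphere API a ComputeResource (and therefore a ClusterComputeResource) exposes exactly one root resource pool through its resourcePool property, so the fake now stores that single reference directly and the helper is renamed _add_root_resource_pool to match. And registering a second HostSystem in create_cluster() gives the new aggregation code more than one host to sum over, which is what the doubled vCPU figures in the test exercise.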

File: host.py (nova/virt/vmwareapi, VCState)

@@ -163,44 +163,30 @@ class VCState(object):
         return self._stats
     def update_status(self):
-        """Update the current state of the host.
-        """
-        host_mor = vm_util.get_host_ref(self._session, self._cluster)
-        if host_mor is None:
-            return
-        summary = self._session._call_method(vim_util,
-                                             "get_dynamic_property",
-                                             host_mor,
-                                             "HostSystem",
-                                             "summary")
-        if summary is None:
-            return
+        """Update the current state of the cluster."""
+        # Get the datastore in the cluster
         try:
             ds = vm_util.get_datastore_ref_and_name(self._session,
                                                     self._cluster)
         except exception.DatastoreNotFound:
             ds = (None, None, 0, 0)
+        # Get cpu, memory stats from the cluster
+        stats = vm_util.get_stats_from_cluster(self._session, self._cluster)
+        about_info = self._session._call_method(vim_util, "get_about_info")
         data = {}
-        data["vcpus"] = summary.hardware.numCpuThreads
-        data["cpu_info"] =\
-            {"vendor": summary.hardware.vendor,
-             "model": summary.hardware.cpuModel,
-             "topology": {"cores": summary.hardware.numCpuCores,
-                          "sockets": summary.hardware.numCpuPkgs,
-                          "threads": summary.hardware.numCpuThreads}
-             }
+        data["vcpus"] = stats['cpu']['vcpus']
+        data["cpu_info"] = {"vendor": stats['cpu']['vendor'],
+                            "model": stats['cpu']['model'],
+                            "topology": {"cores": stats['cpu']['cores'],
+                                         "threads": stats['cpu']['vcpus']}}
         data["disk_total"] = ds[2] / (1024 * 1024 * 1024)
         data["disk_available"] = ds[3] / (1024 * 1024 * 1024)
         data["disk_used"] = data["disk_total"] - data["disk_available"]
-        data["host_memory_total"] = summary.hardware.memorySize / (1024 * 1024)
-        data["host_memory_free"] = data["host_memory_total"] -\
-            summary.quickStats.overallMemoryUsage
-        data["hypervisor_type"] = summary.config.product.name
-        data["hypervisor_version"] = summary.config.product.version
+        data["host_memory_total"] = stats['mem']['total']
+        data["host_memory_free"] = stats['mem']['free']
+        data["hypervisor_type"] = about_info.name
+        data["hypervisor_version"] = about_info.version
         data["hypervisor_hostname"] = self._host_name
         data["supported_instances"] = [('i686', 'vmware', 'hvm'),
                                        ('x86_64', 'vmware', 'hvm')]
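
Note that update_status() keeps cpu_info as a plain dict here, while the updated test compares get_available_resource()'s cpu_info against jsonutils.dumps(cpu_info); the JSON serialization evidently happens in driver code that is not part of this diff. Also, hypervisor_type and hypervisor_version for the VC driver now describe vCenter itself (taken from the AboutInfo) rather than a single ESXi host's product information.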

File: vim_util.py (nova/virt/vmwareapi)

@@ -167,15 +167,22 @@ def get_object_properties(vim, collector, mobj, type, properties):
 def get_dynamic_property(vim, mobj, type, property_name):
     """Gets a particular property of the Managed Object."""
-    obj_content = get_object_properties(vim, None, mobj, type, [property_name])
+    property_dict = get_dynamic_properties(vim, mobj, type, [property_name])
+    return property_dict.get(property_name)
+
+
+def get_dynamic_properties(vim, mobj, type, property_names):
+    """Gets the specified properties of the Managed Object."""
+    obj_content = get_object_properties(vim, None, mobj, type, property_names)
     if hasattr(obj_content, 'token'):
         vim.CancelRetrievePropertiesEx(token=obj_content.token)
-    property_value = None
+    property_dict = {}
     if obj_content.objects:
-        dynamic_property = obj_content.objects[0].propSet
-        if dynamic_property:
-            property_value = dynamic_property[0].val
-    return property_value
+        dynamic_properties = obj_content.objects[0].propSet
+        if dynamic_properties:
+            for prop in dynamic_properties:
+                property_dict[prop.name] = prop.val
+    return property_dict
 def get_objects(vim, type, properties_to_collect=None, all=False):
@@ -260,3 +267,8 @@ def get_properties_for_a_collection_of_objects(vim, type,
     return vim.RetrievePropertiesEx(
             vim.get_service_content().propertyCollector,
             specSet=[prop_filter_spec], options=options)
+
+
+def get_about_info(vim):
+    """Get the About Info from the service content."""
+    return vim.get_service_content().about
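
The two helpers added above are the plumbing for the cluster aggregation: get_dynamic_properties() fetches several properties of one managed object in a single RetrievePropertiesEx round trip and returns them as a dict keyed by property name, and get_about_info() exposes the service content's AboutInfo. A minimal usage sketch, assuming a driver session object named session and a ClusterComputeResource reference named cluster_ref:

    from nova.virt.vmwareapi import vim_util

    # One call instead of two separate get_dynamic_property() round trips.
    props = session._call_method(vim_util, "get_dynamic_properties",
                                 cluster_ref, "ClusterComputeResource",
                                 ["host", "resourcePool"])
    host_refs = props.get("host")          # unset properties simply come back as None
    root_pool = props.get("resourcePool")

    about = session._call_method(vim_util, "get_about_info")
    hypervisor = (about.name, about.version)   # e.g. ('VMware vCenter Server', '5.1.0')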

File: vm_util.py (nova/virt/vmwareapi)

@@ -760,6 +760,44 @@ def get_vm_state_from_name(session, vm_name):
     return vm_state
 
 
+def get_stats_from_cluster(session, cluster):
+    """Get the aggregate resource stats of a cluster."""
+    cpu_info = {'vcpus': 0, 'cores': 0, 'vendor': [], 'model': []}
+    mem_info = {'total': 0, 'free': 0}
+    # Get the Host and Resource Pool Managed Object Refs
+    prop_dict = session._call_method(vim_util, "get_dynamic_properties",
+                                     cluster, "ClusterComputeResource",
+                                     ["host", "resourcePool"])
+    if prop_dict:
+        host_ret = prop_dict.get('host')
+        if host_ret:
+            host_mors = host_ret.ManagedObjectReference
+            result = session._call_method(vim_util,
+                         "get_properties_for_a_collection_of_objects",
+                         "HostSystem", host_mors, ["summary.hardware"])
+            for obj in result.objects:
+                hardware_summary = obj.propSet[0].val
+                # Total vcpus is the sum of all pCPUs of individual hosts
+                # The overcommitment ratio is factored in by the scheduler
+                cpu_info['vcpus'] += hardware_summary.numCpuThreads
+                cpu_info['cores'] += hardware_summary.numCpuCores
+                cpu_info['vendor'].append(hardware_summary.vendor)
+                cpu_info['model'].append(hardware_summary.cpuModel)
+        res_mor = prop_dict.get('resourcePool')
+        if res_mor:
+            res_usage = session._call_method(vim_util, "get_dynamic_property",
+                            res_mor, "ResourcePool", "summary.runtime.memory")
+            if res_usage:
+                # maxUsage is the memory limit of the cluster available to VM's
+                mem_info['total'] = int(res_usage.maxUsage / (1024 * 1024))
+                # overallUsage is the hypervisor's view of memory usage by VM's
+                consumed = int(res_usage.overallUsage / (1024 * 1024))
+                mem_info['free'] = mem_info['total'] - consumed
+    stats = {'cpu': cpu_info, 'mem': mem_info}
+    return stats
+
+
 def get_cluster_ref_from_name(session, cluster_name):
     """Get reference to the cluster with the name specified."""
     cls = session._call_method(vim_util, "get_objects",
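
For reference, the shape of the dict the new helper hands back to VCState, filled in with the numbers the fake two-host cluster produces in the unit tests (a sketch only; a real cluster reports its own hardware):

    # stats = vm_util.get_stats_from_cluster(session, cluster_ref)
    stats = {'cpu': {'vcpus': 32,       # numCpuThreads summed over all hosts
                     'cores': 16,       # numCpuCores summed over all hosts
                     'vendor': ['Intel', 'Intel'],
                     'model': ['Intel(R) Xeon(R)', 'Intel(R) Xeon(R)']},
             'mem': {'total': 1000,     # root resource pool maxUsage, in MB
                     'free': 500}}      # total minus overallUsage, in MB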