Merge "Implement Host Statistics for PowerVM"

This commit is contained in:
Jenkins
2015-07-12 12:43:49 +00:00
committed by Gerrit Code Review
6 changed files with 975 additions and 15 deletions

View File

@@ -0,0 +1,285 @@
####################################################
# This file was manually generated.
#
####################################################
INFO{
{'comment': 'Fake...This file was manually generated.', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'rest/api/pcm/ManagedSystem/c5d782c7-44e4-3086-ad15-b16fb039d63b/RawMetrics/LongTermMonitor/LTM_8247-22L*2125D4A_phyp_20150527T074430+0000.json'}
END OF SECTION}
HEADERS{
{'content-length': '1878', 'x-powered-by': 'Servlet/3.0', 'last-modified': 'Thu, 30 Apr 2015 03:53:05 GMT', 'etag': '1430365985674', 'date': 'Thu, 30 Apr 2015 03:53:04 GMT', 'content-type': 'application/atom+xml'}
END OF SECTION}
BODY{
{
"systemUtil": {
"utilInfo": {
"version": "1.3.0",
"metricType": "Raw",
"monitoringType": "LTM",
"mtms": "8247-22L*2125D4A",
"name": "dev-4"
},
"utilSample": {
"timeStamp": "2015-05-27T08:17:45+0000",
"status": 0,
"errorInfo": [],
"timeBasedCycles": 8.0629725893315e+14,
"systemFirmware": {
"utilizedProcCycles": 58599310268,
"assignedMem": 4096
},
"processor": {
"totalProcUnits": 20,
"configurableProcUnits": 20,
"availableProcUnits": 18.9,
"procCyclesPerSecond": 512000000
},
"memory": {
"totalMem": 65536,
"availableMem": 32512,
"configurableMem": 65536
},
"sharedProcessorPool": [
{
"id": 0,
"name": "DefaultPool",
"assignedProcCycles": 1.6125945162342e+16,
"utilizedPoolCycles": 683011326288,
"maxProcUnits": 20,
"borrowedPoolProcUnits": 18
}
],
"lparsUtil": [
{
"id": 6,
"uuid": "2545BCC5-BAE8-4414-AD49-EAFC2DEE2546",
"type": "aixlinux",
"name": "fkh4-99b8fdca-kyleh",
"state": "Not Activated",
"affinityScore": 100,
"memory": {
"logicalMem": 2048,
"backedPhysicalMem": 2048
},
"processor": {
"poolId": 0,
"mode": "uncap",
"maxVirtualProcessors": 2,
"maxProcUnits": 0.2,
"weight": 64,
"entitledProcCycles": 1000000,
"utilizedCappedProcCycles": 10000,
"utilizedUnCappedProcCycles": 5000,
"idleProcCycles": 0,
"donatedProcCycles": 0,
"timeSpentWaitingForDispatch": 0,
"totalInstructions": 0,
"totalInstructionsExecutionTime": 0
}
},
{
"id": 5,
"uuid": "3B0237F9-26F1-41C7-BE57-A08C9452AD9D",
"type": "aixlinux",
"name": "fake_npiv",
"state": "Not Activated",
"affinityScore": 100,
"memory": {
"logicalMem": 256,
"backedPhysicalMem": 256
},
"processor": {
"poolId": 0,
"mode": "cap",
"maxVirtualProcessors": 1,
"maxProcUnits": 0.1,
"weight": 0,
"entitledProcCycles": 0,
"utilizedCappedProcCycles": 0,
"utilizedUnCappedProcCycles": 0,
"idleProcCycles": 0,
"donatedProcCycles": 0,
"timeSpentWaitingForDispatch": 0,
"totalInstructions": 0,
"totalInstructionsExecutionTime": 0
}
},
{
"id": 4,
"uuid": "66A2E886-D05D-42F4-87E0-C3BA02CF7C7E",
"type": "aixlinux",
"name": "kh4-9fdaa1ba-kyleh",
"state": "Not Activated",
"affinityScore": 100,
"memory": {
"logicalMem": 2048,
"backedPhysicalMem": 2048
},
"processor": {
"poolId": 0,
"mode": "uncap",
"maxVirtualProcessors": 2,
"maxProcUnits": 0.2,
"weight": 64,
"entitledProcCycles": 500000,
"utilizedCappedProcCycles": 10000,
"utilizedUnCappedProcCycles": 5000,
"idleProcCycles": 0,
"donatedProcCycles": 0,
"timeSpentWaitingForDispatch": 0,
"totalInstructions": 0,
"totalInstructionsExecutionTime": 0
}
},
{
"id": 3,
"uuid": "3B7A3E07-E0B0-4F35-8997-6019D0D1CFC8",
"type": "aixlinux",
"name": "placeholder",
"state": "Not Activated",
"affinityScore": 0,
"memory": {
"logicalMem": 0,
"backedPhysicalMem": 0
},
"processor": {
"mode": "share_idle_procs",
"maxVirtualProcessors": 0,
"maxProcUnits": 0,
"entitledProcCycles": 1000000,
"utilizedCappedProcCycles": 10000,
"utilizedUnCappedProcCycles": 5000,
"idleProcCycles": 0,
"donatedProcCycles": 0,
"timeSpentWaitingForDispatch": 0,
"totalInstructions": 0,
"totalInstructionsExecutionTime": 0
}
},
{
"id": 2,
"uuid": "42AD4FD4-DC64-4935-9E29-9B7C6F35AFCC",
"type": "aixlinux",
"name": "Ubuntu1410",
"state": "Open Firmware",
"affinityScore": 100,
"memory": {
"logicalMem": 20480,
"backedPhysicalMem": 20480
},
"processor": {
"poolId": 0,
"mode": "uncap",
"maxVirtualProcessors": 4,
"maxProcUnits": 0.4,
"weight": 128,
"entitledProcCycles": 1665629232513,
"utilizedCappedProcCycles": 254619289721,
"utilizedUnCappedProcCycles": 631419282,
"idleProcCycles": 0,
"donatedProcCycles": 0,
"timeSpentWaitingForDispatch": 0,
"totalInstructions": 150866895489,
"totalInstructionsExecutionTime": 183139925064
},
"network": {
"virtualEthernetAdapters": [
{
"vlanId": 2227,
"vswitchId": 0,
"physicalLocation": "U8247.22L.2125D4A-V2-C2",
"isPortVLANID": true,
"receivedPackets": 10,
"sentPackets": 100,
"droppedPackets": 5,
"sentBytes": 100,
"receivedBytes": 10000,
"receivedPhysicalPackets": 0,
"sentPhysicalPackets": 0,
"droppedPhysicalPackets": 0,
"sentPhysicalBytes": 0,
"receivedPhysicalBytes": 0
}
]
},
"storage": {
"genericVirtualAdapters": [
{
"physicalLocation": "U8247.22L.2125D4A-V2-C3",
"viosId": 1,
"viosAdapterSlotId": 1000
}
]
}
}
],
"viosUtil": [
{
"id": 1,
"uuid": "3443DB77-AED1-47ED-9AA5-3DB9C6CF7089",
"name": "IOServer - SN2125D4A",
"state": "Running",
"affinityScore": 100,
"memory": {
"assignedMem": 4096
},
"processor": {
"poolId": 0,
"mode": "uncap",
"maxVirtualProcessors": 4,
"maxProcUnits": 0.4,
"weight": 255,
"entitledProcCycles": 3503069246332,
"utilizedCappedProcCycles": 324805782979,
"utilizedUnCappedProcCycles": 209847016046,
"idleProcCycles": 250430293020,
"donatedProcCycles": 0,
"timeSpentWaitingForDispatch": 0,
"totalInstructions": 1189474458948,
"totalInstructionsExecutionTime": 510104519750
},
"network": {
"virtualEthernetAdapters": [
{
"vlanId": 2227,
"vswitchId": 0,
"physicalLocation": "U8247.22L.2125D4A-V1-C2",
"isPortVLANID": true,
"receivedPackets": 0,
"sentPackets": 0,
"droppedPackets": 0,
"sentBytes": 0,
"receivedBytes": 0,
"receivedPhysicalPackets": 0,
"sentPhysicalPackets": 0,
"droppedPhysicalPackets": 0,
"sentPhysicalBytes": 0,
"receivedPhysicalBytes": 0
},
{
"vlanId": 123,
"vswitchId": 0,
"physicalLocation": "U8247.22L.2125D4A-V1-C12",
"isPortVLANID": true,
"receivedPackets": 0,
"sentPackets": 0,
"droppedPackets": 0,
"sentBytes": 0,
"receivedBytes": 0,
"receivedPhysicalPackets": 0,
"sentPhysicalPackets": 0,
"droppedPhysicalPackets": 0,
"sentPhysicalBytes": 0,
"receivedPhysicalBytes": 0
}
]
}
}
]
}
}
}
END OF SECTION}

View File

@@ -0,0 +1,312 @@
####################################################
# This file was manually generated.
#
####################################################
INFO{
{'comment': 'Fake...This file was manually generated.', 'status': 200, 'pw': 'abc123', 'reason': 'OK', 'host': '9.1.2.3', 'user': 'hscroot', 'path': 'rest/api/pcm/ManagedSystem/c5d782c7-44e4-3086-ad15-b16fb039d63b/RawMetrics/LongTermMonitor/LTM_8247-22L*2125D4A_phyp_20150527T074430+0000.json'}
END OF SECTION}
HEADERS{
{'content-length': '1878', 'x-powered-by': 'Servlet/3.0', 'last-modified': 'Thu, 30 Apr 2015 03:53:05 GMT', 'etag': '1430365985674', 'date': 'Thu, 30 Apr 2015 03:53:04 GMT', 'content-type': 'application/atom+xml'}
END OF SECTION}
BODY{
{
"systemUtil": {
"utilInfo": {
"version": "1.3.0",
"metricType": "Raw",
"monitoringType": "LTM",
"mtms": "8247-22L*2125D4A",
"name": "dev-4"
},
"utilSample": {
"timeStamp": "2015-05-27T08:17:45+0000",
"status": 0,
"errorInfo": [],
"timeBasedCycles": 8.0629725893315e+14,
"systemFirmware": {
"utilizedProcCycles": 58599310268,
"assignedMem": 4096
},
"processor": {
"totalProcUnits": 20,
"configurableProcUnits": 20,
"availableProcUnits": 18.9,
"procCyclesPerSecond": 512000000
},
"memory": {
"totalMem": 65536,
"availableMem": 32512,
"configurableMem": 65536
},
"sharedProcessorPool": [
{
"id": 0,
"name": "DefaultPool",
"assignedProcCycles": 1.6125945162342e+16,
"utilizedPoolCycles": 683011326288,
"maxProcUnits": 20,
"borrowedPoolProcUnits": 18
}
],
"lparsUtil": [
{
"id": 7,
"uuid": "2545BCC5-BAE8-4414-AD49-EAFC2DEE2546",
"type": "aixlinux",
"name": "asdfasdfadsf",
"state": "Not Activated",
"affinityScore": 100,
"memory": {
"logicalMem": 2048,
"backedPhysicalMem": 2048
},
"processor": {
"poolId": 0,
"mode": "uncap",
"maxVirtualProcessors": 2,
"maxProcUnits": 0.2,
"weight": 64,
"entitledProcCycles": 23502,
"utilizedCappedProcCycles": 100,
"utilizedUnCappedProcCycles": 100,
"idleProcCycles": 0,
"donatedProcCycles": 0,
"timeSpentWaitingForDispatch": 0,
"totalInstructions": 0,
"totalInstructionsExecutionTime": 0
}
},
{
"id": 6,
"uuid": "2545BCC5-BAE8-4414-AD49-EAFC2DEE2546",
"type": "aixlinux",
"name": "diff-lpar-6",
"state": "Not Activated",
"affinityScore": 100,
"memory": {
"logicalMem": 2048,
"backedPhysicalMem": 2048
},
"processor": {
"poolId": 0,
"mode": "uncap",
"maxVirtualProcessors": 2,
"maxProcUnits": 0.2,
"weight": 64,
"entitledProcCycles": 1000000,
"utilizedCappedProcCycles": 10000,
"utilizedUnCappedProcCycles": 5000,
"idleProcCycles": 0,
"donatedProcCycles": 0,
"timeSpentWaitingForDispatch": 0,
"totalInstructions": 0,
"totalInstructionsExecutionTime": 0
}
},
{
"id": 5,
"uuid": "3B0237F9-26F1-41C7-BE57-A08C9452AD9D",
"type": "aixlinux",
"name": "fake_npiv",
"state": "Not Activated",
"affinityScore": 100,
"memory": {
"logicalMem": 256,
"backedPhysicalMem": 256
},
"processor": {
"poolId": 0,
"mode": "cap",
"maxVirtualProcessors": 1,
"maxProcUnits": 0.1,
"weight": 0,
"entitledProcCycles": 0,
"utilizedCappedProcCycles": 0,
"utilizedUnCappedProcCycles": 0,
"idleProcCycles": 0,
"donatedProcCycles": 0,
"timeSpentWaitingForDispatch": 0,
"totalInstructions": 0,
"totalInstructionsExecutionTime": 0
}
},
{
"id": 4,
"uuid": "66A2E886-D05D-42F4-87E0-C3BA02CF7C7E",
"type": "aixlinux",
"name": "kh4-9fdaa1ba-kyleh",
"state": "Not Activated",
"affinityScore": 100,
"memory": {
"logicalMem": 2048,
"backedPhysicalMem": 2048
},
"processor": {
"poolId": 0,
"mode": "uncap",
"maxVirtualProcessors": 2,
"maxProcUnits": 0.2,
"weight": 64,
"entitledProcCycles": 2000000,
"utilizedCappedProcCycles": 50000,
"utilizedUnCappedProcCycles": 10000,
"idleProcCycles": 0,
"donatedProcCycles": 0,
"timeSpentWaitingForDispatch": 0,
"totalInstructions": 0,
"totalInstructionsExecutionTime": 0
}
},
{
"id": 3,
"uuid": "3B7A3E07-E0B0-4F35-8997-6019D0D1CFC8",
"type": "aixlinux",
"name": "placeholder",
"state": "Not Activated",
"affinityScore": 0,
"memory": {
"logicalMem": 0,
"backedPhysicalMem": 0
},
"processor": {
"mode": "share_idle_procs",
"maxVirtualProcessors": 0,
"maxProcUnits": 0,
"entitledProcCycles": 0,
"entitledProcCycles": 2000000,
"utilizedCappedProcCycles": 50000,
"utilizedUnCappedProcCycles": 10000,
"donatedProcCycles": 0,
"timeSpentWaitingForDispatch": 0,
"totalInstructions": 0,
"totalInstructionsExecutionTime": 0
}
},
{
"id": 2,
"uuid": "42AD4FD4-DC64-4935-9E29-9B7C6F35AFCC",
"type": "aixlinux",
"name": "Ubuntu1410",
"state": "Open Firmware",
"affinityScore": 100,
"memory": {
"logicalMem": 20480,
"backedPhysicalMem": 20480
},
"processor": {
"poolId": 0,
"mode": "uncap",
"maxVirtualProcessors": 4,
"maxProcUnits": 0.4,
"weight": 128,
"entitledProcCycles": 1765629232513,
"utilizedCappedProcCycles": 264619289721,
"utilizedUnCappedProcCycles": 641419282,
"idleProcCycles": 0,
"donatedProcCycles": 0,
"timeSpentWaitingForDispatch": 0,
"totalInstructions": 160866895489,
"totalInstructionsExecutionTime": 193139925064
},
"network": {
"virtualEthernetAdapters": [
{
"vlanId": 2227,
"vswitchId": 0,
"physicalLocation": "U8247.22L.2125D4A-V2-C2",
"isPortVLANID": true,
"receivedPackets": 10,
"sentPackets": 100,
"droppedPackets": 5,
"sentBytes": 100,
"receivedBytes": 10000,
"receivedPhysicalPackets": 0,
"sentPhysicalPackets": 0,
"droppedPhysicalPackets": 0,
"sentPhysicalBytes": 0,
"receivedPhysicalBytes": 0
}
]
},
"storage": {
"genericVirtualAdapters": [
{
"physicalLocation": "U8247.22L.2125D4A-V2-C3",
"viosId": 1,
"viosAdapterSlotId": 1000
}
]
}
}
],
"viosUtil": [
{
"id": 1,
"uuid": "3443DB77-AED1-47ED-9AA5-3DB9C6CF7089",
"name": "IOServer - SN2125D4A",
"state": "Running",
"affinityScore": 100,
"memory": {
"assignedMem": 4096
},
"processor": {
"poolId": 0,
"mode": "uncap",
"maxVirtualProcessors": 4,
"maxProcUnits": 0.4,
"weight": 255,
"entitledProcCycles": 3603069246332,
"utilizedCappedProcCycles": 334805782979,
"utilizedUnCappedProcCycles": 219847016046,
"idleProcCycles": 260430293020,
"donatedProcCycles": 0,
"timeSpentWaitingForDispatch": 0,
"totalInstructions": 1289474458948,
"totalInstructionsExecutionTime": 520104519750
},
"network": {
"virtualEthernetAdapters": [
{
"vlanId": 2227,
"vswitchId": 0,
"physicalLocation": "U8247.22L.2125D4A-V1-C2",
"isPortVLANID": true,
"receivedPackets": 0,
"sentPackets": 0,
"droppedPackets": 0,
"sentBytes": 0,
"receivedBytes": 0,
"receivedPhysicalPackets": 0,
"sentPhysicalPackets": 0,
"droppedPhysicalPackets": 0,
"sentPhysicalBytes": 0,
"receivedPhysicalBytes": 0
},
{
"vlanId": 123,
"vswitchId": 0,
"physicalLocation": "U8247.22L.2125D4A-V1-C12",
"isPortVLANID": true,
"receivedPackets": 0,
"sentPackets": 0,
"droppedPackets": 0,
"sentBytes": 0,
"receivedBytes": 0,
"receivedPhysicalPackets": 0,
"sentPhysicalPackets": 0,
"droppedPhysicalPackets": 0,
"sentPhysicalBytes": 0,
"receivedPhysicalBytes": 0
}
]
}
}
]
}
}
}
END OF SECTION}

View File

@@ -31,9 +31,6 @@ MS_HTTPRESP_FILE = "managedsystem.txt"
class PyPowerVM(fixtures.Fixture):
"""Patch out PyPowerVM Session and Adapter."""
def __init__(self):
pass
def setUp(self):
super(PyPowerVM, self).setUp()
self._sess_patcher = mock.patch('pypowervm.adapter.Session')
@@ -48,9 +45,6 @@ class PyPowerVM(fixtures.Fixture):
class ImageAPI(fixtures.Fixture):
"""Mock out the Glance API."""
def __init__(self):
pass
def setUp(self):
super(ImageAPI, self).setUp()
self._img_api_patcher = mock.patch('nova.image.API')
@@ -62,9 +56,6 @@ class ImageAPI(fixtures.Fixture):
class DiskAdapter(fixtures.Fixture):
"""Mock out the DiskAdapter."""
def __init__(self):
pass
def setUp(self):
super(DiskAdapter, self).setUp()
self._std_disk_adpt = mock.patch('nova_powervm.virt.powervm.disk.'
@@ -73,12 +64,20 @@ class DiskAdapter(fixtures.Fixture):
self.addCleanup(self._std_disk_adpt.stop)
class HostCPUStats(fixtures.Fixture):
    """Mock out the HostCPUStats.

    Patches nova_powervm.virt.powervm.host.HostCPUStats so driver init
    (init_host) does not require live PCM metrics.  The started mock is
    exposed as self.host_cpu_stats; the patch is stopped on cleanup.
    """

    def setUp(self):
        super(HostCPUStats, self).setUp()
        self._host_cpu_stats = mock.patch('nova_powervm.virt.powervm.host.'
                                          'HostCPUStats')
        self.host_cpu_stats = self._host_cpu_stats.start()
        self.addCleanup(self._host_cpu_stats.stop)
class VolumeAdapter(fixtures.Fixture):
"""Mock out the VolumeAdapter."""
def __init__(self):
pass
def setUp(self):
super(VolumeAdapter, self).setUp()
self._std_vol_adpt = mock.patch('nova_powervm.virt.powervm.volume.'
@@ -90,9 +89,6 @@ class VolumeAdapter(fixtures.Fixture):
class PowerVMComputeDriver(fixtures.Fixture):
"""Construct a fake compute driver."""
def __init__(self):
pass
@mock.patch('nova_powervm.virt.powervm.disk.localdisk.LocalStorage')
@mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver._get_adapter')
@mock.patch('nova_powervm.virt.powervm.mgmt.get_mgmt_partition')
@@ -110,6 +106,9 @@ class PowerVMComputeDriver(fixtures.Fixture):
self.pypvm.setUp()
self.addCleanup(self.pypvm.cleanUp)
# Set up the mock CPU stats (init_host uses it)
self.useFixture(HostCPUStats())
self.drv = driver.PowerVMDriver(fake.FakeVirtAPI())
self.drv.adapter = self.pypvm.apt
self._init_host()

View File

@@ -15,11 +15,16 @@
# under the License.
#
import mock
import logging
from nova import test
import os
from pypowervm.tests.wrappers.util import pvmhttp
import pypowervm.wrappers.managed_system as pvm_ms
from pypowervm.wrappers.pcm import phyp as pvm_phyp
from nova_powervm.tests.virt.powervm import fixtures as fx
from nova_powervm.virt.powervm import host as pvm_host
MS_HTTPRESP_FILE = "managedsystem.txt"
@@ -74,3 +79,153 @@ class TestPowerVMHost(test.TestCase):
else:
value = stats['stats'].get(stat, None)
self.assertIsNotNone(value)
class TestHostCPUStats(test.TestCase):
    """Tests for the HostCPUStats metric transformer.

    Loads two canned PCM (phyp) samples from the data directory: the
    'current' sample (phyp_pcm_data.txt) and the 'previous' sample
    (phyp_2_pcm_data.txt).  The expected values in the assertions below are
    derived from those files.
    """

    def setUp(self):
        super(TestHostCPUStats, self).setUp()

        # Fixture for the adapter
        self.pypvm = self.useFixture(fx.PyPowerVM())
        self.adpt = self.pypvm.apt

        # Test data
        dirname = os.path.dirname(__file__)
        file_name = os.path.join(dirname, 'data', 'phyp_pcm_data.txt')
        self.cur_json_resp = pvmhttp.PVMFile(file_name)
        file_name = os.path.join(dirname, 'data', 'phyp_2_pcm_data.txt')
        self.prev_json_resp = pvmhttp.PVMFile(file_name)

        # Put in the samples.
        self.phyp = pvm_phyp.PhypInfo(self.cur_json_resp.body)
        self.prev_phyp = pvm_phyp.PhypInfo(self.prev_json_resp.body)

    def _get_sample(self, lpar_id, sample):
        """Return the LPAR sample with the given id, or None if absent."""
        for lpar in sample.lpars:
            if lpar.id == lpar_id:
                return lpar
        return None

    @mock.patch('pypowervm.tasks.monitor.util.MetricCache._refresh_if_needed')
    @mock.patch('pypowervm.tasks.monitor.util.ensure_ltm_monitors')
    def test_update_internal_metric(self, mock_ensure_ltm, mock_refresh):
        host_stats = pvm_host.HostCPUStats(self.adpt, 'host_uuid')

        # Make sure None is returned if there is no data.
        host_stats.cur_phyp = None
        host_stats._update_internal_metric()
        self.assertEqual(None, host_stats.cur_data)

        # Make the 'prev' the current...for the first pass
        host_stats.cur_phyp = self.prev_phyp
        host_stats.prev_phyp = None
        host_stats._update_internal_metric()

        # Validate the dictionary...
        expect = {'iowait': 0, 'idle': 1.6125096675799704e+16,
                  'kernel': 58599310268, 'user': 789903553028}
        self.assertEqual(expect, host_stats.cur_data)

        # Now 'increment' it with a new current/previous
        host_stats.cur_phyp = self.phyp
        host_stats.prev_phyp = self.prev_phyp
        host_stats._update_internal_metric()

        # Validate this dictionary.  Note that the values are still higher
        # overall, even though we add the 'deltas' from each VM.
        expect = {'iowait': 0, 'idle': 1.6125066665694504e+16,
                  'kernel': 58599310268, 'user': 819913658228}
        self.assertEqual(expect, host_stats.cur_data)

    @mock.patch('pypowervm.tasks.monitor.util.MetricCache._refresh_if_needed')
    @mock.patch('pypowervm.tasks.monitor.util.ensure_ltm_monitors')
    def test_gather_user_cycles(self, mock_ensure_ltm, mock_refresh):
        host_stats = pvm_host.HostCPUStats(self.adpt, 'host_uuid')

        # Test that we can run with previous samples and then without.
        host_stats.cur_phyp = self.phyp
        host_stats.prev_phyp = self.prev_phyp
        resp = host_stats._gather_user_cycles()
        self.assertEqual(30010105200, resp)

        # Last, test to make sure the previous data is used.
        host_stats.prev_data = {'user': 1000000}
        resp = host_stats._gather_user_cycles()
        self.assertEqual(30011105200, resp)

        # Now test if there is no previous sample.
        host_stats.prev_phyp = None
        resp = host_stats._gather_user_cycles()
        self.assertEqual(819914643228, resp)

    @mock.patch('pypowervm.tasks.monitor.util.MetricCache._refresh_if_needed')
    @mock.patch('pypowervm.tasks.monitor.util.ensure_ltm_monitors')
    def test_delta_proc_cycles(self, mock_ensure_ltm, mock_refresh):
        host_stats = pvm_host.HostCPUStats(self.adpt, 'host_uuid')

        # Test that a previous sample allows us to gather the delta across all
        # of the VMs.  This should take into account the scenario where a LPAR
        # is deleted and a new one takes its place (LPAR ID 6)
        delta = host_stats._delta_proc_cycles(self.phyp.sample.lpars,
                                              self.prev_phyp.sample.lpars)
        self.assertEqual(10010105200, delta)

        # Now test as if there is no previous data.  Should result in higher
        # numbers.
        delta2 = host_stats._delta_proc_cycles(self.phyp.sample.lpars, None)
        self.assertEqual(265260844203, delta2)
        self.assertTrue(delta2 > delta)

        # Test that we can do this with the VIOSes as well.
        delta = host_stats._delta_proc_cycles(self.phyp.sample.vioses,
                                              self.prev_phyp.sample.vioses)
        self.assertEqual(20000000000, delta)

    @mock.patch('pypowervm.tasks.monitor.util.MetricCache._refresh_if_needed')
    @mock.patch('pypowervm.tasks.monitor.util.ensure_ltm_monitors')
    def test_delta_user_cycles(self, mock_ensure_ltm, mock_refresh):
        host_stats = pvm_host.HostCPUStats(self.adpt, 'host_uuid')

        # Test that a previous sample allows us to gather just the delta.
        new_elem = self._get_sample(4, self.phyp.sample)
        old_elem = self._get_sample(4, self.prev_phyp.sample)
        delta = host_stats._delta_user_cycles(new_elem, old_elem)
        self.assertEqual(45000, delta)

        # Validate the scenario where we don't have a previous
        delta = host_stats._delta_user_cycles(new_elem, None)
        self.assertEqual(60000, delta)

    @mock.patch('pypowervm.tasks.monitor.util.MetricCache._refresh_if_needed')
    @mock.patch('pypowervm.tasks.monitor.util.ensure_ltm_monitors')
    def test_find_prev_sample(self, mock_ensure_ltm, mock_refresh):
        host_stats = pvm_host.HostCPUStats(self.adpt, 'host_uuid')

        # Sample 6 in the current shouldn't match the previous.  It has the
        # same LPAR ID, but a different name.  This is considered different
        new_elem = self._get_sample(6, self.phyp.sample)
        prev = host_stats._find_prev_sample(new_elem,
                                            self.prev_phyp.sample.lpars)
        self.assertIsNone(prev)

        # Lpar 4 should be in the old one.  Match that up.
        new_elem = self._get_sample(4, self.phyp.sample)
        prev = host_stats._find_prev_sample(new_elem,
                                            self.prev_phyp.sample.lpars)
        self.assertIsNotNone(prev)
        self.assertEqual(500000, prev.processor.entitled_proc_cycles)

        # Test that we get None back if there are no previous samples
        prev = host_stats._find_prev_sample(new_elem, None)
        self.assertIsNone(prev)

    @mock.patch('pypowervm.tasks.monitor.util.MetricCache._refresh_if_needed')
    @mock.patch('pypowervm.tasks.monitor.util.ensure_ltm_monitors')
    def test_get_total_cycles(self, mock_ensure_ltm, mock_refresh):
        host_stats = pvm_host.HostCPUStats(self.adpt, 'host_uuid')
        host_stats.cur_phyp = self.phyp

        # Make sure we get the full system cycles.
        max_cycles = host_stats._get_total_cycles()
        self.assertEqual(1.6125945178663e+16, max_cycles)

View File

@@ -99,6 +99,10 @@ class PowerVMDriver(driver.ComputeDriver):
# Initialize the volume drivers
self.vol_drvs = _inst_dict(VOLUME_DRIVER_MAPPINGS)
# Init Host CPU Statistics
self.host_cpu_stats = pvm_host.HostCPUStats(self.adapter,
self.host_uuid)
LOG.info(_LI("The compute driver has been initialized."))
def _get_adapter(self):
@@ -166,6 +170,15 @@ class PowerVMDriver(driver.ComputeDriver):
lpar_list = vm.get_lpar_names(self.adapter)
return lpar_list
def get_host_cpu_stats(self):
"""Return the current CPU state of the host."""
host_stats = self.host_cpu_stats.get_host_cpu_stats()
# TODO(thorst) Implement a frequency check.
host_stats['frequency'] = 3500
return host_stats
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None,
flavor=None):

View File

@@ -19,9 +19,12 @@ from nova.compute import arch
from nova.compute import hv_type
from nova.compute import vm_mode
from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_serialization import jsonutils
from pypowervm.tasks.monitor import util as pcm_util
LOG = logging.getLogger(__name__)
# Power VM hypervisor info
@@ -73,3 +76,196 @@ def build_host_resource_from_ms(ms_wrapper):
data["stats"] = stats
return data
class HostCPUStats(pcm_util.MetricCache):
    """Transforms the PowerVM CPU metrics into the Nova format.

    PowerVM only gathers the CPU statistics once every 30 seconds.  It does
    this to reduce overhead.  There is a function to gather statistics
    quicker, but that can be very expensive.  Therefore, to ensure that the
    client's workload is not impacted, these 'longer term' metrics will be
    used.

    This class builds off of a base pypowervm function where it can obtain
    the samples through a PCM 'cache'.  If a new sample is available, the
    cache pulls the sample.  If it is not, the existing sample is used.

    This can result in multiple, quickly successive calls to the host stats
    returning the same data (because a new sample may not be available yet).

    The class analyzes the data and collapses it down to the format needed by
    the Nova manager.
    """

    def __init__(self, adapter, host_uuid):
        """Creates an instance of the HostCPUStats.

        :param adapter: The pypowervm Adapter.
        :param host_uuid: The UUID of the host CEC to maintain a metrics
                          cache for.
        """
        # A dictionary to store the number of cycles spent.  This is defined
        # in the _update_internal_metric method.
        self.cur_data, self.prev_data = None, None

        # Invoke the parent to seed the metrics.  Don't include VIO - will
        # result in quicker calls.
        super(HostCPUStats, self).__init__(adapter, host_uuid,
                                           include_vio=False)

    @lockutils.synchronized('pvm_host_metrics_get')
    def get_host_cpu_stats(self):
        """Returns the currently known host CPU stats.

        :return: The dictionary (as defined by the compute driver's
                 get_host_cpu_stats).  If insufficient data is available,
                 then 'None' will be returned.
        """
        # Refresh if needed.  Will no-op if no refresh is required.
        self._refresh_if_needed()

        # The invoking code needs the total cycles for this to work properly.
        # Return the dictionary format of the cycles as derived by the
        # _update_internal_metric method.  If there is no data yet, None
        # would be the result.
        return self.cur_data

    def _update_internal_metric(self):
        """Uses the latest stats from the cache, and parses to Nova format.

        This method is invoked by the parent class after the raw metrics are
        updated.
        """
        # If there is no 'new' data (perhaps sampling is not turned on) then
        # return no data.
        if self.cur_phyp is None:
            self.cur_data = None
            return

        # Move the current data to the previous.  The previous data is used
        # for some internal calculations.  Blank out the current data just
        # in case of error.  Don't want to persist two copies of same.
        self.prev_data, self.cur_data = self.cur_data, None

        # Now we need the firmware cycles.
        fw_cycles = self.cur_phyp.sample.system_firmware.utilized_proc_cycles

        # Compute the max cycles.
        tot_cycles = self._get_total_cycles()

        # Get the total user cycles.
        user_cycles = self._gather_user_cycles()

        # Idle is the subtraction of all.
        idle_cycles = tot_cycles - user_cycles - fw_cycles

        # Now save these cycles to the internal data structure.
        self.cur_data = {'idle': idle_cycles, 'kernel': fw_cycles,
                         'user': user_cycles, 'iowait': 0}

    def _gather_user_cycles(self):
        """The estimated total user cycles.

        The sample data includes information about how much CPU has been used
        by workloads and the Virtual I/O Servers.  There is not one global
        counter that can be used to obtain the CPU spent cycles.

        This method will calculate the delta of workload (and I/O Server)
        cycles between the previous sample and the current sample, and then
        add it to the previous 'user cycles'.

        There are edge cases for this however.  If a VM is deleted or
        migrated its cycles will no longer be taken into account.  The
        algorithm takes this into account by building on top of the previous
        sample's user cycles.

        :return: Estimated cycles spent on workload (including VMs and
                 Virtual I/O Server).  This represents the entire server's
                 current 'user' load.
        """
        # Current samples should be guaranteed to be there.
        vm_cur_samples = self.cur_phyp.sample.lpars
        vios_cur_samples = self.cur_phyp.sample.vioses

        # The previous samples may not have been there.
        vm_prev_samples, vios_prev_samples = None, None
        if self.prev_phyp is not None:
            vm_prev_samples = self.prev_phyp.sample.lpars
            vios_prev_samples = self.prev_phyp.sample.vioses

        # Gather the delta cycles between the previous and current data sets
        vm_delta_cycles = self._delta_proc_cycles(vm_cur_samples,
                                                  vm_prev_samples)
        vios_delta_cycles = self._delta_proc_cycles(vios_cur_samples,
                                                    vios_prev_samples)

        # The used cycles is the total of used cycles from before along with
        # the new delta cycles.
        prev_user_cycles = (0 if self.prev_data is None
                            else self.prev_data['user'])
        return prev_user_cycles + vm_delta_cycles + vios_delta_cycles

    def _delta_proc_cycles(self, samples, prev_samples):
        """Sums all the processor delta cycles for a set of VM/VIOS samples.

        This sum is the difference from the last sample to the current
        sample.

        :param samples: A set of PhypVMSample or PhypViosSample samples.
        :param prev_samples: The set of the previous samples.  May be None.
        :return: The cycles spent on workload across all of the samples.
        """
        # Determine the user cycles spent between the last sample and the
        # current.
        user_cycles = 0
        for lpar_sample in samples:
            prev_sample = self._find_prev_sample(lpar_sample, prev_samples)
            user_cycles += self._delta_user_cycles(lpar_sample, prev_sample)
        return user_cycles

    @staticmethod
    def _delta_user_cycles(cur_sample, prev_sample):
        """Determines the delta of user cycles from the cur and prev sample.

        :param cur_sample: The current sample.
        :param prev_sample: The previous sample.  May be None.
        :return: The difference in cycles between the two samples.  If the
                 data only exists in the current sample (indicates a new
                 workload), then all of the cycles from the current sample
                 will be considered the delta.
        """
        prev_amount = (0 if prev_sample is None else
                       prev_sample.processor.util_cap_proc_cycles +
                       prev_sample.processor.util_uncap_proc_cycles)
        cur_amount = (cur_sample.processor.util_cap_proc_cycles +
                      cur_sample.processor.util_uncap_proc_cycles)
        return cur_amount - prev_amount

    @staticmethod
    def _find_prev_sample(sample, prev_samples):
        """Finds the previous VM Sample for a given current sample.

        A match requires both the same LPAR id and the same name; a reused id
        with a different name is treated as a new workload.

        :param sample: The current sample.
        :param prev_samples: The previous samples to search through.
        :return: The previous sample, if it exists.  None otherwise.
        """
        # Will occur if there are no previous samples.
        if prev_samples is None:
            return None
        for prev_sample in prev_samples:
            if prev_sample.id == sample.id and prev_sample.name == sample.name:
                return prev_sample
        return None

    def _get_total_cycles(self):
        """Returns the 'total cycles' on the system.

        :return: The estimated total cycles spent
        """
        sample = self.cur_phyp.sample

        # Gather the estimated cycle count.  NOTE(review): the local name
        # suggests cycles-per-second but the value read is the sample's
        # time_based_cycles counter - the unit tests pin this exact product.
        total_procs = sample.processor.configurable_proc_units
        cycles_per_sec = sample.time_based_cycles
        est_total_cycles_per_sec = total_procs * cycles_per_sec
        return est_total_cycles_per_sec