Add Hyper-V storage QoS support

Hyper-V provides options to specify minimum and maximum IOPS per virtual
disk.

By leveraging this feature, this change adds support for setting QoS
specs on instance local disks as well as on volumes exported through SMB.

Implements: bp hyperv-storage-qos
Change-Id: I4ce44c8b7ab54a0bfc8c7401e675876dbd109e2d
Authored by Lucian Petrut on 2015-04-28 10:08:31 +03:00, committed by Adelina Tuvenie
parent a08aa6a2c0
commit cb0486cc72
8 changed files with 273 additions and 10 deletions
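
For context, the QoS specs consumed by this change arrive in two forms:
flavor extra specs using the 'storage_qos' scope, applied to instance
local disks, and QoS specs carried in a volume's connection info, applied
by the SMB volume driver. A hypothetical illustration of both formats,
using only the keys handled by the code below:

# Hypothetical examples; the key names are the ones parsed in this diff.

# 1. Flavor extra specs: the 'storage_qos:' prefix is stripped by
#    VMOps._get_storage_qos_specs() and the remaining key must be one of
#    total_bytes_sec, min_bytes_sec, total_iops_sec or min_iops_sec.
flavor_extra_specs = {
    'storage_qos:min_bytes_sec': '2097152',     # 2 MiB/s reservation
    'storage_qos:total_bytes_sec': '10485760',  # 10 MiB/s limit
}

# 2. Volume QoS specs, as delivered in the attach-time connection info and
#    parsed by VolumeOps.parse_disk_qos_specs().
connection_info = {
    'data': {
        'qos_specs': {
            'min_iops_sec': '100',
            'total_iops_sec': '500',
        }
    }
}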


@@ -339,6 +339,7 @@ class VMOps(object):
serial_ports = self._get_image_serial_port_settings(image_meta)
self._create_vm_com_port_pipes(instance, serial_ports)
self._set_instance_disk_qos_specs(instance)
for vif in network_info:
LOG.debug('Creating nic for instance', instance=instance)
@@ -814,3 +815,27 @@ class VMOps(object):
vif_driver = self._get_vif_driver(vif.get('type'))
vif_driver.unplug(instance, vif)
self._vmutils.destroy_nic(instance.name, vif['id'])
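# Applies the storage QoS specs defined in the instance flavor to every
# disk image stored in the instance directory (e.g. root, ephemeral and
# config drive images).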
def _set_instance_disk_qos_specs(self, instance):
min_iops, max_iops = self._get_storage_qos_specs(instance)
local_disks = self._get_instance_local_disks(instance.name)
for disk_path in local_disks:
self._vmutils.set_disk_qos_specs(instance.name, disk_path,
min_iops, max_iops)
def _get_instance_local_disks(self, instance_name):
instance_path = self._pathutils.get_instance_dir(instance_name)
instance_disks = self._vmutils.get_vm_storage_paths(instance_name)[0]
local_disks = [disk_path for disk_path in instance_disks
if disk_path.find(instance_path) != -1]
return local_disks
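# Collects the flavor extra specs using the 'storage_qos' scope (e.g.
# storage_qos:total_bytes_sec) and has VolumeOps convert them into a
# (min_iops, max_iops) tuple.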
def _get_storage_qos_specs(self, instance):
extra_specs = instance.flavor.get('extra_specs') or {}
storage_qos_specs = {}
for spec, value in extra_specs.iteritems():
if ':' in spec:
scope, key = spec.split(':')
if scope == 'storage_qos':
storage_qos_specs[key] = value
return self._volumeops.parse_disk_qos_specs(storage_qos_specs)


@@ -812,3 +812,7 @@ class VMUtils(object):
def get_vm_power_state(self, vm_enabled_state):
return self._enabled_states_map.get(vm_enabled_state,
constants.HYPERV_VM_STATE_OTHER)
def set_disk_qos_specs(self, vm_name, disk_path, min_iops, max_iops):
LOG.warn(_LW("The root/virtualization WMI namespace does not "
"support QoS. Ignoring QoS specs."))


@@ -404,3 +404,11 @@ class VMUtilsV2(vmutils.VMUtils):
vm = self._lookup_vm_check(vm_name)
vmsettings = self._get_vm_setting_data(vm)
return [note for note in vmsettings.Notes if note]
def set_disk_qos_specs(self, vm_name, disk_path, min_iops, max_iops):
disk_resource = self._get_mounted_disk_resource_from_path(
disk_path, is_physical=False)
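# IOPSLimit and IOPSReservation are expressed in normalized IOPS
# (8 KB I/O units, matching the _IOPS_BASE_SIZE conversion in VolumeOps).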
disk_resource.IOPSLimit = max_iops
disk_resource.IOPSReservation = min_iops
# VMUtilsV2._modify_virt_resource does not require the vm path.
self._modify_virt_resource(disk_resource, None)


@@ -27,6 +27,7 @@ from nova.virt import driver
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from hyperv.i18n import _, _LE, _LW
from hyperv.nova import utilsfactory
@@ -61,6 +62,10 @@ class VolumeOps(object):
"""Management class for Volume-related tasks
"""
_SUPPORTED_QOS_SPECS = ['total_bytes_sec', 'min_bytes_sec',
'total_iops_sec', 'min_iops_sec']
_IOPS_BASE_SIZE = 8 * units.Ki
def __init__(self):
self._vmutils = utilsfactory.get_vmutils()
self._volutils = utilsfactory.get_volumeutils()
@@ -99,6 +104,13 @@ class VolumeOps(object):
connection_info=connection_info)
volume_driver.attach_volume(connection_info, instance_name, ebs_root)
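# Front-end QoS specs, when present in the connection info, are applied
# right after the volume is attached; drivers that cannot enforce them
# simply log a warning.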
qos_specs = connection_info['data'].get('qos_specs') or {}
min_iops, max_iops = self.parse_disk_qos_specs(qos_specs)
if min_iops or max_iops:
volume_driver.set_disk_qos_specs(connection_info,
instance_name,
min_iops, max_iops)
def detach_volume(self, connection_info, instance_name):
volume_driver = self._get_volume_driver(
connection_info=connection_info)
@@ -147,6 +159,42 @@ class VolumeOps(object):
connection_info=connection_info)
volume_driver.initialize_volume_connection(connection_info)
def parse_disk_qos_specs(self, qos_specs):
total_bytes_sec = int(qos_specs.get('total_bytes_sec', 0))
min_bytes_sec = int(qos_specs.get('min_bytes_sec', 0))
total_iops = int(qos_specs.get('total_iops_sec',
self._bytes_per_sec_to_iops(
total_bytes_sec)))
min_iops = int(qos_specs.get('min_iops_sec',
self._bytes_per_sec_to_iops(
min_bytes_sec)))
if total_iops and total_iops < min_iops:
err_msg = (_("Invalid QoS specs: minimum IOPS cannot be greater "
"than maximum IOPS. "
"Requested minimum IOPS: %(min_iops)s "
"Requested maximum IOPS: %(total_iops)s.") %
{'min_iops': min_iops,
'total_iops': total_iops})
raise vmutils.HyperVException(err_msg)
unsupported_specs = [spec for spec in qos_specs if
spec not in self._SUPPORTED_QOS_SPECS]
if unsupported_specs:
LOG.warn(_LW('Ignoring unsupported qos specs: '
'%(unsupported_specs)s. '
'Supported qos specs: %(supported_qos_specs)s'),
{'unsupported_specs': unsupported_specs,
'supported_qos_specs': self._SUPPORTED_QOS_SPECS})
return min_iops, total_iops
def _bytes_per_sec_to_iops(self, no_bytes):
# Hyper-V uses normalized IOPS (8 KB increments)
# as IOPS allocation units.
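# e.g. a 2 MiB/s spec becomes (2097152 + 8191) / 8192 = 256 normalized
# IOPS; the added term makes the integer division round up.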
return (no_bytes + self._IOPS_BASE_SIZE - 1) / self._IOPS_BASE_SIZE
def _group_block_devices_by_type(self, block_device_mapping):
block_devices = collections.defaultdict(list)
for volume in block_device_mapping:
@@ -339,6 +387,11 @@ class ISCSIVolumeDriver(object):
def initialize_volume_connection(self, connection_info):
self.login_storage_target(connection_info)
def set_disk_qos_specs(self, connection_info, instance_name,
min_iops, max_iops):
LOG.warn(_LW("The iSCSI Hyper-V volume driver does not support QoS. "
"Ignoring QoS specs."))
class SMBFSVolumeDriver(object):
def __init__(self):
@@ -429,3 +482,9 @@ class SMBFSVolumeDriver(object):
def initialize_volume_connection(self, connection_info):
self.ensure_share_mounted(connection_info)
def set_disk_qos_specs(self, connection_info, instance_name,
min_iops, max_iops):
disk_path = self._get_disk_path(connection_info)
self._vmutils.set_disk_qos_specs(instance_name, disk_path,
min_iops, max_iops)
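
To make the conversion and validation above concrete, here is a minimal
standalone sketch (illustrative only, not the driver code itself); the
expected values match the unit tests further below.

# Normalized 8 KB I/O size, mirroring VolumeOps._IOPS_BASE_SIZE.
IOPS_BASE_SIZE = 8 * 1024

def parse_disk_qos_specs(qos_specs):
    def to_iops(no_bytes):
        # Integer division rounding up; the patch relies on Python 2's
        # integer '/' for the same effect.
        return (no_bytes + IOPS_BASE_SIZE - 1) // IOPS_BASE_SIZE

    total_iops = int(qos_specs.get(
        'total_iops_sec', to_iops(int(qos_specs.get('total_bytes_sec', 0)))))
    min_iops = int(qos_specs.get(
        'min_iops_sec', to_iops(int(qos_specs.get('min_bytes_sec', 0)))))
    if total_iops and total_iops < min_iops:
        raise ValueError("minimum IOPS cannot be greater than maximum IOPS")
    return min_iops, total_iops

assert parse_disk_qos_specs({'total_iops_sec': 10, 'min_iops_sec': 1}) == (1, 10)
assert parse_disk_qos_specs({'total_bytes_sec': 15 * 1024, 'min_bytes_sec': 0}) == (0, 2)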


@@ -51,6 +51,7 @@ from hyperv.nova import pathutils
from hyperv.nova import rdpconsoleutils
from hyperv.nova import serialconsoleops
from hyperv.nova import vhdutils
from hyperv.nova import vmops
from hyperv.nova import vmutils
from hyperv.nova import volumeops
from hyperv.nova import volumeutils
@@ -142,6 +143,8 @@ class HyperVAPIBaseTestCase(test.NoDBTestCase):
self._mox.StubOutWithMock(fake.PathUtils, 'get_instance_dir')
self._mox.StubOutWithMock(fake.PathUtils, 'get_vm_console_log_paths')
self._mox.StubOutWithMock(vmops.VMOps, '_set_instance_disk_qos_specs')
self._mox.StubOutWithMock(vmutils.VMUtils, 'vm_exists')
self._mox.StubOutWithMock(vmutils.VMUtils, 'create_vm')
self._mox.StubOutWithMock(vmutils.VMUtils, 'destroy_vm')
@@ -414,6 +417,8 @@ class HyperVAPITestCase(HyperVAPIBaseTestCase):
vmutils.VMUtils.set_vm_serial_port_connection(
mox.IsA(str), mox.IsA(int), mox.IsA(str))
vmops.VMOps._set_instance_disk_qos_specs(mox.IgnoreArg())
def _set_vm_name(self, vm_name):
self._test_vm_name = vm_name


@@ -30,6 +30,7 @@ from oslo_utils import units
from hyperv.nova import constants
from hyperv.nova import vmops
from hyperv.nova import vmutils
from hyperv.nova import volumeops
from hyperv.tests import fake_instance
from hyperv.tests.unit import test_base
@@ -392,15 +393,16 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
mock.sentinel.INFO, mock.sentinel.DEV_INFO)
@mock.patch('hyperv.nova.vif.get_vif_driver')
- @mock.patch('hyperv.nova.volumeops.VolumeOps'
-             '.attach_volumes')
+ @mock.patch.object(vmops.VMOps, '_set_instance_disk_qos_specs')
+ @mock.patch.object(volumeops.VolumeOps, 'attach_volumes')
@mock.patch.object(vmops.VMOps, '_attach_drive')
@mock.patch.object(vmops.VMOps, '_get_image_serial_port_settings')
@mock.patch.object(vmops.VMOps, '_create_vm_com_port_pipes')
@mock.patch.object(vmops.VMOps, '_configure_remotefx')
def _test_create_instance(self, mock_configure_remotefx, mock_create_pipes,
mock_get_port_settings, mock_attach_drive,
- mock_attach_volumes, mock_get_vif_driver,
+ mock_attach_volumes, mock_set_qos_specs,
+ mock_get_vif_driver,
fake_root_path, fake_ephemeral_path,
enable_instance_metrics,
vm_gen=constants.VM_GEN_1, remotefx=False):
@@ -479,6 +481,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
mock_enable = self._vmops._vmutils.enable_vm_metrics_collection
if enable_instance_metrics:
mock_enable.assert_called_once_with(mock_instance.name)
mock_set_qos_specs.assert_called_once_with(mock_instance)
def test_create_instance(self):
fake_ephemeral_path = mock.sentinel.FAKE_EPHEMERAL_PATH
@@ -1387,3 +1390,59 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
interactive_port: constants.SERIAL_PORT_TYPE_RW
}
self.assertEqual(expected_serial_ports, ret_val)
def test_get_instance_local_disks(self):
fake_instance_dir = 'fake_instance_dir'
fake_local_disks = [os.path.join(fake_instance_dir, disk_name)
for disk_name in ['root.vhd', 'configdrive.iso']]
fake_instance_disks = ['fake_remote_disk'] + fake_local_disks
mock_get_storage_paths = self._vmops._vmutils.get_vm_storage_paths
mock_get_storage_paths.return_value = [fake_instance_disks, []]
mock_get_instance_dir = self._vmops._pathutils.get_instance_dir
mock_get_instance_dir.return_value = fake_instance_dir
ret_val = self._vmops._get_instance_local_disks(
mock.sentinel.instance_name)
self.assertEqual(fake_local_disks, ret_val)
@mock.patch.object(vmops.VMOps, '_get_storage_qos_specs')
@mock.patch.object(vmops.VMOps, '_get_instance_local_disks')
def test_set_instance_disk_qos_specs(self, mock_get_local_disks,
mock_get_qos_specs):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_local_disks = [mock.sentinel.root_vhd_path,
mock.sentinel.eph_vhd_path]
mock_get_local_disks.return_value = mock_local_disks
mock_set_qos_specs = self._vmops._vmutils.set_disk_qos_specs
mock_get_qos_specs.return_value = [mock.sentinel.min_iops,
mock.sentinel.max_iops]
self._vmops._set_instance_disk_qos_specs(mock_instance)
mock_get_local_disks.assert_called_once_with(mock_instance.name)
expected_calls = [mock.call(mock_instance.name, disk_path,
mock.sentinel.min_iops,
mock.sentinel.max_iops)
for disk_path in mock_local_disks]
mock_set_qos_specs.assert_has_calls(expected_calls)
@mock.patch.object(volumeops.VolumeOps, 'parse_disk_qos_specs')
def test_get_storage_qos_specs(self, mock_parse_specs):
fake_extra_specs = {'spec_key': 'spec_value',
'storage_qos:min_bytes_sec':
mock.sentinel.min_bytes_sec,
'storage_qos:max_bytes_sec':
mock.sentinel.max_bytes_sec}
mock_instance = mock.Mock(flavor={'extra_specs': fake_extra_specs})
ret_val = self._vmops._get_storage_qos_specs(mock_instance)
expected_qos_specs_dict = {
'min_bytes_sec': mock.sentinel.min_bytes_sec,
'max_bytes_sec': mock.sentinel.max_bytes_sec
}
self.assertEqual(mock_parse_specs.return_value, ret_val)
mock_parse_specs.assert_called_once_with(expected_qos_specs_dict)


@@ -267,15 +267,20 @@ class VMUtilsV2TestCase(test_vmutils.VMUtilsTestCase):
self.assertEqual(mock.sentinel.FAKE_DVD_PATH1, ret_val[0])
@mock.patch.object(vmutilsv2.VMUtilsV2, '_get_vm_setting_data')
- def test_get_vm_gen(self, mock_get_vm_setting_data):
+ def _test_get_vm_gen(self, mock_get_vm_setting_data, vm_gen):
mock_vm = self._lookup_vm()
- mock_vm_settings = mock.Mock(
-     VirtualSystemSubType=self._vmutils._VIRTUAL_SYSTEM_SUBTYPE_GEN2)
- mock_get_vm_setting_data.return_value = mock_vm_settings
+ vm_gen_string = "Microsoft:Hyper-V:SubType:" + str(vm_gen)
+ mock_vssd = mock.MagicMock(VirtualSystemSubType=vm_gen_string)
+ mock_get_vm_setting_data.return_value = mock_vssd
- vm_gen = self._vmutils.get_vm_gen(mock_vm)
+ ret = self._vmutils.get_vm_gen(mock_vm)
+ self.assertEqual(vm_gen, ret)
- self.assertEqual(constants.VM_GEN_2, vm_gen)
def test_get_vm_generation_gen1(self):
self._test_get_vm_gen(vm_gen=constants.VM_GEN_1)
def test_get_vm_generation_gen2(self):
self._test_get_vm_gen(vm_gen=constants.VM_GEN_2)
@mock.patch.object(vmutilsv2.VMUtilsV2, '_get_new_resource_setting_data')
@mock.patch.object(vmutilsv2.VMUtilsV2, '_add_virt_resource')
@@ -355,3 +360,22 @@ class VMUtilsV2TestCase(test_vmutils.VMUtilsTestCase):
self._vmutils.enable_remotefx_video_adapter,
mock.sentinel.fake_vm_name, self._FAKE_MONITOR_COUNT,
constants.REMOTEFX_MAX_RES_1024x768)
@mock.patch.object(vmutilsv2.VMUtilsV2,
'_get_mounted_disk_resource_from_path')
@mock.patch.object(vmutilsv2.VMUtilsV2, '_modify_virt_resource')
def test_set_disk_qos_specs(self, mock_modify_virt_resource,
mock_get_disk_resource):
mock_disk = mock_get_disk_resource.return_value
self._vmutils.set_disk_qos_specs(mock.sentinel.vm_name,
mock.sentinel.disk_path,
mock.sentinel.min_iops,
mock.sentinel.max_iops)
mock_get_disk_resource.assert_called_once_with(
mock.sentinel.disk_path, is_physical=False)
self.assertEqual(mock.sentinel.max_iops, mock_disk.IOPSLimit)
self.assertEqual(mock.sentinel.min_iops, mock_disk.IOPSReservation)
mock_modify_virt_resource.assert_called_once_with(mock_disk,
None)


@@ -15,10 +15,11 @@
# under the License.
import contextlib
- import mock
import os
+ import mock
from nova import exception
from oslo_utils import units
from hyperv.nova import pathutils
from hyperv.nova import vmutils
@@ -69,6 +70,33 @@ class VolumeOpsTestCase(test_base.HyperVBaseTestCase):
mock.sentinel.instance_name,
fake_vol_conn_info, 0)
@mock.patch.object(volumeops.VolumeOps, '_get_volume_driver')
@mock.patch.object(volumeops.VolumeOps, 'parse_disk_qos_specs')
def test_attach_volume(self, mock_parse_qos_specs,
mock_get_volume_driver):
fake_conn_info = {
'data': {'qos_specs': mock.sentinel.qos_specs}
}
mock_volume_driver = mock_get_volume_driver.return_value
mock_parse_qos_specs.return_value = [
mock.sentinel.min_iops,
mock.sentinel.max_iops
]
self._volumeops.attach_volume(fake_conn_info,
mock.sentinel.instance_name,
mock.sentinel.ebs_root)
mock_volume_driver.attach_volume.assert_called_once_with(
fake_conn_info,
mock.sentinel.instance_name,
mock.sentinel.ebs_root)
mock_parse_qos_specs.assert_called_once_with(mock.sentinel.qos_specs)
mock_volume_driver.set_disk_qos_specs.assert_called_once_with(
fake_conn_info, mock.sentinel.instance_name,
mock.sentinel.min_iops, mock.sentinel.max_iops)
@mock.patch.object(volumeops.VolumeOps, '_get_volume_driver')
def test_disconnect_volumes(self, mock_get_volume_driver):
block_device_info = db_fakes.get_fake_block_device_info(
@@ -82,6 +110,39 @@ class VolumeOpsTestCase(test_base.HyperVBaseTestCase):
fake_volume_driver.disconnect_volumes.assert_called_once_with(
block_device_mapping)
def test_parse_disk_qos_specs_using_iops(self):
fake_qos_specs = {
'total_iops_sec': 10,
'min_iops_sec': 1,
}
ret_val = self._volumeops.parse_disk_qos_specs(fake_qos_specs)
expected_qos_specs = (fake_qos_specs['min_iops_sec'],
fake_qos_specs['total_iops_sec'])
self.assertEqual(expected_qos_specs, ret_val)
def test_parse_disk_qos_specs_using_bytes_per_sec(self):
fake_qos_specs = {
'total_bytes_sec': units.Ki * 15,
'min_bytes_sec': 0,
}
ret_val = self._volumeops.parse_disk_qos_specs(fake_qos_specs)
expected_qos_specs = (0, 2) # Normalized IOPS
self.assertEqual(expected_qos_specs, ret_val)
def test_parse_disk_qos_specs_exception(self):
fake_qos_specs = {
'total_iops_sec': 1,
'min_iops_sec': 2
}
self.assertRaises(vmutils.HyperVException,
self._volumeops.parse_disk_qos_specs,
fake_qos_specs)
class ISCSIVolumeDriverTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for Hyper-V ISCSIVolumeDriver class."""
@@ -305,3 +366,21 @@ class SMBFSVolumeDriverTestCase(test_base.HyperVBaseTestCase):
self._volume_driver.disconnect_volumes(block_device_mapping)
mock_unmount_smb_share.assert_called_once_with(
self._FAKE_SHARE_NORMALIZED)
@mock.patch.object(volumeops.SMBFSVolumeDriver, '_get_disk_path')
@mock.patch.object(vmutils.VMUtils, 'set_disk_qos_specs')
def test_set_disk_qos_specs(self, mock_set_qos_specs,
mock_get_disk_path):
self._volume_driver.set_disk_qos_specs(mock.sentinel.connection_info,
mock.sentinel.instance_name,
mock.sentinel.min_iops,
mock.sentinel.max_iops)
mock_disk_path = mock_get_disk_path.return_value
mock_get_disk_path.assert_called_once_with(
mock.sentinel.connection_info)
mock_set_qos_specs.assert_called_once_with(
mock.sentinel.instance_name,
mock_disk_path,
mock.sentinel.min_iops,
mock.sentinel.max_iops)