Hyper-V: attach volumes via SMB

Currently, the Hyper-V driver supports attaching volumes only via
iSCSI. This patch adds support for attaching volumes using VHD or
VHDX images hosted on SMB shares.

Note that the corresponding Cinder drivers, which allow exporting
images through SMB shares, have already been merged.

The workflow is simple: using the connection info, the driver
chooses the right volume driver and mounts the appropriate SMB share
using the supplied credentials. The only thing that needs to be
done next is to actually attach the volume to an instance using its
UNC path.

Change-Id: Id5de0e6843c1035235598a4d72a00b7555187fb4
Implements: blueprint hyper-v-smbfs-volume-support
This commit is contained in:
Lucian Petrut 2014-10-09 11:32:27 +03:00
parent 6f5fe6d2d7
commit 0e02db2980
18 changed files with 791 additions and 298 deletions

View File

@ -0,0 +1,33 @@
# Copyright 2014 Cloudbase Solutions Srl
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import test
class HyperVBaseTestCase(test.NoDBTestCase):
    """Common base class for Hyper-V unit tests.

    Fakes a Windows environment so the tests can run on any platform:
    ``sys.platform`` is forced to 'win32' and a stand-in ``wmi`` module
    is injected into the builtins namespace (the real one only exists
    on Windows).
    """

    def setUp(self):
        super(HyperVBaseTestCase, self).setUp()
        patchers = (mock.patch('sys.platform', 'win32'),
                    mock.patch('__builtin__.wmi', create=True))
        for patcher in patchers:
            patcher.start()
            self.addCleanup(patcher.stop)

View File

@ -20,14 +20,14 @@ from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import units
from nova import test
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import constants
from nova.virt.hyperv import hostops
CONF = cfg.CONF
class HostOpsTestCase(test.NoDBTestCase):
class HostOpsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V HostOps class."""
FAKE_ARCHITECTURE = 0
@ -40,10 +40,10 @@ class HostOpsTestCase(test.NoDBTestCase):
FAKE_TICK_COUNT = 1000000
def setUp(self):
super(HostOpsTestCase, self).setUp()
self._hostops = hostops.HostOps()
self._hostops._hostutils = mock.MagicMock()
self._hostops._pathutils = mock.MagicMock()
super(HostOpsTestCase, self).setUp()
def test_get_cpu_info(self):
mock_processors = mock.MagicMock()

View File

@ -16,7 +16,6 @@
Test suite for the Hyper-V driver and related APIs.
"""
import contextlib
import os
import shutil
import time
@ -170,6 +169,7 @@ class HyperVAPIBaseTestCase(test.NoDBTestCase):
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_storage_paths')
self._mox.StubOutWithMock(vmutils.VMUtils,
'get_controller_volume_paths')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_free_controller_slot')
self._mox.StubOutWithMock(vmutils.VMUtils,
'enable_vm_metrics_collection')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_id')
@ -449,10 +449,6 @@ class HyperVAPITestCase(HyperVAPIBaseTestCase):
self._mox.VerifyAll()
def _setup_destroy_mocks(self, destroy_disks=True):
fake_volume_drives = ['fake_volume_drive']
fake_target_iqn = 'fake_target_iqn'
fake_target_lun = 'fake_target_lun'
m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
m.AndReturn(True)
@ -461,17 +457,8 @@ class HyperVAPITestCase(HyperVAPIBaseTestCase):
self._setup_delete_vm_log_mocks()
m = vmutils.VMUtils.get_vm_storage_paths(func)
m.AndReturn(([], fake_volume_drives))
vmutils.VMUtils.destroy_vm(func)
m = self._conn._volumeops.get_target_from_disk_path(
fake_volume_drives[0])
m.AndReturn((fake_target_iqn, fake_target_lun))
self._mock_logout_storage_target(fake_target_iqn)
if destroy_disks:
m = fake.PathUtils.get_instance_dir(mox.IsA(str),
create_dir=False,
@ -776,8 +763,6 @@ class HyperVAPITestCase(HyperVAPIBaseTestCase):
fake_mounted_disk = "fake_mounted_disk"
fake_device_number = 0
fake_controller_path = 'fake_scsi_controller_path'
self._mox.StubOutWithMock(self._conn._volumeops,
'_get_free_controller_slot')
self._mock_login_storage_target(target_iqn, target_lun,
target_portal,
@ -797,7 +782,7 @@ class HyperVAPITestCase(HyperVAPIBaseTestCase):
m.AndReturn(fake_controller_path)
fake_free_slot = 1
m = self._conn._volumeops._get_free_controller_slot(
m = vmutils.VMUtils.get_free_controller_slot(
fake_controller_path)
m.AndReturn(fake_free_slot)
@ -879,10 +864,10 @@ class HyperVAPITestCase(HyperVAPIBaseTestCase):
self._volume_target_portal, self._volume_id)
mount_point = '/dev/sdc'
def fake_login_storage_target(connection_info):
def fake_login_storage_target(self, connection_info):
raise vmutils.HyperVException('Fake connection exception')
self.stubs.Set(self._conn._volumeops, '_login_storage_target',
self.stubs.Set(volumeops.ISCSIVolumeDriver, 'login_storage_target',
fake_login_storage_target)
self.assertRaises(vmutils.HyperVException, self._conn.attach_volume,
None, connection_info, instance_data, mount_point)
@ -1337,101 +1322,3 @@ class HyperVAPITestCase(HyperVAPIBaseTestCase):
self.assertEqual(CONF.my_ip, connect_info.host)
self.assertEqual(fake_port, connect_info.port)
self.assertEqual(fake_vm_id, connect_info.internal_access_path)
class VolumeOpsTestCase(HyperVAPIBaseTestCase):
"""Unit tests for VolumeOps class."""
def setUp(self):
super(VolumeOpsTestCase, self).setUp()
self.volumeops = volumeops.VolumeOps()
def test_get_mounted_disk_from_lun(self):
with contextlib.nested(
mock.patch.object(self.volumeops._volutils,
'get_device_number_for_target'),
mock.patch.object(self.volumeops._vmutils,
'get_mounted_disk_by_drive_number')
) as (mock_get_device_number_for_target,
mock_get_mounted_disk_by_drive_number):
mock_get_device_number_for_target.return_value = 0
mock_get_mounted_disk_by_drive_number.return_value = 'disk_path'
block_device_info = db_fakes.get_fake_block_device_info(
self._volume_target_portal, self._volume_id)
mapping = driver.block_device_info_get_mapping(block_device_info)
data = mapping[0]['connection_info']['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
disk = self.volumeops._get_mounted_disk_from_lun(target_iqn,
target_lun)
self.assertEqual(disk, 'disk_path')
def test_get_mounted_disk_from_lun_failure(self):
self.flags(mounted_disk_query_retry_count=1, group='hyperv')
with mock.patch.object(self.volumeops._volutils,
'get_device_number_for_target') as m_device_num:
m_device_num.side_effect = [None, -1]
block_device_info = db_fakes.get_fake_block_device_info(
self._volume_target_portal, self._volume_id)
mapping = driver.block_device_info_get_mapping(block_device_info)
data = mapping[0]['connection_info']['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
for attempt in xrange(1):
self.assertRaises(exception.NotFound,
self.volumeops._get_mounted_disk_from_lun,
target_iqn, target_lun)
def test_get_free_controller_slot_exception(self):
fake_drive = mock.MagicMock()
type(fake_drive).AddressOnParent = mock.PropertyMock(
side_effect=xrange(constants.SCSI_CONTROLLER_SLOTS_NUMBER))
fake_scsi_controller_path = 'fake_scsi_controller_path'
with mock.patch.object(self.volumeops._vmutils,
'get_attached_disks') as fake_get_attached_disks:
fake_get_attached_disks.return_value = (
[fake_drive] * constants.SCSI_CONTROLLER_SLOTS_NUMBER)
self.assertRaises(vmutils.HyperVException,
self.volumeops._get_free_controller_slot,
fake_scsi_controller_path)
def test_fix_instance_volume_disk_paths(self):
block_device_info = db_fakes.get_fake_block_device_info(
self._volume_target_portal, self._volume_id)
with contextlib.nested(
mock.patch.object(self.volumeops,
'_get_mounted_disk_from_lun'),
mock.patch.object(self.volumeops._vmutils,
'get_vm_scsi_controller'),
mock.patch.object(self.volumeops._vmutils,
'set_disk_host_resource'),
mock.patch.object(self.volumeops,
'ebs_root_in_block_devices')
) as (mock_get_mounted_disk_from_lun,
mock_get_vm_scsi_controller,
mock_set_disk_host_resource,
mock_ebs_in_block_devices):
mock_ebs_in_block_devices.return_value = False
mock_get_mounted_disk_from_lun.return_value = "fake_mounted_path"
mock_set_disk_host_resource.return_value = "fake_controller_path"
self.volumeops.fix_instance_volume_disk_paths(
"test_vm_name",
block_device_info)
mock_get_mounted_disk_from_lun.assert_called_with(
'iqn.2010-10.org.openstack:volume-' + self._volume_id, 1, True)
mock_get_vm_scsi_controller.assert_called_with("test_vm_name")
mock_set_disk_host_resource("test_vm_name", "fake_controller_path",
0, "fake_mounted_path")

View File

@ -16,40 +16,31 @@
import mock
from oslo.config import cfg
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import livemigrationops
from nova.virt.hyperv import vmutils
CONF = cfg.CONF
class LiveMigrationOpsTestCase(test.NoDBTestCase):
class LiveMigrationOpsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V LiveMigrationOps class."""
def setUp(self):
hostutils_patcher = mock.patch('nova.virt.hyperv.utilsfactory'
'.get_hostutils')
hostutils_patcher.start()
self.addCleanup(hostutils_patcher.stop)
super(LiveMigrationOpsTestCase, self).setUp()
self.context = 'fake_context'
self._livemigrops = livemigrationops.LiveMigrationOps()
self._livemigrops._livemigrutils = mock.MagicMock()
super(LiveMigrationOpsTestCase, self).setUp()
@mock.patch('nova.virt.hyperv.volumeops.VolumeOps.logout_storage_target')
@mock.patch('nova.virt.hyperv.vmops.VMOps.copy_vm_console_logs')
def _test_live_migration(self, mock_copy_logs, mock_logout_storage_target,
side_effect):
def _test_live_migration(self, mock_copy_logs, side_effect):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_post = mock.MagicMock()
mock_recover = mock.MagicMock()
fake_dest = mock.sentinel.DESTINATION
self._livemigrops._livemigrutils.live_migrate_vm.return_value = {
mock.sentinel.IQN: mock.sentinel.LUN_COUNT}
mock_logout_storage_target.side_effect = [side_effect]
self._livemigrops._livemigrutils.live_migrate_vm.side_effect = [
side_effect]
if side_effect is vmutils.HyperVException:
self.assertRaises(vmutils.HyperVException,
self._livemigrops.live_migration,
@ -69,8 +60,6 @@ class LiveMigrationOpsTestCase(test.NoDBTestCase):
mock_live_migr = self._livemigrops._livemigrutils.live_migrate_vm
mock_live_migr.assert_called_once_with(mock_instance.name,
fake_dest)
mock_logout_storage_target.assert_called_once_with(
mock.sentinel.IQN, mock.sentinel.LUN_COUNT)
mock_post.assert_called_once_with(self.context, mock_instance,
fake_dest, False)
@ -93,8 +82,8 @@ class LiveMigrationOpsTestCase(test.NoDBTestCase):
'.ebs_root_in_block_devices')
@mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image')
@mock.patch('nova.virt.hyperv.volumeops.VolumeOps'
'.login_storage_targets')
def test_pre_live_migration(self, mock_login_storage_targets,
'.initialize_volumes_connection')
def test_pre_live_migration(self, mock_initialize_connection,
mock_get_cached_image,
mock_ebs_root_in_block_devices):
mock_instance = fake_instance.fake_instance_obj(self.context)
@ -112,9 +101,17 @@ class LiveMigrationOpsTestCase(test.NoDBTestCase):
mock.sentinel.BLOCK_INFO)
mock_get_cached_image.assert_called_once_with(self.context,
mock_instance)
mock_login_storage_targets.assert_called_once_with(
mock_initialize_connection.assert_called_once_with(
mock.sentinel.BLOCK_INFO)
    @mock.patch('nova.virt.hyperv.volumeops.VolumeOps.disconnect_volumes')
    def test_post_live_migration(self, mock_disconnect_volumes):
        # After a live migration, the source host must disconnect the
        # volumes that were attached to the migrated instance.
        self._livemigrops.post_live_migration(
            self.context, mock.sentinel.instance,
            mock.sentinel.block_device_info)
        mock_disconnect_volumes.assert_called_once_with(
            mock.sentinel.block_device_info)
@mock.patch('nova.virt.hyperv.vmops.VMOps.log_vm_serial_output')
def test_post_live_migration_at_destination(self, mock_log_vm):
mock_instance = fake_instance.fake_instance_obj(self.context)

View File

@ -14,13 +14,13 @@
import mock
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import migrationops
from nova.virt.hyperv import vmutils
class MigrationOpsTestCase(test.NoDBTestCase):
class MigrationOpsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V MigrationOps class."""
_FAKE_TIMEOUT = 10
@ -30,23 +30,17 @@ class MigrationOpsTestCase(test.NoDBTestCase):
super(MigrationOpsTestCase, self).setUp()
self.context = 'fake-context'
# utilsfactory will check the host OS version via get_hostutils,
# in order to return the proper Utils Class, so it must be mocked.
patched_func = mock.patch.object(migrationops.utilsfactory,
"get_hostutils")
patched_func.start()
self.addCleanup(patched_func.stop)
self._migrationops = migrationops.MigrationOps()
self._migrationops._vmops = mock.MagicMock()
self._migrationops._vmutils = mock.MagicMock()
self._migrationops._pathutils = mock.Mock()
def test_check_and_attach_config_drive_unknown_path(self):
instance = fake_instance.fake_instance_obj(self.context,
expected_attrs=['system_metadata'])
instance.config_drive = 'True'
self._migrationops._pathutils.lookup_configdrive_path = mock.MagicMock(
return_value=None)
self._migrationops._pathutils.lookup_configdrive_path.return_value = (
None)
self.assertRaises(vmutils.HyperVException,
self._migrationops._check_and_attach_config_drive,
instance)

View File

@ -16,19 +16,20 @@ import os
import mock
from nova import test
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import constants
from nova.virt.hyperv import pathutils
class PathUtilsTestCase(test.NoDBTestCase):
class PathUtilsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V PathUtils class."""
def setUp(self):
super(PathUtilsTestCase, self).setUp()
self.fake_instance_dir = os.path.join('C:', 'fake_instance_dir')
self.fake_instance_name = 'fake_instance_name'
self._pathutils = pathutils.PathUtils()
super(PathUtilsTestCase, self).setUp()
def _mock_lookup_configdrive_path(self, ext):
self._pathutils.get_instance_dir = mock.MagicMock(
@ -56,3 +57,62 @@ class PathUtilsTestCase(test.NoDBTestCase):
configdrive_path = self._pathutils.lookup_configdrive_path(
self.fake_instance_name)
self.assertIsNone(configdrive_path)
    @mock.patch.object(pathutils.PathUtils, 'unmount_smb_share')
    @mock.patch('os.path.exists')
    def _test_check_smb_mapping(self, mock_exists, mock_unmount_smb_share,
                                existing_mappings=True, share_available=False):
        # check_smb_mapping should report the share as mounted only when
        # a mapping exists AND the share path is actually reachable; a
        # stale (unreachable) mapping must be forcefully unmounted.
        mock_exists.return_value = share_available
        fake_mappings = (
            [mock.sentinel.smb_mapping] if existing_mappings else [])
        self._pathutils._smb_conn.Msft_SmbMapping.return_value = (
            fake_mappings)

        ret_val = self._pathutils.check_smb_mapping(
            mock.sentinel.share_path)

        self.assertEqual(existing_mappings and share_available, ret_val)
        if existing_mappings and not share_available:
            mock_unmount_smb_share.assert_called_once_with(
                mock.sentinel.share_path, force=True)
    def test_check_mapping(self):
        # Default case: no existing mapping for the share.
        self._test_check_smb_mapping()
    def test_remake_unavailable_mapping(self):
        # A stale (unreachable) mapping should be detected and torn down.
        self._test_check_smb_mapping(existing_mappings=True,
                                     share_available=False)
    def test_available_mapping(self):
        # A healthy mapping should be reported as already mounted.
        self._test_check_smb_mapping(existing_mappings=True,
                                     share_available=True)
    def test_mount_smb_share(self):
        # Mounting should create a new SMB mapping for the remote path
        # using the given credentials.
        fake_create = self._pathutils._smb_conn.Msft_SmbMapping.Create
        self._pathutils.mount_smb_share(mock.sentinel.share_path,
                                        mock.sentinel.username,
                                        mock.sentinel.password)
        fake_create.assert_called_once_with(
            RemotePath=mock.sentinel.share_path,
            UserName=mock.sentinel.username,
            Password=mock.sentinel.password)
    def _test_unmount_smb_share(self, force=False):
        # Unmounting should look the mapping up by remote path and
        # remove it, propagating the 'force' flag.
        fake_mapping = mock.Mock()
        smb_mapping_class = self._pathutils._smb_conn.Msft_SmbMapping
        smb_mapping_class.return_value = [fake_mapping]

        self._pathutils.unmount_smb_share(mock.sentinel.share_path,
                                          force)

        smb_mapping_class.assert_called_once_with(
            RemotePath=mock.sentinel.share_path)
        fake_mapping.Remove.assert_called_once_with(Force=force)
    def test_soft_unmount_smb_share(self):
        # Regular (non-forced) unmount.
        self._test_unmount_smb_share()
    def test_force_unmount_smb_share(self):
        # Forced unmount, used when the share became unavailable.
        self._test_unmount_smb_share(force=True)

View File

@ -18,28 +18,22 @@ import os
import mock
from nova.compute import task_states
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import snapshotops
class SnapshotOpsTestCase(test.NoDBTestCase):
class SnapshotOpsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V SnapshotOps class."""
def setUp(self):
# utilsfactory will check the host OS version via get_hostutils,
# in order to return the proper Utils Class, so it must be mocked.
patched_func = mock.patch.object(snapshotops.utilsfactory,
'get_hostutils')
patched_func.start()
self.addCleanup(patched_func.stop)
super(SnapshotOpsTestCase, self).setUp()
self.context = 'fake_context'
self._snapshotops = snapshotops.SnapshotOps()
self._snapshotops._pathutils = mock.MagicMock()
self._snapshotops._vmutils = mock.MagicMock()
self._snapshotops._vhdutils = mock.MagicMock()
super(SnapshotOpsTestCase, self).setUp()
@mock.patch('nova.image.glance.get_remote_image_service')
def test_save_glance_image(self, mock_get_remote_image_service):

View File

@ -21,8 +21,8 @@ from oslo.config import cfg
from oslo.utils import units
from nova import exception
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmops
from nova.virt.hyperv import vmutils
@ -30,7 +30,7 @@ from nova.virt.hyperv import vmutils
CONF = cfg.CONF
class VMOpsTestCase(test.NoDBTestCase):
class VMOpsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V VMOps class."""
_FAKE_TIMEOUT = 2
@ -49,13 +49,6 @@ class VMOpsTestCase(test.NoDBTestCase):
super(VMOpsTestCase, self).setUp()
self.context = 'fake-context'
# utilsfactory will check the host OS version via get_hostutils,
# in order to return the proper Utils Class, so it must be mocked.
patched_func = mock.patch.object(vmops.utilsfactory,
"get_hostutils")
patched_func.start()
self.addCleanup(patched_func.stop)
self._vmops = vmops.VMOps()
self._vmops._vmutils = mock.MagicMock()
self._vmops._vhdutils = mock.MagicMock()

View File

@ -250,6 +250,19 @@ class VMUtilsTestCase(test.NoDBTestCase):
path = self._vmutils.get_vm_scsi_controller(self._FAKE_VM_NAME)
self.assertEqual(self._FAKE_RES_PATH, path)
    def test_get_free_controller_slot_exception(self):
        # When every slot on the SCSI controller is occupied, asking for
        # a free slot should raise HyperVException.
        fake_drive = mock.MagicMock()
        type(fake_drive).AddressOnParent = mock.PropertyMock(
            side_effect=xrange(constants.SCSI_CONTROLLER_SLOTS_NUMBER))

        with mock.patch.object(self._vmutils,
                               'get_attached_disks') as fake_get_attached_disks:
            fake_get_attached_disks.return_value = (
                [fake_drive] * constants.SCSI_CONTROLLER_SLOTS_NUMBER)
            self.assertRaises(vmutils.HyperVException,
                              self._vmutils.get_free_controller_slot,
                              mock.sentinel.scsi_controller_path)
def test_get_vm_ide_controller(self):
self._prepare_get_vm_controller(self._vmutils._IDE_CTRL_RES_SUB_TYPE)
path = self._vmutils.get_vm_ide_controller(self._FAKE_VM_NAME,
@ -506,7 +519,7 @@ class VMUtilsTestCase(test.NoDBTestCase):
self._vmutils._conn.query.return_value = [mock_disk_1, mock_disk_2]
physical_disk = self._vmutils._get_mounted_disk_resource_from_path(
self._FAKE_MOUNTED_DISK_PATH)
self._FAKE_MOUNTED_DISK_PATH, True)
self.assertEqual(mock_disk_2, physical_disk)

View File

@ -0,0 +1,307 @@
# Copyright 2014 Cloudbase Solutions Srl
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import os
import mock
from nova import exception
from nova.tests.unit.virt.hyperv import db_fakes
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeops
FAKE_TARGET_PORTAL = 'fakeportal:3260'
FAKE_VOLUME_ID = 'fake_volume_id'
class VolumeOpsTestCase(test_base.HyperVBaseTestCase):
    """Unit tests for VolumeOps class."""

    def setUp(self):
        super(VolumeOpsTestCase, self).setUp()
        self._volumeops = volumeops.VolumeOps()

    def test_get_volume_driver_exception(self):
        # An unsupported driver_volume_type should raise
        # VolumeDriverNotFound.
        fake_conn_info = {'driver_volume_type': 'fake_driver'}
        self.assertRaises(exception.VolumeDriverNotFound,
                          self._volumeops._get_volume_driver,
                          connection_info=fake_conn_info)

    def test_fix_instance_volume_disk_paths(self):
        # Each attached volume should be dispatched to the proper
        # volume driver's fix_instance_volume_disk_path, along with its
        # disk address (index in the block device mapping).
        block_device_info = db_fakes.get_fake_block_device_info(
            FAKE_TARGET_PORTAL, FAKE_VOLUME_ID)
        fake_vol_conn_info = (
            block_device_info['block_device_mapping'][0]['connection_info'])

        with contextlib.nested(
            mock.patch.object(self._volumeops,
                              '_get_volume_driver'),
            mock.patch.object(self._volumeops,
                              'ebs_root_in_block_devices')
        ) as (mock_get_volume_driver,
              mock_ebs_in_block_devices):
            fake_vol_driver = mock_get_volume_driver.return_value
            mock_ebs_in_block_devices.return_value = False

            self._volumeops.fix_instance_volume_disk_paths(
                mock.sentinel.instance_name,
                block_device_info)

            func = fake_vol_driver.fix_instance_volume_disk_path
            func.assert_called_once_with(
                mock.sentinel.instance_name,
                fake_vol_conn_info, 0)

    @mock.patch.object(volumeops.VolumeOps, '_get_volume_driver')
    def test_disconnect_volumes(self, mock_get_volume_driver):
        # Disconnecting is delegated to the volume driver, which
        # receives the whole block device mapping.
        block_device_info = db_fakes.get_fake_block_device_info(
            FAKE_TARGET_PORTAL, FAKE_VOLUME_ID)
        block_device_mapping = (
            block_device_info['block_device_mapping'])
        fake_volume_driver = mock_get_volume_driver.return_value

        self._volumeops.disconnect_volumes(block_device_info)
        fake_volume_driver.disconnect_volumes.assert_called_once_with(
            block_device_mapping)
class ISCSIVolumeDriverTestCase(test_base.HyperVBaseTestCase):
    """Unit tests for Hyper-V ISCSIVolumeDriver class."""

    def setUp(self):
        super(ISCSIVolumeDriverTestCase, self).setUp()
        self._volume_driver = volumeops.ISCSIVolumeDriver()

    def test_get_mounted_disk_from_lun(self):
        # The device number resolved for the iSCSI target/LUN pair
        # should be translated into the mounted disk path.
        with contextlib.nested(
            mock.patch.object(self._volume_driver._volutils,
                              'get_device_number_for_target'),
            mock.patch.object(self._volume_driver._vmutils,
                              'get_mounted_disk_by_drive_number')
        ) as (mock_get_device_number_for_target,
              mock_get_mounted_disk_by_drive_number):
            mock_get_device_number_for_target.return_value = 0
            mock_get_mounted_disk_by_drive_number.return_value = (
                mock.sentinel.disk_path)

            disk = self._volume_driver._get_mounted_disk_from_lun(
                mock.sentinel.target_iqn,
                mock.sentinel.target_lun)
            self.assertEqual(disk, mock.sentinel.disk_path)

    def test_fix_instance_volume_disk_path(self):
        # NOTE: renamed from 'test_fix_instace_volume_disk_path' to fix
        # the typo in the method name.
        # The driver should point the disk resource at the given address
        # to the disk currently mounted for the volume's iSCSI target.
        block_device_info = db_fakes.get_fake_block_device_info(
            FAKE_TARGET_PORTAL, FAKE_VOLUME_ID)
        fake_vol_conn_info = (
            block_device_info['block_device_mapping'][0]['connection_info'])

        with contextlib.nested(
            mock.patch.object(self._volume_driver,
                              '_get_mounted_disk_from_lun'),
            mock.patch.object(self._volume_driver._vmutils,
                              'get_vm_scsi_controller'),
            mock.patch.object(self._volume_driver._vmutils,
                              'set_disk_host_resource')
        ) as (mock_get_mounted_disk_from_lun,
              mock_get_vm_scsi_controller,
              mock_set_disk_host_resource):
            mock_get_mounted_disk_from_lun.return_value = (
                mock.sentinel.mounted_path)
            mock_get_vm_scsi_controller.return_value = (
                mock.sentinel.controller_path)

            self._volume_driver.fix_instance_volume_disk_path(
                mock.sentinel.instance_name,
                fake_vol_conn_info,
                mock.sentinel.disk_address)

            mock_get_mounted_disk_from_lun.assert_called_with(
                'iqn.2010-10.org.openstack:volume-' + FAKE_VOLUME_ID,
                1, True)
            mock_get_vm_scsi_controller.assert_called_with(
                mock.sentinel.instance_name)
            mock_set_disk_host_resource.assert_called_once_with(
                mock.sentinel.instance_name, mock.sentinel.controller_path,
                mock.sentinel.disk_address, mock.sentinel.mounted_path)

    @mock.patch('time.sleep')
    def test_get_mounted_disk_from_lun_failure(self, fake_sleep):
        # With a single retry configured and no device number ever
        # resolved, the lookup should fail with NotFound.
        self.flags(mounted_disk_query_retry_count=1, group='hyperv')

        with mock.patch.object(self._volume_driver._volutils,
                               'get_device_number_for_target') as m_device_num:
            m_device_num.side_effect = [None, -1]

            self.assertRaises(exception.NotFound,
                              self._volume_driver._get_mounted_disk_from_lun,
                              mock.sentinel.target_iqn,
                              mock.sentinel.target_lun)

    @mock.patch.object(volumeops.ISCSIVolumeDriver, 'logout_storage_target')
    def test_disconnect_volumes(self, mock_logout_storage_target):
        # Disconnecting should log out each iSCSI target once, passing
        # the number of LUNs exposed by that target.
        block_device_info = db_fakes.get_fake_block_device_info(
            FAKE_TARGET_PORTAL, FAKE_VOLUME_ID)
        block_device_mapping = block_device_info['block_device_mapping']
        fake_target_iqn = 'iqn.2010-10.org.openstack:volume-' + FAKE_VOLUME_ID

        self._volume_driver.disconnect_volumes(block_device_mapping)
        mock_logout_storage_target.assert_called_once_with(fake_target_iqn, 1)
class SMBFSVolumeDriverTestCase(test_base.HyperVBaseTestCase):
    """Unit tests for the Hyper-V SMBFSVolumeDriver class."""

    # Shares come in with forward slashes in the connection info, while
    # Windows expects UNC paths using backslashes.
    _FAKE_SHARE = '//1.2.3.4/fake_share'
    _FAKE_SHARE_NORMALIZED = _FAKE_SHARE.replace('/', '\\')
    _FAKE_DISK_NAME = 'fake_volume_name.vhdx'
    _FAKE_USERNAME = 'fake_username'
    _FAKE_PASSWORD = 'fake_password'
    _FAKE_SMB_OPTIONS = '-o username=%s,password=%s' % (_FAKE_USERNAME,
                                                        _FAKE_PASSWORD)
    _FAKE_CONNECTION_INFO = {'data': {'export': _FAKE_SHARE,
                                      'name': _FAKE_DISK_NAME,
                                      'options': _FAKE_SMB_OPTIONS}}

    def setUp(self):
        super(SMBFSVolumeDriverTestCase, self).setUp()
        self._volume_driver = volumeops.SMBFSVolumeDriver()

    # NOTE: stacked @mock.patch decorators pass mocks in reverse
    # (bottom-up) order, matching the argument list below.
    @mock.patch.object(volumeops.SMBFSVolumeDriver, '_parse_credentials')
    @mock.patch.object(volumeops.SMBFSVolumeDriver, 'ensure_share_mounted')
    @mock.patch.object(volumeops.SMBFSVolumeDriver, '_get_disk_path')
    @mock.patch.object(vmutils.VMUtils, 'get_vm_scsi_controller')
    @mock.patch.object(vmutils.VMUtils, 'get_free_controller_slot')
    @mock.patch.object(vmutils.VMUtils, 'attach_drive')
    def _test_attach_volume(self, mock_attach_drive,
                            mock_get_free_controller_slot,
                            mock_get_vm_scsi_controller,
                            mock_get_disk_path,
                            mock_ensure_share_mounted,
                            mock_parse_credentials,
                            image_exists=True):
        # Attaching should mount the share, resolve the disk's UNC path
        # and attach it to a free slot of the instance's SCSI
        # controller; failures from attach_drive must propagate.
        mock_parse_credentials.return_value = (
            mock.sentinel.username, self._FAKE_PASSWORD)
        mock_get_vm_scsi_controller.return_value = (
            mock.sentinel.controller_path)
        mock_get_free_controller_slot.return_value = (
            mock.sentinel.controller_slot)
        mock_get_disk_path.return_value = (
            mock.sentinel.disk_path)

        if image_exists:
            self._volume_driver.attach_volume(
                self._FAKE_CONNECTION_INFO,
                mock.sentinel.instance_name)

            mock_ensure_share_mounted.assert_called_with(
                self._FAKE_CONNECTION_INFO)
            mock_get_disk_path.assert_called_with(
                self._FAKE_CONNECTION_INFO)
            mock_get_vm_scsi_controller.assert_called_with(
                mock.sentinel.instance_name)
            mock_get_free_controller_slot.assert_called_with(
                mock.sentinel.controller_path)
            mock_attach_drive.assert_called_with(
                mock.sentinel.instance_name, mock.sentinel.disk_path,
                mock.sentinel.controller_path,
                mock.sentinel.controller_slot)
        else:
            mock_attach_drive.side_effect = (
                vmutils.HyperVException())
            self.assertRaises(vmutils.HyperVException,
                              self._volume_driver.attach_volume,
                              self._FAKE_CONNECTION_INFO,
                              mock.sentinel.instance_name)

    def test_attach_volume(self):
        self._test_attach_volume()

    def test_attach_non_existing_image(self):
        self._test_attach_volume(image_exists=False)

    @mock.patch.object(volumeops.SMBFSVolumeDriver, '_get_disk_path')
    @mock.patch.object(vmutils.VMUtils, 'detach_vm_disk')
    @mock.patch.object(pathutils.PathUtils, 'unmount_smb_share')
    def test_detach_volume(self, mock_unmount_smb_share, mock_detach_vm_disk,
                           mock_get_disk_path):
        # Detaching should remove the virtual (non-physical) disk from
        # the instance.
        mock_get_disk_path.return_value = (
            mock.sentinel.disk_path)

        self._volume_driver.detach_volume(self._FAKE_CONNECTION_INFO,
                                          mock.sentinel.instance_name)

        mock_detach_vm_disk.assert_called_once_with(
            mock.sentinel.instance_name, mock.sentinel.disk_path,
            is_physical=False)

    def test_parse_credentials(self):
        # Username and password should be extracted from the SMBFS
        # mount options string.
        username, password = self._volume_driver._parse_credentials(
            self._FAKE_SMB_OPTIONS)
        self.assertEqual(self._FAKE_USERNAME, username)
        self.assertEqual(self._FAKE_PASSWORD, password)

    def test_get_disk_path(self):
        # The disk path is the image name joined to the normalized
        # (backslash) share path.
        expected = os.path.join(self._FAKE_SHARE_NORMALIZED,
                                self._FAKE_DISK_NAME)
        disk_path = self._volume_driver._get_disk_path(
            self._FAKE_CONNECTION_INFO)
        self.assertEqual(expected, disk_path)

    @mock.patch.object(volumeops.SMBFSVolumeDriver, '_parse_credentials')
    @mock.patch.object(pathutils.PathUtils, 'check_smb_mapping')
    @mock.patch.object(pathutils.PathUtils, 'mount_smb_share')
    def _test_ensure_mounted(self, mock_mount_smb_share,
                             mock_check_smb_mapping, mock_parse_credentials,
                             is_mounted=False):
        # The share should be mounted only if it is not mapped already.
        mock_check_smb_mapping.return_value = is_mounted
        mock_parse_credentials.return_value = (
            self._FAKE_USERNAME, self._FAKE_PASSWORD)

        self._volume_driver.ensure_share_mounted(
            self._FAKE_CONNECTION_INFO)

        if is_mounted:
            self.assertFalse(
                mock_mount_smb_share.called)
        else:
            mock_mount_smb_share.assert_called_once_with(
                self._FAKE_SHARE_NORMALIZED,
                username=self._FAKE_USERNAME,
                password=self._FAKE_PASSWORD)

    def test_ensure_mounted_new_share(self):
        self._test_ensure_mounted()

    def test_ensure_already_mounted(self):
        self._test_ensure_mounted(is_mounted=True)

    @mock.patch.object(pathutils.PathUtils, 'unmount_smb_share')
    def test_disconnect_volumes(self, mock_unmount_smb_share):
        # Disconnecting should unmount the share backing the volumes.
        block_device_mapping = [
            {'connection_info': self._FAKE_CONNECTION_INFO}]
        self._volume_driver.disconnect_volumes(block_device_mapping)
        mock_unmount_smb_share.assert_called_once_with(
            self._FAKE_SHARE_NORMALIZED)

View File

@ -148,6 +148,11 @@ class HyperVDriver(driver.ComputeDriver):
block_device_info,
network_info)
    def post_live_migration(self, context, instance, block_device_info,
                            migrate_data=None):
        """Clean up on the source host after a successful live migration.

        Delegates to LiveMigrationOps, which disconnects the instance's
        volumes from this (source) host. `migrate_data` is accepted for
        interface compatibility and is not used here.
        """
        self._livemigrationops.post_live_migration(context, instance,
                                                   block_device_info)
def post_live_migration_at_destination(self, context, instance,
network_info,
block_migration=False,

View File

@ -66,11 +66,8 @@ class LiveMigrationOps(object):
try:
self._vmops.copy_vm_console_logs(instance_name, dest)
iscsi_targets = self._livemigrutils.live_migrate_vm(instance_name,
dest)
for target_iqn, target_luns_count in iscsi_targets.items():
self._volumeops.logout_storage_target(target_iqn,
target_luns_count)
self._livemigrutils.live_migrate_vm(instance_name,
dest)
except Exception:
with excutils.save_and_reraise_exception():
LOG.debug("Calling live migration recover_method "
@ -93,7 +90,11 @@ class LiveMigrationOps(object):
if not boot_from_volume:
self._imagecache.get_cached_image(context, instance)
self._volumeops.login_storage_targets(block_device_info)
self._volumeops.initialize_volumes_connection(block_device_info)
    @check_os_version_requirement
    def post_live_migration(self, context, instance, block_device_info):
        """Disconnect the migrated instance's volumes from this host."""
        self._volumeops.disconnect_volumes(block_device_info)
@check_os_version_requirement
def post_live_migration_at_destination(self, ctxt, instance_ref,

View File

@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import collections
import sys
if sys.platform == 'win32':
@ -121,14 +120,11 @@ class LiveMigrationUtils(object):
volutils_remote = volumeutilsv2.VolumeUtilsV2(dest_host)
disk_paths_remote = {}
iscsi_targets = collections.defaultdict(int)
for (rasd_rel_path, disk_path) in disk_paths.items():
target = self._volutils.get_target_from_disk_path(disk_path)
if target:
(target_iqn, target_lun) = target
iscsi_targets[target_iqn] += 1
dev_num = volutils_remote.get_device_number_for_target(
target_iqn, target_lun)
disk_path_remote = (
@ -139,7 +135,7 @@ class LiveMigrationUtils(object):
LOG.debug("Could not retrieve iSCSI target "
"from disk path: %s", disk_path)
return (disk_paths_remote, iscsi_targets)
return disk_paths_remote
def _update_planned_vm_disk_resources(self, vmutils_remote, conn_v2_remote,
planned_vm, vm_name,
@ -229,15 +225,13 @@ class LiveMigrationUtils(object):
rmt_ip_addr_list = self._get_remote_ip_address_list(conn_v2_remote,
dest_host)
iscsi_targets = {}
planned_vm = None
disk_paths = self._get_physical_disk_paths(vm_name)
if disk_paths:
vmutils_remote = vmutilsv2.VMUtilsV2(dest_host)
(disk_paths_remote,
iscsi_targets) = self._get_remote_disk_data(vmutils_remote,
disk_paths,
dest_host)
disk_paths_remote = self._get_remote_disk_data(vmutils_remote,
disk_paths,
dest_host)
planned_vm = self._create_remote_planned_vm(conn_v2_local,
conn_v2_remote,
@ -251,6 +245,3 @@ class LiveMigrationUtils(object):
new_resource_setting_data = self._get_vhd_setting_data(vm)
self._live_migrate_vm(conn_v2_local, vm, planned_vm, rmt_ip_addr_list,
new_resource_setting_data, dest_host)
# In case the caller wants to log off the targets after migration
return iscsi_targets

View File

@ -15,6 +15,10 @@
import os
import shutil
import sys
if sys.platform == 'win32':
import wmi
from oslo.config import cfg
@ -22,6 +26,7 @@ from nova.i18n import _
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
LOG = logging.getLogger(__name__)
@ -41,6 +46,9 @@ CONF.import_opt('instances_path', 'nova.compute.manager')
class PathUtils(object):
def __init__(self):
self._smb_conn = wmi.WMI(moniker=r"root\Microsoft\Windows\SMB")
def open(self, path, mode):
"""Wrapper on __builtin__.open used to simplify unit testing."""
import __builtin__
@ -171,3 +179,52 @@ class PathUtils(object):
remote_server)
console_log_path = os.path.join(instance_dir, 'console.log')
return console_log_path, console_log_path + '.1'
def check_smb_mapping(self, smbfs_share):
    """Verify whether an SMB share is mapped and actually reachable.

    A mapping that exists but cannot be accessed is forcefully removed
    so that a fresh mount can be attempted by the caller.

    :param smbfs_share: UNC path of the share to check.
    :returns: True if the share is mapped and accessible, else False.
    """
    existing_mappings = self._smb_conn.Msft_SmbMapping(
        RemotePath=smbfs_share)
    if not existing_mappings:
        return False
    if os.path.exists(smbfs_share):
        LOG.debug('Share already mounted: %s', smbfs_share)
        return True
    LOG.debug('Share exists but is unavailable: %s ', smbfs_share)
    self.unmount_smb_share(smbfs_share, force=True)
    return False
def mount_smb_share(self, smbfs_share, username=None, password=None):
    """Map an SMB share, optionally authenticating with the given
    credentials.

    :param smbfs_share: UNC path of the share to mount.
    :param username: optional user name for the mapping.
    :param password: optional password for the mapping.
    :raises HyperVException: if the WMI mapping call fails.
    """
    try:
        LOG.debug('Mounting share: %s', smbfs_share)
        self._smb_conn.Msft_SmbMapping.Create(RemotePath=smbfs_share,
                                              UserName=username,
                                              Password=password)
    except wmi.x_wmi as exc:
        # Interpolate the details into the message with '%'. The
        # original code built a (msg, mapping) tuple, so the
        # placeholders were never expanded in the raised exception.
        err_msg = (_(
            'Unable to mount SMBFS share: %(smbfs_share)s '
            'WMI exception: %(wmi_exc)s') % {'smbfs_share': smbfs_share,
                                             'wmi_exc': exc})
        raise vmutils.HyperVException(err_msg)
def unmount_smb_share(self, smbfs_share, force=False):
    """Remove the mapping of an SMB share.

    :param smbfs_share: UNC path of the share to unmount.
    :param force: remove the mapping even if the share is in use.
    :raises HyperVException: when a forced unmount fails.
    """
    mappings = self._smb_conn.Msft_SmbMapping(RemotePath=smbfs_share)
    if not mappings:
        LOG.debug('Share %s is not mounted. Skipping unmount.',
                  smbfs_share)
        # Nothing to do; return early instead of iterating an empty list.
        return
    for mapping in mappings:
        # Due to a bug in the WMI module, getting the output of
        # methods returning None will raise an AttributeError
        try:
            mapping.Remove(Force=force)
        except AttributeError:
            pass
        except wmi.x_wmi:
            # If this fails, a 'Generic Failure' exception is raised.
            # This happens even if we unforcefully unmount an in-use
            # share, for which reason we'll simply ignore it in this
            # case.
            if force:
                # Interpolate the share path into the message; passing it
                # as a second constructor argument left '%s' unexpanded.
                raise vmutils.HyperVException(
                    _("Could not unmount share: %s") % smbfs_share)

View File

@ -396,11 +396,8 @@ class VMOps(object):
# Stop the VM first.
self.power_off(instance)
storage = self._vmutils.get_vm_storage_paths(instance_name)
(disk_files, volume_drives) = storage
self._vmutils.destroy_vm(instance_name)
self._volumeops.disconnect_volumes(volume_drives)
self._volumeops.disconnect_volumes(block_device_info)
else:
LOG.debug("Instance not found", instance=instance)

View File

@ -170,6 +170,7 @@ class VMUtils(object):
return summary_info_dict
def _lookup_vm_check(self, vm_name):
vm = self._lookup_vm(vm_name)
if not vm:
raise exception.NotFound(_('VM not found: %s') % vm_name)
@ -315,13 +316,16 @@ class VMUtils(object):
def get_attached_disks(self, scsi_controller_path):
volumes = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = "
"'%(res_sub_type)s' AND "
"Parent = '%(parent)s'" %
"WHERE (ResourceSubType = "
"'%(res_sub_type)s' OR "
"ResourceSubType='%(res_sub_type_virt)s')"
" AND Parent = '%(parent)s'" %
{"class_name":
self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
'res_sub_type':
self._PHYS_DISK_RES_SUB_TYPE,
'res_sub_type_virt':
self._DISK_RES_SUB_TYPE,
'parent':
scsi_controller_path.replace("'", "''")})
return volumes
@ -362,11 +366,14 @@ class VMUtils(object):
drive_type=constants.DISK):
vm = self._lookup_vm_check(vm_name)
ctrller_path = self._get_vm_ide_controller(vm, ctrller_addr)
self._attach_drive(vm, path, ctrller_path, drive_addr, drive_type)
self.attach_drive(vm_name, path, ctrller_path, drive_addr, drive_type)
def _attach_drive(self, vm, path, ctrller_path, drive_addr, drive_type):
def attach_drive(self, vm_name, path, ctrller_path, drive_addr,
drive_type=constants.DISK):
"""Create a drive and attach it to the vm."""
vm = self._lookup_vm_check(vm_name)
if drive_type == constants.DISK:
res_sub_type = self._DISK_DRIVE_RES_SUB_TYPE
elif drive_type == constants.DVD:
@ -647,23 +654,39 @@ class VMUtils(object):
snapshot_path)
self.check_ret_val(ret_val, job_path)
def detach_vm_disk(self, vm_name, disk_path):
def detach_vm_disk(self, vm_name, disk_path, is_physical=True):
vm = self._lookup_vm_check(vm_name)
physical_disk = self._get_mounted_disk_resource_from_path(disk_path)
if physical_disk:
self._remove_virt_resource(physical_disk, vm.path_())
disk_resource = self._get_mounted_disk_resource_from_path(disk_path,
is_physical)
def _get_mounted_disk_resource_from_path(self, disk_path):
physical_disks = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = '%(res_sub_type)s'" %
{"class_name":
self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
'res_sub_type':
self._PHYS_DISK_RES_SUB_TYPE})
for physical_disk in physical_disks:
if physical_disk.HostResource:
if physical_disk.HostResource[0].lower() == disk_path.lower():
return physical_disk
if disk_resource:
parent = self._conn.query("SELECT * FROM "
"Msvm_ResourceAllocationSettingData "
"WHERE __PATH = '%s'" %
disk_resource.Parent)[0]
self._remove_virt_resource(disk_resource, vm.path_())
if not is_physical:
self._remove_virt_resource(parent, vm.path_())
def _get_mounted_disk_resource_from_path(self, disk_path, is_physical):
    """Find the WMI disk resource attached at the given host path.

    :param disk_path: the disk's host resource path (a physical drive
        path for pass-through disks, or an image path for virtual disks).
    :param is_physical: selects pass-through disk resources vs virtual
        hard disk resources; the two live in different WMI resource
        classes and subtypes.
    :returns: the matching resource object, or None if no attached disk
        uses this path.
    """
    if is_physical:
        class_name = self._RESOURCE_ALLOC_SETTING_DATA_CLASS
        res_sub_type = self._PHYS_DISK_RES_SUB_TYPE
    else:
        class_name = self._STORAGE_ALLOC_SETTING_DATA_CLASS
        res_sub_type = self._HARD_DISK_RES_SUB_TYPE
    disk_resources = self._conn.query("SELECT * FROM %(class_name)s "
                                      "WHERE ResourceSubType = "
                                      "'%(res_sub_type)s'" %
                                      {"class_name": class_name,
                                       "res_sub_type": res_sub_type})
    for disk_resource in disk_resources:
        # Windows paths are case insensitive, so compare lowercased.
        if disk_resource.HostResource:
            if disk_resource.HostResource[0].lower() == disk_path.lower():
                return disk_resource
def get_mounted_disk_by_drive_number(self, device_number):
mounted_disks = self._conn.query("SELECT * FROM Msvm_DiskDrive "
@ -688,6 +711,15 @@ class VMUtils(object):
disk_data[disk.path().RelPath] = disk.HostResource[0]
return disk_data
def get_free_controller_slot(self, scsi_controller_path):
    """Return the first unused address slot on the given SCSI controller.

    :raises HyperVException: when every slot is already occupied.
    """
    occupied = set(int(disk.AddressOnParent)
                   for disk in self.get_attached_disks(
                       scsi_controller_path))
    for candidate in xrange(constants.SCSI_CONTROLLER_SLOTS_NUMBER):
        if candidate not in occupied:
            return candidate
    raise HyperVException(_("Exceeded the maximum number of slots"))
def enable_vm_metrics_collection(self, vm_name):
raise NotImplementedError(_("Metrics collection is not supported on "
"this version of Hyper-V"))

View File

@ -117,9 +117,12 @@ class VMUtilsV2(vmutils.VMUtils):
return [s for s in vmsettings if
s.VirtualSystemType == self._VIRTUAL_SYSTEM_TYPE_REALIZED][0]
def _attach_drive(self, vm, path, ctrller_path, drive_addr, drive_type):
def attach_drive(self, vm_name, path, ctrller_path, drive_addr,
drive_type=constants.DISK):
"""Create a drive and attach it to the vm."""
vm = self._lookup_vm_check(vm_name)
if drive_type == constants.DISK:
res_sub_type = self._DISK_DRIVE_RES_SUB_TYPE
elif drive_type == constants.DVD:

View File

@ -18,6 +18,8 @@
Management class for Storage-related functions (attach, detach, etc).
"""
import collections
import os
import re
import time
from oslo.config import cfg
@ -27,7 +29,6 @@ from nova import exception
from nova.i18n import _, _LE, _LW
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.hyperv import constants
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmutils
@ -61,19 +62,19 @@ class VolumeOps(object):
"""
def __init__(self):
self._hostutils = utilsfactory.get_hostutils()
self._vmutils = utilsfactory.get_vmutils()
self._volutils = utilsfactory.get_volumeutils()
self._initiator = None
self._default_root_device = 'vda'
self.volume_drivers = {'smbfs': SMBFSVolumeDriver(),
'iscsi': ISCSIVolumeDriver()}
def ebs_root_in_block_devices(self, block_device_info):
if block_device_info:
root_device = block_device_info.get('root_device_name')
if not root_device:
root_device = self._default_root_device
return self._volutils.volume_in_mapping(root_device,
block_device_info)
def _get_volume_driver(self, driver_type=None, connection_info=None):
if connection_info:
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
return self.volume_drivers[driver_type]
def attach_volumes(self, block_device_info, instance_name, ebs_root):
mapping = driver.block_device_info_get_mapping(block_device_info)
@ -85,12 +86,82 @@ class VolumeOps(object):
for vol in mapping:
self.attach_volume(vol['connection_info'], instance_name)
def login_storage_targets(self, block_device_info):
def disconnect_volumes(self, block_device_info):
    """Disconnect all attached volumes, one volume driver at a time.

    Devices are grouped by connection type so each driver can release
    shared resources (targets, shares) exactly once.
    """
    mapping = driver.block_device_info_get_mapping(block_device_info)
    grouped = self._group_block_devices_by_type(mapping)
    for driver_type, grouped_devices in grouped.items():
        self._get_volume_driver(driver_type).disconnect_volumes(
            grouped_devices)
def attach_volume(self, connection_info, instance_name, ebs_root=False):
    """Dispatch the attach to the driver matching the connection type."""
    self._get_volume_driver(
        connection_info=connection_info).attach_volume(
            connection_info, instance_name, ebs_root)
def detach_volume(self, connection_info, instance_name):
    """Dispatch the detach to the driver matching the connection type."""
    self._get_volume_driver(
        connection_info=connection_info).detach_volume(
            connection_info, instance_name)
def ebs_root_in_block_devices(self, block_device_info):
    """Report whether the root device is part of the volume mapping.

    Returns None when no block device info is provided.
    """
    if not block_device_info:
        return None
    root_device = (block_device_info.get('root_device_name')
                   or self._default_root_device)
    return self._volutils.volume_in_mapping(root_device,
                                            block_device_info)
def fix_instance_volume_disk_paths(self, instance_name, block_device_info):
    """Refresh stale disk paths for each volume attached to an instance.

    Each volume driver fixes its own disk's path; the per-volume disk
    address follows the mapping order.
    """
    mapping = driver.block_device_info_get_mapping(block_device_info)
    if self.ebs_root_in_block_devices(block_device_info):
        # The root (boot) volume is not on the SCSI controller; skip it.
        mapping = mapping[1:]
    for disk_address, vol in enumerate(mapping):
        connection_info = vol['connection_info']
        volume_driver = self._get_volume_driver(
            connection_info=connection_info)
        volume_driver.fix_instance_volume_disk_path(
            instance_name, connection_info, disk_address)
def get_volume_connector(self, instance):
    """Build the connector dict Cinder uses to export volumes to this
    host.

    The iSCSI initiator name is looked up once and cached; a warning is
    logged when it cannot be determined.
    """
    if not self._initiator:
        self._initiator = self._volutils.get_iscsi_initiator()
        if not self._initiator:
            LOG.warning(_LW('Could not determine iscsi initiator name'),
                        instance=instance)
    return {'ip': CONF.my_block_storage_ip,
            'host': CONF.host,
            'initiator': self._initiator}
def initialize_volumes_connection(self, block_device_info):
mapping = driver.block_device_info_get_mapping(block_device_info)
for vol in mapping:
self._login_storage_target(vol['connection_info'])
connection_info = vol['connection_info']
volume_driver = self._get_volume_driver(
connection_info=connection_info)
volume_driver.initialize_volume_connection(connection_info)
def _login_storage_target(self, connection_info):
def _group_block_devices_by_type(self, block_device_mapping):
block_devices = collections.defaultdict(list)
for volume in block_device_mapping:
connection_info = volume['connection_info']
volume_type = connection_info.get('driver_volume_type')
block_devices[volume_type].append(volume)
return block_devices
class ISCSIVolumeDriver(object):
def __init__(self):
    # VM management and iSCSI volume utilities; the factory picks the
    # implementation matching the detected Hyper-V/OS version.
    self._vmutils = utilsfactory.get_vmutils()
    self._volutils = utilsfactory.get_volumeutils()
def login_storage_target(self, connection_info):
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
@ -125,6 +196,26 @@ class VolumeOps(object):
# Wait for the target to be mounted
self._get_mounted_disk_from_lun(target_iqn, target_lun, True)
def disconnect_volumes(self, block_device_mapping):
    """Log out of each iSCSI target once all of its LUNs are released.

    The per-target LUN count lets the logout helper decide whether any
    other consumers still use the target.
    """
    lun_counts = collections.defaultdict(int)
    for device in block_device_mapping:
        iqn = device['connection_info']['data']['target_iqn']
        lun_counts[iqn] += 1
    for iqn, released_luns in lun_counts.items():
        self.logout_storage_target(iqn, released_luns)
def logout_storage_target(self, target_iqn, disconnected_luns_count=1):
    """Log off an iSCSI target, but only when no LUNs remain in use.

    :param disconnected_luns_count: number of LUNs being released now;
        logout happens only if this accounts for every LUN the target
        still exposes.
    """
    luns_in_use = self._volutils.get_target_lun_count(target_iqn)
    if luns_in_use != disconnected_luns_count:
        LOG.debug("Skipping disconnecting target %s as there "
                  "are LUNs still being used.", target_iqn)
    else:
        LOG.debug("Logging off storage target %s", target_iqn)
        self._volutils.logout_storage_target(target_iqn)
def attach_volume(self, connection_info, instance_name, ebs_root=False):
"""Attach a volume to the SCSI controller or to the IDE controller if
ebs_root is True
@ -134,7 +225,7 @@ class VolumeOps(object):
{'connection_info': connection_info,
'instance_name': instance_name})
try:
self._login_storage_target(connection_info)
self.login_storage_target(connection_info)
data = connection_info['data']
target_lun = data['target_lun']
@ -154,7 +245,7 @@ class VolumeOps(object):
# Find the SCSI controller for the vm
ctrller_path = self._vmutils.get_vm_scsi_controller(
instance_name)
slot = self._get_free_controller_slot(ctrller_path)
slot = self._vmutils.get_free_controller_slot(ctrller_path)
self._vmutils.attach_volume_to_controller(instance_name,
ctrller_path,
@ -167,31 +258,6 @@ class VolumeOps(object):
if target_iqn:
self.logout_storage_target(target_iqn)
def _get_free_controller_slot(self, scsi_controller_path):
attached_disks = self._vmutils.get_attached_disks(scsi_controller_path)
used_slots = [int(disk.AddressOnParent) for disk in attached_disks]
for slot in xrange(constants.SCSI_CONTROLLER_SLOTS_NUMBER):
if slot not in used_slots:
return slot
raise vmutils.HyperVException("Exceeded the maximum number of slots")
def detach_volumes(self, block_device_info, instance_name):
mapping = driver.block_device_info_get_mapping(block_device_info)
for vol in mapping:
self.detach_volume(vol['connection_info'], instance_name)
def logout_storage_target(self, target_iqn, disconnected_luns_count=1):
total_available_luns = self._volutils.get_target_lun_count(
target_iqn)
if total_available_luns == disconnected_luns_count:
LOG.debug("Logging off storage target %s", target_iqn)
self._volutils.logout_storage_target(target_iqn)
else:
LOG.debug("Skipping disconnecting target %s as there "
"are LUNs still being used.", target_iqn)
def detach_volume(self, connection_info, instance_name):
"""Detach a volume to the SCSI controller."""
LOG.debug("Detach_volume: %(connection_info)s "
@ -213,18 +279,6 @@ class VolumeOps(object):
self.logout_storage_target(target_iqn)
def get_volume_connector(self, instance):
if not self._initiator:
self._initiator = self._volutils.get_iscsi_initiator()
if not self._initiator:
LOG.warning(_LW('Could not determine iscsi initiator name'),
instance=instance)
return {
'ip': CONF.my_block_storage_ip,
'host': CONF.host,
'initiator': self._initiator,
}
def _get_mounted_disk_from_lun(self, target_iqn, target_lun,
wait_for_device=False):
# The WMI query in get_device_number_for_target can incorrectly
@ -261,39 +315,114 @@ class VolumeOps(object):
'for target_iqn: %s') % target_iqn)
return mounted_disk_path
def disconnect_volumes(self, volume_drives):
targets = collections.defaultdict(int)
for volume_drive in volume_drives:
target = self._volutils.get_target_from_disk_path(
volume_drive)
if target:
target_iqn = target[0]
targets[target_iqn] += 1
else:
LOG.debug("Could not retrieve iSCSI target from disk path: ",
volume_drive)
for target_iqn in targets:
self.logout_storage_target(target_iqn, targets[target_iqn])
def get_target_from_disk_path(self, physical_drive_path):
    """Return the iSCSI (target_iqn, target_lun) backing a physical disk.

    Thin delegation to the volume utils; callers elsewhere treat a
    falsy result as "not an iSCSI-backed disk".
    """
    return self._volutils.get_target_from_disk_path(physical_drive_path)
def fix_instance_volume_disk_paths(self, instance_name, block_device_info):
mapping = driver.block_device_info_get_mapping(block_device_info)
def fix_instance_volume_disk_path(self, instance_name, connection_info,
disk_address):
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
if self.ebs_root_in_block_devices(block_device_info):
mapping = mapping[1:]
mounted_disk_path = self._get_mounted_disk_from_lun(
target_iqn, target_lun, True)
ctrller_path = self._vmutils.get_vm_scsi_controller(instance_name)
self._vmutils.set_disk_host_resource(
instance_name, ctrller_path, disk_address, mounted_disk_path)
disk_address = 0
for vol in mapping:
data = vol['connection_info']['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
def get_target_lun_count(self, target_iqn):
    """Return the number of LUNs exposed by the given iSCSI target."""
    return self._volutils.get_target_lun_count(target_iqn)
mounted_disk_path = self._get_mounted_disk_from_lun(
target_iqn, target_lun, True)
ctrller_path = self._vmutils.get_vm_scsi_controller(instance_name)
self._vmutils.set_disk_host_resource(
instance_name, ctrller_path, disk_address, mounted_disk_path)
disk_address += 1
def initialize_volume_connection(self, connection_info):
    """Prepare a volume for attach by logging into its iSCSI target."""
    self.login_storage_target(connection_info)
class SMBFSVolumeDriver(object):
    """Attaches Cinder volumes exposed as VHD/VHDX images on SMB shares."""

    def __init__(self):
        self._pathutils = utilsfactory.get_pathutils()
        self._vmutils = utilsfactory.get_vmutils()
        self._volutils = utilsfactory.get_volumeutils()
        # Credentials may be embedded in the share's mount options string.
        self._username_regex = re.compile(r'user(?:name)?=([^, ]+)')
        self._password_regex = re.compile(r'pass(?:word)?=([^, ]+)')

    def attach_volume(self, connection_info, instance_name, ebs_root=False):
        """Attach the volume's image file to the instance.

        The image goes on IDE controller 0, slot 0 when it is the boot
        (ebs_root) disk; otherwise on the first free SCSI slot.

        :raises HyperVException: if the drive cannot be attached.
        """
        self.ensure_share_mounted(connection_info)
        image_path = self._get_disk_path(connection_info)
        try:
            if ebs_root:
                controller_path = self._vmutils.get_vm_ide_controller(
                    instance_name, 0)
                slot = 0
            else:
                controller_path = self._vmutils.get_vm_scsi_controller(
                    instance_name)
                slot = self._vmutils.get_free_controller_slot(
                    controller_path)
            self._vmutils.attach_drive(instance_name,
                                       image_path,
                                       controller_path,
                                       slot)
        except vmutils.HyperVException as exn:
            LOG.exception(_LE('Attach volume failed: %s'), exn)
            raise vmutils.HyperVException(_('Unable to attach volume '
                                            'to instance %s') % instance_name)

    def detach_volume(self, connection_info, instance_name):
        """Detach the volume's image file and unmount its backing share."""
        LOG.debug("Detaching volume: %(connection_info)s "
                  "from %(instance_name)s",
                  {'connection_info': connection_info,
                   'instance_name': instance_name})
        image_path = self._get_disk_path(connection_info)
        share_path = self._get_export_path(connection_info)
        self._vmutils.detach_vm_disk(instance_name, image_path,
                                     is_physical=False)
        self._pathutils.unmount_smb_share(share_path)

    def disconnect_volumes(self, block_device_mapping):
        """Unmount each distinct share backing the given block devices."""
        shares = set(self._get_export_path(device['connection_info'])
                     for device in block_device_mapping)
        for share in shares:
            self._pathutils.unmount_smb_share(share)

    def _get_export_path(self, connection_info):
        # Normalize the Cinder-provided export to a Windows UNC path.
        return connection_info['data']['export'].replace('/', '\\')

    def _get_disk_path(self, connection_info):
        """Return the full UNC path of the volume's image file."""
        share_path = self._get_export_path(connection_info)
        return os.path.join(share_path, connection_info['data']['name'])

    def ensure_share_mounted(self, connection_info):
        """Mount the share backing this volume if it is not already."""
        share_path = self._get_export_path(connection_info)
        if not self._pathutils.check_smb_mapping(share_path):
            options = connection_info['data'].get('options', '')
            username, password = self._parse_credentials(options)
            self._pathutils.mount_smb_share(share_path,
                                            username=username,
                                            password=password)

    def _parse_credentials(self, opts_str):
        """Extract (username, password) from a mount options string.

        A 'guest' username means anonymous access and is mapped to None.
        """
        user_matches = self._username_regex.findall(opts_str)
        username = (user_matches[0]
                    if user_matches and user_matches[0] != 'guest'
                    else None)
        pass_matches = self._password_regex.findall(opts_str)
        password = pass_matches[0] if pass_matches else None
        return username, password

    def fix_instance_volume_disk_path(self, instance_name, connection_info,
                                      disk_address):
        # UNC image paths are stable across reboots; re-mounting the
        # share is all that is needed.
        self.ensure_share_mounted(connection_info)

    def initialize_volume_connection(self, connection_info):
        self.ensure_share_mounted(connection_info)