Merge "VMware: Implement retype for VMDK driver"

Jenkins 2014-09-27 22:24:11 +00:00 committed by Gerrit Code Review
commit e097567905
4 changed files with 520 additions and 32 deletions


@@ -29,6 +29,7 @@ from cinder.openstack.common import units
from cinder import test
from cinder.volume import configuration
from cinder.volume.drivers.vmware import api
from cinder.volume.drivers.vmware import datastore as hub
from cinder.volume.drivers.vmware import error_util
from cinder.volume.drivers.vmware import vim
from cinder.volume.drivers.vmware import vim_util
@@ -254,6 +255,9 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
self.assertRaises(error_util.VimFaultException, driver.create_volume,
volume)
# Clear side effects.
driver._select_ds_for_volume.side_effect = None
def test_success_wait_for_task(self):
"""Test successful wait_for_task."""
m = self.mox
@@ -501,12 +505,29 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
size = volume['size'] * units.Gi
driver._select_datastore_summary.assert_called_once_with(size, dss)
def test_get_disk_type(self):
@mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs')
def test_get_disk_type(self, get_volume_type_extra_specs):
"""Test _get_disk_type."""
volume = FakeObject()
volume['volume_type_id'] = None
self.assertEqual(vmdk.VMwareEsxVmdkDriver._get_disk_type(volume),
'thin')
# Test with no volume type.
volume = {'volume_type_id': None}
self.assertEqual(vmdk.THIN_VMDK_TYPE,
vmdk.VMwareEsxVmdkDriver._get_disk_type(volume))
# Test with valid vmdk_type.
volume_type_id = mock.sentinel.volume_type_id
volume = {'volume_type_id': volume_type_id}
get_volume_type_extra_specs.return_value = vmdk.THICK_VMDK_TYPE
self.assertEqual(vmdk.THICK_VMDK_TYPE,
vmdk.VMwareEsxVmdkDriver._get_disk_type(volume))
get_volume_type_extra_specs.assert_called_once_with(volume_type_id,
'vmware:vmdk_type')
# Test with invalid vmdk_type.
get_volume_type_extra_specs.return_value = 'sparse'
self.assertRaises(error_util.InvalidDiskTypeException,
vmdk.VMwareEsxVmdkDriver._get_disk_type,
volume)
def test_init_conn_with_instance_no_backing(self):
"""Test initialize_connection with instance and without backing."""
@@ -1400,6 +1421,163 @@ class VMwareEsxVmdkDriverTestCase(test.TestCase):
m.UnsetStubs()
m.VerifyAll()
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
@mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_retype(self, ds_sel, vops, get_volume_type_extra_specs,
get_volume_group_folder, generate_uuid,
delete_temp_backing):
self._test_retype(ds_sel, vops, get_volume_type_extra_specs,
get_volume_group_folder, generate_uuid,
delete_temp_backing)
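The stacked mock.patch decorators above apply bottom-up, so the decorator nearest the function (ds_sel) arrives as the first mock argument; that is why the parameter list reads in reverse order of the decorators. A minimal standalone sketch of that ordering:

    from unittest import mock  # the standalone 'mock' package on Python 2

    @mock.patch('os.path.exists')  # outermost patch -> last argument
    @mock.patch('os.path.isfile')  # innermost patch -> first argument
    def demo(isfile_mock, exists_mock):
        # Each mock replaces its target only while demo() runs.
        return isfile_mock is not exists_mock

    assert demo()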
def _test_retype(self, ds_sel, vops, get_volume_type_extra_specs,
get_volume_group_folder, generate_uuid,
delete_temp_backing):
self._driver._storage_policy_enabled = True
context = mock.sentinel.context
diff = mock.sentinel.diff
host = mock.sentinel.host
new_type = {'id': 'abc'}
# Test with in-use volume.
vol = {'size': 1, 'status': 'retyping', 'name': 'vol-1',
'volume_type_id': 'def', 'instance_uuid': '583a8dbb'}
self.assertFalse(self._driver.retype(context, vol, new_type, diff,
host))
# Test with no backing.
vops.get_backing.return_value = None
vol['instance_uuid'] = None
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
# Test with no disk type conversion, no profile change and
# compliant datastore.
ds_value = mock.sentinel.datastore_value
datastore = mock.Mock(value=ds_value)
vops.get_datastore.return_value = datastore
backing = mock.sentinel.backing
vops.get_backing.return_value = backing
get_volume_type_extra_specs.side_effect = [vmdk.THIN_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
None,
None]
ds_sel.is_datastore_compliant.return_value = True
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
# Test with no disk type conversion, profile change and
# compliant datastore.
new_profile = mock.sentinel.new_profile
get_volume_type_extra_specs.side_effect = [vmdk.THIN_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
new_profile]
ds_sel.is_datastore_compliant.return_value = True
profile_id = mock.sentinel.profile_id
ds_sel.get_profile_id.return_value = profile_id
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
vops.change_backing_profile.assert_called_once_with(backing,
profile_id)
# Test with disk type conversion, profile change and a backing with
# snapshots. Also test the no candidate datastore case.
get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
new_profile]
vops.snapshot_exists.return_value = True
ds_sel.select_datastore.return_value = ()
self.assertFalse(self._driver.retype(context, vol, new_type, diff,
host))
exp_req = {hub.DatastoreSelector.HARD_ANTI_AFFINITY_DS: [ds_value],
hub.DatastoreSelector.PROFILE_NAME: new_profile,
hub.DatastoreSelector.SIZE_BYTES: units.Gi}
ds_sel.select_datastore.assert_called_once_with(exp_req)
# Modify the previous case with a candidate datastore which is
# different than the backing's current datastore.
get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
new_profile]
vops.snapshot_exists.return_value = True
host = mock.sentinel.host
rp = mock.sentinel.rp
candidate_ds = mock.Mock(value=mock.sentinel.candidate_ds_value)
summary = mock.Mock(datastore=candidate_ds)
ds_sel.select_datastore.return_value = (host, rp, summary)
folder = mock.sentinel.folder
get_volume_group_folder.return_value = folder
vops.change_backing_profile.reset_mock()
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
vops.relocate_backing.assert_called_once_with(
backing, candidate_ds, rp, host, vmdk.THIN_VMDK_TYPE)
vops.move_backing_to_folder.assert_called_once_with(backing, folder)
vops.change_backing_profile.assert_called_once_with(backing,
profile_id)
# Test with disk type conversion, profile change, backing with
# no snapshots and candidate datastore which is same as the backing
# datastore.
get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
new_profile]
vops.snapshot_exists.return_value = False
summary.datastore = datastore
uuid = '025b654b-d4ed-47f9-8014-b71a7744eafc'
generate_uuid.return_value = uuid
clone = mock.sentinel.clone
vops.clone_backing.return_value = clone
vops.change_backing_profile.reset_mock()
self.assertTrue(self._driver.retype(context, vol, new_type, diff,
host))
vops.rename_backing.assert_called_once_with(backing, uuid)
vops.clone_backing.assert_called_once_with(
vol['name'], backing, None, volumeops.FULL_CLONE_TYPE,
datastore, vmdk.THIN_VMDK_TYPE)
delete_temp_backing.assert_called_once_with(backing)
vops.change_backing_profile.assert_called_once_with(clone,
profile_id)
# Modify the previous case with exception during clone.
get_volume_type_extra_specs.side_effect = [vmdk.THICK_VMDK_TYPE,
vmdk.THIN_VMDK_TYPE,
'gold-1',
new_profile]
vops.clone_backing.side_effect = error_util.VimException('error')
vops.rename_backing.reset_mock()
vops.change_backing_profile.reset_mock()
self.assertRaises(
error_util.VimException, self._driver.retype, context, vol,
new_type, diff, host)
exp_rename_calls = [mock.call(backing, uuid),
mock.call(backing, vol['name'])]
self.assertEqual(exp_rename_calls, vops.rename_backing.call_args_list)
self.assertFalse(vops.change_backing_profile.called)
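The four-element side_effect lists assigned throughout this test rely on mock treating an iterable side_effect as a queue: each call to the mock consumes and returns the next element, which here appears to map to the current and new disk-type lookups followed by the current and new profile lookups within a single retype call. A minimal sketch ('silver-1' is a made-up value):

    from unittest import mock

    spec_lookup = mock.Mock(
        side_effect=['thick', 'thin', 'gold-1', 'silver-1'])
    assert spec_lookup() == 'thick'     # 1st call: current disk type
    assert spec_lookup() == 'thin'      # 2nd call: new disk type
    assert spec_lookup() == 'gold-1'    # 3rd call: current profile
    assert spec_lookup() == 'silver-1'  # 4th call: new profile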
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_extend_vmdk_virtual_disk(self, volume_ops):
"""Test vmdk._extend_vmdk_virtual_disk."""
@@ -2288,6 +2466,9 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
driver._select_datastore_summary.assert_called_once_with(size,
filtered_dss)
# Clear side effects.
driver._filter_ds_by_profile.side_effect = None
@mock.patch.object(VMDK_DRIVER, 'volumeops')
def test_extend_vmdk_virtual_disk(self, volume_ops):
"""Test vmdk._extend_vmdk_virtual_disk."""
@@ -2364,6 +2545,19 @@ class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
_extend_virtual_disk,
fetch_optimized_image)
@mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
@mock.patch('cinder.openstack.common.uuidutils.generate_uuid')
@mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
@mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs')
@mock.patch.object(VMDK_DRIVER, 'volumeops')
@mock.patch.object(VMDK_DRIVER, 'ds_sel')
def test_retype(self, ds_sel, vops, get_volume_type_extra_specs,
get_volume_group_folder, generate_uuid,
delete_temp_backing):
self._test_retype(ds_sel, vops, get_volume_type_extra_specs,
get_volume_group_folder, generate_uuid,
delete_temp_backing)
@mock.patch.object(VMDK_DRIVER, '_select_ds_for_volume')
@mock.patch.object(VMDK_DRIVER, '_extend_vmdk_virtual_disk')
@mock.patch.object(VMDK_DRIVER, 'volumeops')


@@ -688,22 +688,33 @@ class VolumeOpsTestCase(test.TestCase):
self.assertEqual('', backing.fileName)
self.assertEqual('persistent', backing.diskMode)
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'_get_disk_device')
@mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps.'
'_get_relocate_spec')
def test_relocate_backing(self, get_relocate_spec):
def test_relocate_backing(self, get_relocate_spec, get_disk_device):
disk_device = mock.sentinel.disk_device
get_disk_device.return_value = disk_device
spec = mock.sentinel.relocate_spec
get_relocate_spec.return_value = spec
task = mock.sentinel.task
self.session.invoke_api.return_value = task
backing = mock.sentinel.backing
datastore = mock.sentinel.datastore
resource_pool = mock.sentinel.resource_pool
host = mock.sentinel.host
self.vops.relocate_backing(backing, datastore, resource_pool, host)
disk_type = mock.sentinel.disk_type
self.vops.relocate_backing(backing, datastore, resource_pool, host,
disk_type)
# Verify calls
disk_move_type = 'moveAllDiskBackingsAndAllowSharing'
get_disk_device.assert_called_once_with(backing)
get_relocate_spec.assert_called_once_with(datastore, resource_pool,
host, disk_move_type)
host, disk_move_type,
disk_type, disk_device)
self.session.invoke_api.assert_called_once_with(self.session.vim,
'RelocateVM_Task',
backing,
@@ -1001,6 +1012,50 @@ class VolumeOpsTestCase(test.TestCase):
newName=new_name)
self.session.wait_for_task.assert_called_once_with(task)
def test_change_backing_profile(self):
# Test change to empty profile.
reconfig_spec = mock.Mock()
empty_profile_spec = mock.sentinel.empty_profile_spec
self.session.vim.client.factory.create.side_effect = [
reconfig_spec, empty_profile_spec]
task = mock.sentinel.task
self.session.invoke_api.return_value = task
backing = mock.sentinel.backing
unique_profile_id = mock.sentinel.unique_profile_id
profile_id = mock.Mock(uniqueId=unique_profile_id)
self.vops.change_backing_profile(backing, profile_id)
self.assertEqual([empty_profile_spec], reconfig_spec.vmProfile)
self.session.invoke_api.assert_called_once_with(self.session.vim,
"ReconfigVM_Task",
backing,
spec=reconfig_spec)
self.session.wait_for_task.assert_called_once_with(task)
# Test change to non-empty profile.
profile_spec = mock.Mock()
self.session.vim.client.factory.create.side_effect = [
reconfig_spec, profile_spec]
self.session.invoke_api.reset_mock()
self.session.wait_for_task.reset_mock()
self.vops.change_backing_profile(backing, profile_id)
self.assertEqual([profile_spec], reconfig_spec.vmProfile)
self.assertEqual(unique_profile_id,
reconfig_spec.vmProfile[0].profileId)
self.session.invoke_api.assert_called_once_with(self.session.vim,
"ReconfigVM_Task",
backing,
spec=reconfig_spec)
self.session.wait_for_task.assert_called_once_with(task)
# Clear side effects.
self.session.vim.client.factory.create.side_effect = None
def test_delete_file(self):
file_mgr = mock.sentinel.file_manager
self.session.vim.service_content.fileManager = file_mgr


@@ -38,6 +38,7 @@ from cinder.openstack.common import units
from cinder.openstack.common import uuidutils
from cinder.volume import driver
from cinder.volume.drivers.vmware import api
from cinder.volume.drivers.vmware import datastore as hub
from cinder.volume.drivers.vmware import error_util
from cinder.volume.drivers.vmware import vim
from cinder.volume.drivers.vmware import vim_util
@@ -208,6 +209,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
# No storage policy based placement possible when connecting
# directly to ESX
self._storage_policy_enabled = False
self._ds_sel = None
@property
def session(self):
@@ -232,6 +234,13 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
max_objects)
return self._volumeops
@property
def ds_sel(self):
if not self._ds_sel:
self._ds_sel = hub.DatastoreSelector(self.volumeops,
self.session)
return self._ds_sel
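The selector exposed here is driven by a plain requirements dict keyed by DatastoreSelector constants; retype() below always supplies the size and, when needed, the target profile and a hard anti-affinity list naming the backing's current datastore. An illustrative sketch (values are made up; imports follow this module):

    from cinder.openstack.common import units
    from cinder.volume.drivers.vmware import datastore as hub

    req = {
        # Space the candidate datastore must accommodate.
        hub.DatastoreSelector.SIZE_BYTES: 1 * units.Gi,
        # Candidate must be compliant with this storage profile.
        hub.DatastoreSelector.PROFILE_NAME: 'gold-1',
        # Never pick these datastores, e.g. the backing's current one.
        hub.DatastoreSelector.HARD_ANTI_AFFINITY_DS: ['datastore-123'],
    }
    # Returns a (host, resource pool, summary) tuple, or () if nothing matches.
    best_candidate = ds_sel.select_datastore(req)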
def do_setup(self, context):
"""Perform validations and establish connection to server.
@@ -396,6 +405,13 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
{'datastore': best_summary, 'host_count': max_host_count})
return best_summary
def _get_extra_spec_storage_profile(self, type_id):
"""Get storage profile name in the given volume type's extra spec.
If there is no storage profile in the extra spec, default is None.
"""
return _get_volume_type_extra_spec(type_id, 'storage_profile')
def _get_storage_profile(self, volume):
"""Get storage profile associated with the given volume's volume_type.
@@ -403,10 +419,7 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
:return: String value of storage profile if volume type is associated
and contains storage_profile extra_spec option; None otherwise
"""
type_id = volume['volume_type_id']
if type_id is None:
return None
return _get_volume_type_extra_spec(type_id, 'storage_profile')
return self._get_extra_spec_storage_profile(volume['volume_type_id'])
def _filter_ds_by_profile(self, datastores, storage_profile):
"""Filter out datastores that do not match given storage profile.
@@ -462,18 +475,27 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
datastores)
return (folder, datastore_summary)
@staticmethod
def _get_extra_spec_disk_type(type_id):
"""Get disk type from the given volume type's extra spec.
If there is no disk type option, default is THIN_VMDK_TYPE.
"""
disk_type = _get_volume_type_extra_spec(type_id,
'vmdk_type',
default_value=THIN_VMDK_TYPE)
volumeops.VirtualDiskType.validate(disk_type)
return disk_type
@staticmethod
def _get_disk_type(volume):
"""Get disk type from volume type.
"""Get disk type from the given volume's volume type.
:param volume: Volume object
:return: Disk type
"""
return _get_volume_type_extra_spec(volume['volume_type_id'],
'vmdk_type',
(THIN_VMDK_TYPE, THICK_VMDK_TYPE,
EAGER_ZEROED_THICK_VMDK_TYPE),
THIN_VMDK_TYPE)
return VMwareEsxVmdkDriver._get_extra_spec_disk_type(
volume['volume_type_id'])
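A self-contained sketch of the resolution rules these helpers implement: a missing vmdk_type extra spec defaults to thin, the three supported values pass validation, and anything else raises InvalidDiskTypeException. Names mirror this module; this is an illustration, not the driver code:

    THIN, THICK, EZT = 'thin', 'thick', 'eagerZeroedThick'

    class InvalidDiskTypeException(Exception):
        """Stand-in for error_util.InvalidDiskTypeException."""

    def resolve_disk_type(extra_spec_value):
        disk_type = extra_spec_value or THIN      # no spec -> default to thin
        if disk_type not in (THIN, THICK, EZT):   # cf. VirtualDiskType.validate
            raise InvalidDiskTypeException(disk_type)
        return disk_type

    assert resolve_disk_type(None) == THIN
    assert resolve_disk_type('thick') == THICK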
def _get_storage_profile_id(self, volume):
storage_profile = self._get_storage_profile(volume)
@@ -1347,6 +1369,173 @@ class VMwareEsxVmdkDriver(driver.VolumeDriver):
LOG.info(_("Done copying volume %(vol)s to a new image %(img)s") %
{'vol': volume['name'], 'img': image_meta['name']})
def _in_use(self, volume):
"""Check if the given volume is in use."""
return volume['instance_uuid'] is not None
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
The retype is performed only if the volume is not in use. Retype is NOP
if the backing doesn't exist. If disk type conversion is needed, the
volume is cloned. If disk type conversion is needed and the volume
contains snapshots, the backing is relocated instead of cloning. The
backing is also relocated if the current datastore is not compliant
with the new storage profile (if any). Finally, the storage profile of
the backing VM is updated.
:param ctxt: Context
:param volume: A dictionary describing the volume to retype
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities (unused)
:returns: True if the retype occurred; False otherwise.
"""
# Can't attempt retype if the volume is in use.
if self._in_use(volume):
LOG.warn(_("Volume: %s is in use, can't retype."),
volume['name'])
return False
# If the backing doesn't exist, retype is NOP.
backing = self.volumeops.get_backing(volume['name'])
if backing is None:
LOG.debug("Backing for volume: %s doesn't exist; retype is NOP.",
volume['name'])
return True
# Check whether we need disk type conversion.
disk_type = VMwareEsxVmdkDriver._get_disk_type(volume)
new_disk_type = VMwareEsxVmdkDriver._get_extra_spec_disk_type(
new_type['id'])
need_disk_type_conversion = disk_type != new_disk_type
# Check whether we need to relocate the backing. If the backing
# contains snapshots, relocate is the only way to achieve disk type
# conversion.
need_relocate = (need_disk_type_conversion and
self.volumeops.snapshot_exists(backing))
datastore = self.volumeops.get_datastore(backing)
# Check whether we need to change the storage profile.
need_profile_change = False
is_compliant = True
new_profile = None
if self._storage_policy_enabled:
profile = self._get_storage_profile(volume)
new_profile = self._get_extra_spec_storage_profile(new_type['id'])
need_profile_change = profile != new_profile
# The current datastore may be compliant with the new profile.
is_compliant = self.ds_sel.is_datastore_compliant(datastore,
new_profile)
# No need to relocate or clone if there is no disk type conversion and
# the current datastore is compliant with the new profile or storage
# policy is disabled.
if not need_disk_type_conversion and is_compliant:
LOG.debug("Backing: %(backing)s for volume: %(name)s doesn't need "
"disk type conversion.",
{'backing': backing,
'name': volume['name']})
if self._storage_policy_enabled:
LOG.debug("Backing: %(backing)s for volume: %(name)s is "
"compliant with the new profile: %(new_profile)s.",
{'backing': backing,
'name': volume['name'],
'new_profile': new_profile})
else:
# Set requirements for datastore selection.
req = {}
req[hub.DatastoreSelector.SIZE_BYTES] = (volume['size'] *
units.Gi)
if need_relocate:
LOG.debug("Backing: %s should be relocated.", backing)
req[hub.DatastoreSelector.HARD_ANTI_AFFINITY_DS] = (
[datastore.value])
if need_profile_change:
LOG.debug("Backing: %(backing)s needs a profile change to: "
"%(profile)s.",
{'backing': backing,
'profile': new_profile})
req[hub.DatastoreSelector.PROFILE_NAME] = new_profile
# Select datastore satisfying the requirements.
best_candidate = self.ds_sel.select_datastore(req)
if not best_candidate:
# No candidate datastores; can't retype.
LOG.warn(_("There are no datastores matching new requirements;"
" can't retype volume: %s."),
volume['name'])
return False
(host, rp, summary) = best_candidate
new_datastore = summary.datastore
if datastore.value != new_datastore.value:
# Datastore changed; relocate the backing.
LOG.debug("Backing: %s needs to be relocated for retype.",
backing)
self.volumeops.relocate_backing(
backing, new_datastore, rp, host, new_disk_type)
dc = self.volumeops.get_dc(rp)
folder = self._get_volume_group_folder(dc)
self.volumeops.move_backing_to_folder(backing, folder)
elif need_disk_type_conversion:
# Same datastore, but clone is needed for disk type conversion.
LOG.debug("Backing: %s needs to be cloned for retype.",
backing)
new_backing = None
renamed = False
tmp_name = uuidutils.generate_uuid()
try:
self.volumeops.rename_backing(backing, tmp_name)
renamed = True
new_backing = self.volumeops.clone_backing(
volume['name'], backing, None,
volumeops.FULL_CLONE_TYPE, datastore, new_disk_type)
self._delete_temp_backing(backing)
backing = new_backing
except error_util.VimException:
with excutils.save_and_reraise_exception():
LOG.exception(_("Error occurred while cloning backing:"
" %s during retype."),
backing)
if renamed:
LOG.debug("Undo rename of backing: %(backing)s; "
"changing name from %(new_name)s to "
"%(old_name)s.",
{'backing': backing,
'new_name': tmp_name,
'old_name': volume['name']})
try:
self.volumeops.rename_backing(backing,
volume['name'])
except error_util.VimException:
LOG.warn(_("Changing backing: %(backing)s name"
" from %(new_name)s to %(old_name)s"
" failed."),
{'backing': backing,
'new_name': tmp_name,
'old_name': volume['name']})
# Update the backing's storage profile if needed.
if need_profile_change:
profile_id = None
if new_profile is not None:
profile_id = self.ds_sel.get_profile_id(new_profile)
self.volumeops.change_backing_profile(backing, profile_id)
# Retype is done.
LOG.debug("Volume: %s retype is done.", volume['name'])
return True
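The clone failure path above pairs a rename-first strategy with oslo's save_and_reraise_exception, so the undo (renaming the backing back) runs without swallowing the original error. A minimal sketch of the pattern with hypothetical do/undo callables:

    from cinder.openstack.common import excutils

    def run_with_undo(do, undo):
        """Run do(); on failure, attempt undo(), then re-raise the error."""
        try:
            do()
        except Exception:
            with excutils.save_and_reraise_exception():
                # Runs before the original exception is re-raised with
                # its traceback intact.
                try:
                    undo()
                except Exception:
                    pass  # retype() logs a warning here instead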
def extend_volume(self, volume, new_size):
"""Extend vmdk to new_size.


@@ -831,7 +831,8 @@ class VMwareVolumeOps(object):
LOG.debug("Spec for relocating the backing: %s.", relocate_spec)
return relocate_spec
def relocate_backing(self, backing, datastore, resource_pool, host):
def relocate_backing(
self, backing, datastore, resource_pool, host, disk_type=None):
"""Relocates backing to the input datastore and resource pool.
The implementation uses moveAllDiskBackingsAndAllowSharing disk move
@@ -841,15 +842,27 @@ class VMwareVolumeOps(object):
:param datastore: Reference to the datastore
:param resource_pool: Reference to the resource pool
:param host: Reference to the host
:param disk_type: destination disk type
"""
LOG.debug("Relocating backing: %(backing)s to datastore: %(ds)s "
"and resource pool: %(rp)s." %
{'backing': backing, 'ds': datastore, 'rp': resource_pool})
"and resource pool: %(rp)s with destination disk type: "
"%(disk_type)s.",
{'backing': backing,
'ds': datastore,
'rp': resource_pool,
'disk_type': disk_type})
# Relocate the volume backing
disk_move_type = 'moveAllDiskBackingsAndAllowSharing'
disk_device = None
if disk_type is not None:
disk_device = self._get_disk_device(backing)
relocate_spec = self._get_relocate_spec(datastore, resource_pool, host,
disk_move_type)
disk_move_type, disk_type,
disk_device)
task = self._session.invoke_api(self._session.vim, 'RelocateVM_Task',
backing, spec=relocate_spec)
LOG.debug("Initiated relocation of volume backing: %s." % backing)
@@ -1044,6 +1057,19 @@ class VMwareVolumeOps(object):
LOG.info(_("Successfully created clone: %s.") % new_backing)
return new_backing
def _reconfigure_backing(self, backing, reconfig_spec):
"""Reconfigure backing VM with the given spec."""
LOG.debug("Reconfiguring backing VM: %(backing)s with spec: %(spec)s.",
{'backing': backing,
'spec': reconfig_spec})
reconfig_task = self._session.invoke_api(self._session.vim,
"ReconfigVM_Task",
backing,
spec=reconfig_spec)
LOG.debug("Task: %s created for reconfiguring backing VM.",
reconfig_task)
self._session.wait_for_task(reconfig_task)
def attach_disk_to_backing(self, backing, size_in_kb, disk_type,
adapter_type, vmdk_ds_file_path):
"""Attach an existing virtual disk to the backing VM.
@@ -1055,6 +1081,13 @@ class VMwareVolumeOps(object):
:param vmdk_ds_file_path: datastore file path of the virtual disk to
be attached
"""
LOG.debug("Reconfiguring backing VM: %(backing)s to add new disk: "
"%(path)s with size (KB): %(size)d and adapter type: "
"%(adapter_type)s.",
{'backing': backing,
'path': vmdk_ds_file_path,
'size': size_in_kb,
'adapter_type': adapter_type})
cf = self._session.vim.client.factory
reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
specs = self._create_specs_for_disk_add(size_in_kb,
@@ -1062,16 +1095,7 @@ class VMwareVolumeOps(object):
adapter_type,
vmdk_ds_file_path)
reconfig_spec.deviceChange = specs
LOG.debug("Reconfiguring backing VM: %(backing)s with spec: %(spec)s.",
{'backing': backing,
'spec': reconfig_spec})
reconfig_task = self._session.invoke_api(self._session.vim,
"ReconfigVM_Task",
backing,
spec=reconfig_spec)
LOG.debug("Task: %s created for reconfiguring backing VM.",
reconfig_task)
self._session.wait_for_task(reconfig_task)
self._reconfigure_backing(backing, reconfig_spec)
LOG.debug("Backing VM: %s reconfigured with new disk.", backing)
def rename_backing(self, backing, new_name):
@@ -1093,6 +1117,32 @@ class VMwareVolumeOps(object):
{'backing': backing,
'new_name': new_name})
def change_backing_profile(self, backing, profile_id):
"""Change storage profile of the backing VM.
The current profile is removed if the new profile is None.
"""
LOG.debug("Reconfiguring backing VM: %(backing)s to change profile to:"
" %(profile)s.",
{'backing': backing,
'profile': profile_id})
cf = self._session.vim.client.factory
reconfig_spec = cf.create('ns0:VirtualMachineConfigSpec')
if profile_id is None:
vm_profile = cf.create('ns0:VirtualMachineEmptyProfileSpec')
vm_profile.dynamicType = 'profile'
else:
vm_profile = cf.create('ns0:VirtualMachineDefinedProfileSpec')
vm_profile.profileId = profile_id.uniqueId
reconfig_spec.vmProfile = [vm_profile]
self._reconfigure_backing(backing, reconfig_spec)
LOG.debug("Backing VM: %(backing)s reconfigured with new profile: "
"%(profile)s.",
{'backing': backing,
'profile': profile_id})
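Usage as exercised by retype(): the profile id is resolved from the profile name through the datastore selector, and passing None strips the backing's current profile. Sketch ('gold-1' is a made-up profile name):

    # Associate the backing with a named storage profile.
    profile_id = ds_sel.get_profile_id('gold-1')
    vops.change_backing_profile(backing, profile_id)

    # Remove whatever profile is currently associated.
    vops.change_backing_profile(backing, None)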
def delete_file(self, file_path, datacenter=None):
"""Delete file or folder on the datastore.