Adds Nova Hyper-V Driver as is

This commit includes all the modules from
nova.virt.hyperv and nova.tests.unit.virt.hyperv
as is.
Author: Claudiu Belu
Date: 2015-03-02 01:34:14 -08:00
parent 62de0250e0
commit 5714d32fd0
59 changed files with 11709 additions and 0 deletions

hyperv/__init__.py (new empty file)

hyperv/nova/README.rst (new file, 44 lines)

@@ -0,0 +1,44 @@
Hyper-V Volumes Management
=============================================
To enable the volume features, first enable the iSCSI initiator service on the
Windows compute nodes and set it to start automatically:
sc config msiscsi start= auto
net start msiscsi
On Windows Server 2012, it is important to execute the following commands to
prevent the volumes from being brought online by default:
diskpart
san policy=OfflineAll
exit
How to check if your iSCSI configuration is working properly:
On your OpenStack controller:
1. Create a volume, e.g. with "nova volume-create 1", and note the generated
volume id
On Windows:
2. iscsicli QAddTargetPortal <your_iSCSI_target>
3. iscsicli ListTargets
The output should contain the iqn related to your volume:
iqn.2010-10.org.openstack:volume-<volume_id>
How to test Boot from volume in Hyper-V from the OpenStack dashboard:
1. First of all, create a volume
2. Get the volume ID of the created volume
3. Download and extract the following VHD image on the cloud controller:
http://dev.opennebula.org/attachments/download/482/ttylinux.vhd.gz
4. sudo dd if=/path/to/vhdfileofstep3
of=/dev/nova-volumes/volume-XXXXX <- related to the volume ID from step 2
5. Launch an instance from any image (this is not important because we are
just booting from a volume) from the dashboard, and don't forget to select
boot from volume and to select the volume created in step 2. Important: the
device name must be "vda".
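
As a quick way to script the check above, a minimal sketch that shells out to
the same "iscsicli ListTargets" command and looks for the expected IQN; the
volume id below is a placeholder:

    # Minimal sketch: verify the target for a volume is visible to the host.
    import subprocess

    volume_id = "00000000-0000-0000-0000-000000000000"  # placeholder volume id
    expected_iqn = "iqn.2010-10.org.openstack:volume-%s" % volume_id

    output = subprocess.check_output(["iscsicli", "ListTargets"])
    if expected_iqn in output.decode(errors="ignore"):
        print("Target for volume %s is visible" % volume_id)
    else:
        print("Target for volume %s was not found" % volume_id)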

hyperv/nova/__init__.py (new file, 17 lines)

@@ -0,0 +1,17 @@
# Copyright (c) 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.virt.hyperv import driver
HyperVDriver = driver.HyperVDriver


@@ -0,0 +1,149 @@
#
# Copyright 2012 Pedro Navarro Perez
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of volumes
and storage repositories.
"""
import abc
import re
import sys
if sys.platform == 'win32':
import _winreg
import wmi
from oslo_log import log as logging
from nova import block_device
from nova.i18n import _LI
from nova.virt import driver
LOG = logging.getLogger(__name__)
class BaseVolumeUtils(object):
_FILE_DEVICE_DISK = 7
def __init__(self, host='.'):
if sys.platform == 'win32':
self._conn_wmi = wmi.WMI(moniker='//%s/root/wmi' % host)
self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)
self._drive_number_regex = re.compile(r'DeviceID=\"[^,]*\\(\d+)\"')
@abc.abstractmethod
def login_storage_target(self, target_lun, target_iqn, target_portal):
pass
@abc.abstractmethod
def logout_storage_target(self, target_iqn):
pass
@abc.abstractmethod
def execute_log_out(self, session_id):
pass
def get_iscsi_initiator(self):
"""Get iscsi initiator name for this machine."""
computer_system = self._conn_cimv2.Win32_ComputerSystem()[0]
hostname = computer_system.name
keypath = ("SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\"
"iSCSI\\Discovery")
try:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, keypath, 0,
_winreg.KEY_ALL_ACCESS)
temp = _winreg.QueryValueEx(key, 'DefaultInitiatorName')
initiator_name = str(temp[0])
_winreg.CloseKey(key)
except Exception:
LOG.info(_LI("The ISCSI initiator name can't be found. "
"Choosing the default one"))
initiator_name = "iqn.1991-05.com.microsoft:" + hostname.lower()
if computer_system.PartofDomain:
initiator_name += '.' + computer_system.Domain.lower()
return initiator_name
def volume_in_mapping(self, mount_device, block_device_info):
block_device_list = [block_device.strip_dev(vol['mount_device'])
for vol in
driver.block_device_info_get_mapping(
block_device_info)]
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
block_device_list.append(
block_device.strip_dev(swap['device_name']))
block_device_list += [block_device.strip_dev(
ephemeral['device_name'])
for ephemeral in
driver.block_device_info_get_ephemerals(block_device_info)]
LOG.debug("block_device_list %s", block_device_list)
return block_device.strip_dev(mount_device) in block_device_list
def _get_drive_number_from_disk_path(self, disk_path):
drive_number = self._drive_number_regex.findall(disk_path)
if drive_number:
return int(drive_number[0])
def get_session_id_from_mounted_disk(self, physical_drive_path):
drive_number = self._get_drive_number_from_disk_path(
physical_drive_path)
if not drive_number:
return None
initiator_sessions = self._conn_wmi.MSiSCSIInitiator_SessionClass()
for initiator_session in initiator_sessions:
devices = initiator_session.Devices
for device in devices:
device_number = device.DeviceNumber
if device_number == drive_number:
return initiator_session.SessionId
def _get_devices_for_target(self, target_iqn):
initiator_sessions = self._conn_wmi.MSiSCSIInitiator_SessionClass(
TargetName=target_iqn)
if not initiator_sessions:
return []
return initiator_sessions[0].Devices
def get_device_number_for_target(self, target_iqn, target_lun):
devices = self._get_devices_for_target(target_iqn)
for device in devices:
if device.ScsiLun == target_lun:
return device.DeviceNumber
def get_target_lun_count(self, target_iqn):
devices = self._get_devices_for_target(target_iqn)
disk_devices = [device for device in devices
if device.DeviceType == self._FILE_DEVICE_DISK]
return len(disk_devices)
def get_target_from_disk_path(self, disk_path):
initiator_sessions = self._conn_wmi.MSiSCSIInitiator_SessionClass()
drive_number = self._get_drive_number_from_disk_path(disk_path)
if not drive_number:
return None
for initiator_session in initiator_sessions:
devices = initiator_session.Devices
for device in devices:
if device.DeviceNumber == drive_number:
return (device.TargetName, device.ScsiLun)
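
As an illustration of how the _drive_number_regex defined above pulls the
physical drive number out of a WMI disk resource path, here is a small sketch;
the DeviceID value is made up and only shaped to match the pattern:

    # The regex is the one used by _get_drive_number_from_disk_path above.
    import re

    drive_number_regex = re.compile(r'DeviceID=\"[^,]*\\(\d+)\"')
    sample_disk_path = ('Msvm_DiskDrive.CreationClassName="Msvm_DiskDrive",'
                        'DeviceID="Microsoft:FAKE-RESOURCE\\2"')  # made-up value

    numbers = drive_number_regex.findall(sample_disk_path)
    drive_number = int(numbers[0]) if numbers else None
    print(drive_number)  # 2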

hyperv/nova/constants.py (new file, 101 lines)

@@ -0,0 +1,101 @@
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Constants used in ops classes
"""
from nova.compute import arch
from nova.compute import power_state
HYPERV_VM_STATE_ENABLED = 2
HYPERV_VM_STATE_DISABLED = 3
HYPERV_VM_STATE_SHUTTING_DOWN = 4
HYPERV_VM_STATE_REBOOT = 10
HYPERV_VM_STATE_PAUSED = 32768
HYPERV_VM_STATE_SUSPENDED = 32769
HYPERV_POWER_STATE = {
HYPERV_VM_STATE_DISABLED: power_state.SHUTDOWN,
HYPERV_VM_STATE_SHUTTING_DOWN: power_state.SHUTDOWN,
HYPERV_VM_STATE_ENABLED: power_state.RUNNING,
HYPERV_VM_STATE_PAUSED: power_state.PAUSED,
HYPERV_VM_STATE_SUSPENDED: power_state.SUSPENDED
}
WMI_WIN32_PROCESSOR_ARCHITECTURE = {
0: arch.I686,
1: arch.MIPS,
2: arch.ALPHA,
3: arch.PPC,
5: arch.ARMV7,
6: arch.IA64,
9: arch.X86_64,
}
PROCESSOR_FEATURE = {
7: '3dnow',
3: 'mmx',
12: 'nx',
9: 'pae',
8: 'rdtsc',
20: 'slat',
13: 'sse3',
21: 'vmx',
6: 'sse',
10: 'sse2',
17: 'xsave',
}
WMI_JOB_STATUS_STARTED = 4096
WMI_JOB_STATE_RUNNING = 4
WMI_JOB_STATE_COMPLETED = 7
VM_SUMMARY_NUM_PROCS = 4
VM_SUMMARY_ENABLED_STATE = 100
VM_SUMMARY_MEMORY_USAGE = 103
VM_SUMMARY_UPTIME = 105
CTRL_TYPE_IDE = "IDE"
CTRL_TYPE_SCSI = "SCSI"
DISK = "VHD"
DISK_FORMAT = DISK
DVD = "DVD"
DVD_FORMAT = "ISO"
DISK_FORMAT_MAP = {
DISK_FORMAT.lower(): DISK,
DVD_FORMAT.lower(): DVD
}
DISK_FORMAT_VHD = "VHD"
DISK_FORMAT_VHDX = "VHDX"
VHD_TYPE_FIXED = 2
VHD_TYPE_DYNAMIC = 3
SCSI_CONTROLLER_SLOTS_NUMBER = 64
HOST_POWER_ACTION_SHUTDOWN = "shutdown"
HOST_POWER_ACTION_REBOOT = "reboot"
HOST_POWER_ACTION_STARTUP = "startup"
IMAGE_PROP_VM_GEN = "hw_machine_type"
IMAGE_PROP_VM_GEN_1 = "hyperv-gen1"
IMAGE_PROP_VM_GEN_2 = "hyperv-gen2"
VM_GEN_1 = 1
VM_GEN_2 = 2
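
A short sketch of how these lookup tables are typically consumed by the ops
classes; the import path assumes the module layout added in this commit:

    # Translate a raw Hyper-V VM state into a Nova power state, and a disk
    # extension into its attachment type, using the maps defined above.
    from hyperv.nova import constants

    nova_state = constants.HYPERV_POWER_STATE[constants.HYPERV_VM_STATE_PAUSED]
    attach_type = constants.DISK_FORMAT_MAP['iso']   # resolves to constants.DVD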

hyperv/nova/driver.py (new file, 243 lines)

@@ -0,0 +1,243 @@
# Copyright (c) 2010 Cloud.com, Inc
# Copyright (c) 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A Hyper-V Nova Compute driver.
"""
import platform
from oslo_log import log as logging
from nova.i18n import _
from nova.virt import driver
from nova.virt.hyperv import hostops
from nova.virt.hyperv import livemigrationops
from nova.virt.hyperv import migrationops
from nova.virt.hyperv import rdpconsoleops
from nova.virt.hyperv import snapshotops
from nova.virt.hyperv import vmops
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
class HyperVDriver(driver.ComputeDriver):
def __init__(self, virtapi):
super(HyperVDriver, self).__init__(virtapi)
self._hostops = hostops.HostOps()
self._volumeops = volumeops.VolumeOps()
self._vmops = vmops.VMOps()
self._snapshotops = snapshotops.SnapshotOps()
self._livemigrationops = livemigrationops.LiveMigrationOps()
self._migrationops = migrationops.MigrationOps()
self._rdpconsoleops = rdpconsoleops.RDPConsoleOps()
def init_host(self, host):
self._vmops.restart_vm_log_writers()
def list_instance_uuids(self):
return self._vmops.list_instance_uuids()
def list_instances(self):
return self._vmops.list_instances()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
self._vmops.reboot(instance, network_info, reboot_type)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
self._vmops.destroy(instance, network_info, block_device_info,
destroy_disks)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Cleanup after instance being destroyed by Hypervisor."""
pass
def get_info(self, instance):
return self._vmops.get_info(instance)
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
return self._volumeops.attach_volume(connection_info,
instance.name)
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
return self._volumeops.detach_volume(connection_info,
instance.name)
def get_volume_connector(self, instance):
return self._volumeops.get_volume_connector(instance)
def get_available_resource(self, nodename):
return self._hostops.get_available_resource()
def get_available_nodes(self, refresh=False):
return [platform.node()]
def host_power_action(self, action):
return self._hostops.host_power_action(action)
def snapshot(self, context, instance, image_id, update_task_state):
self._snapshotops.snapshot(context, instance, image_id,
update_task_state)
def pause(self, instance):
self._vmops.pause(instance)
def unpause(self, instance):
self._vmops.unpause(instance)
def suspend(self, context, instance):
self._vmops.suspend(instance)
def resume(self, context, instance, network_info, block_device_info=None):
self._vmops.resume(instance)
def power_off(self, instance, timeout=0, retry_interval=0):
self._vmops.power_off(instance, timeout, retry_interval)
def power_on(self, context, instance, network_info,
block_device_info=None):
self._vmops.power_on(instance, block_device_info)
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""Resume guest state when a host is booted."""
self._vmops.resume_state_on_host_boot(context, instance, network_info,
block_device_info)
def live_migration(self, context, instance, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
self._livemigrationops.live_migration(context, instance, dest,
post_method, recover_method,
block_migration, migrate_data)
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info,
destroy_disks=True,
migrate_data=None):
self.destroy(context, instance, network_info, block_device_info)
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data=None):
self._livemigrationops.pre_live_migration(context, instance,
block_device_info,
network_info)
def post_live_migration(self, context, instance, block_device_info,
migrate_data=None):
self._livemigrationops.post_live_migration(context, instance,
block_device_info)
def post_live_migration_at_destination(self, context, instance,
network_info,
block_migration=False,
block_device_info=None):
self._livemigrationops.post_live_migration_at_destination(
context,
instance,
network_info,
block_migration)
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
return self._livemigrationops.check_can_live_migrate_destination(
context, instance, src_compute_info, dst_compute_info,
block_migration, disk_over_commit)
def check_can_live_migrate_destination_cleanup(self, context,
dest_check_data):
self._livemigrationops.check_can_live_migrate_destination_cleanup(
context, dest_check_data)
def check_can_live_migrate_source(self, context, instance,
dest_check_data, block_device_info=None):
return self._livemigrationops.check_can_live_migrate_source(
context, instance, dest_check_data)
def get_instance_disk_info(self, instance, block_device_info=None):
pass
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
msg = _("VIF plugging is not supported by the Hyper-V driver.")
raise NotImplementedError(msg)
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
msg = _("VIF unplugging is not supported by the Hyper-V driver.")
raise NotImplementedError(msg)
def ensure_filtering_rules_for_instance(self, instance, network_info):
LOG.debug("ensure_filtering_rules_for_instance called",
instance=instance)
def unfilter_instance(self, instance, network_info):
LOG.debug("unfilter_instance called", instance=instance)
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
return self._migrationops.migrate_disk_and_power_off(context,
instance, dest,
flavor,
network_info,
block_device_info,
timeout,
retry_interval)
def confirm_migration(self, migration, instance, network_info):
self._migrationops.confirm_migration(migration, instance, network_info)
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
self._migrationops.finish_revert_migration(context, instance,
network_info,
block_device_info, power_on)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
self._migrationops.finish_migration(context, migration, instance,
disk_info, network_info,
image_meta, resize_instance,
block_device_info, power_on)
def get_host_ip_addr(self):
return self._hostops.get_host_ip_addr()
def get_host_uptime(self):
return self._hostops.get_host_uptime()
def get_rdp_console(self, context, instance):
return self._rdpconsoleops.get_rdp_console(instance)
def get_console_output(self, context, instance):
return self._vmops.get_console_output(instance)
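
The class above is a thin delegation layer; it is normally selected by its
dotted path rather than imported directly. A hedged sketch of resolving it that
way (the conf value shown in the comment is illustrative, and instantiation
needs a Windows host because the ops helpers open WMI connections):

    # Resolve the driver class by dotted path, as a compute_driver setting would.
    from oslo_utils import importutils

    driver_cls = importutils.import_class('hyperv.nova.driver.HyperVDriver')
    # e.g. in nova.conf (hypothetical): compute_driver = hyperv.nova.driver.HyperVDriver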

hyperv/nova/hostops.py (new file, 186 lines)

@@ -0,0 +1,186 @@
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for host operations.
"""
import datetime
import os
import platform
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import units
from nova.compute import arch
from nova.compute import hv_type
from nova.compute import vm_mode
from nova.i18n import _
from nova.virt.hyperv import constants
from nova.virt.hyperv import utilsfactory
CONF = cfg.CONF
CONF.import_opt('my_ip', 'nova.netconf')
LOG = logging.getLogger(__name__)
class HostOps(object):
def __init__(self):
self._hostutils = utilsfactory.get_hostutils()
self._pathutils = utilsfactory.get_pathutils()
def _get_cpu_info(self):
"""Get the CPU information.
:returns: A dictionary containing the main properties
of the central processor in the hypervisor.
"""
cpu_info = dict()
processors = self._hostutils.get_cpus_info()
w32_arch_dict = constants.WMI_WIN32_PROCESSOR_ARCHITECTURE
cpu_info['arch'] = w32_arch_dict.get(processors[0]['Architecture'],
'Unknown')
cpu_info['model'] = processors[0]['Name']
cpu_info['vendor'] = processors[0]['Manufacturer']
topology = dict()
topology['sockets'] = len(processors)
topology['cores'] = processors[0]['NumberOfCores']
topology['threads'] = (processors[0]['NumberOfLogicalProcessors'] /
processors[0]['NumberOfCores'])
cpu_info['topology'] = topology
features = list()
for fkey, fname in constants.PROCESSOR_FEATURE.items():
if self._hostutils.is_cpu_feature_present(fkey):
features.append(fname)
cpu_info['features'] = features
return cpu_info
def _get_memory_info(self):
(total_mem_kb, free_mem_kb) = self._hostutils.get_memory_info()
total_mem_mb = total_mem_kb / 1024
free_mem_mb = free_mem_kb / 1024
return (total_mem_mb, free_mem_mb, total_mem_mb - free_mem_mb)
def _get_local_hdd_info_gb(self):
drive = os.path.splitdrive(self._pathutils.get_instances_dir())[0]
(size, free_space) = self._hostutils.get_volume_info(drive)
total_gb = size / units.Gi
free_gb = free_space / units.Gi
used_gb = total_gb - free_gb
return (total_gb, free_gb, used_gb)
def _get_hypervisor_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 6003)
"""
# NOTE(claudiub): The hypervisor_version will be stored in the database
# as an Integer and it will be used by the scheduler, if required by
# the image property 'hypervisor_version_requires'.
# The hypervisor_version will then be converted back to a version
# by splitting the int in groups of 3 digits.
# E.g.: hypervisor_version 6003 is converted to '6.3'.
version = self._hostutils.get_windows_version().split('.')
version = int(version[0]) * 1000 + int(version[1])
LOG.debug('Windows version: %s ', version)
return version
def get_available_resource(self):
"""Retrieve resource info.
This method is called when nova-compute launches, and
as part of a periodic task.
:returns: dictionary describing resources
"""
LOG.debug('get_available_resource called')
(total_mem_mb,
free_mem_mb,
used_mem_mb) = self._get_memory_info()
(total_hdd_gb,
free_hdd_gb,
used_hdd_gb) = self._get_local_hdd_info_gb()
cpu_info = self._get_cpu_info()
cpu_topology = cpu_info['topology']
vcpus = (cpu_topology['sockets'] *
cpu_topology['cores'] *
cpu_topology['threads'])
dic = {'vcpus': vcpus,
'memory_mb': total_mem_mb,
'memory_mb_used': used_mem_mb,
'local_gb': total_hdd_gb,
'local_gb_used': used_hdd_gb,
'hypervisor_type': "hyperv",
'hypervisor_version': self._get_hypervisor_version(),
'hypervisor_hostname': platform.node(),
'vcpus_used': 0,
'cpu_info': jsonutils.dumps(cpu_info),
'supported_instances': jsonutils.dumps(
[(arch.I686, hv_type.HYPERV, vm_mode.HVM),
(arch.X86_64, hv_type.HYPERV, vm_mode.HVM)]),
'numa_topology': None,
}
return dic
def host_power_action(self, action):
"""Reboots, shuts down or powers up the host."""
if action in [constants.HOST_POWER_ACTION_SHUTDOWN,
constants.HOST_POWER_ACTION_REBOOT]:
self._hostutils.host_power_action(action)
else:
if action == constants.HOST_POWER_ACTION_STARTUP:
raise NotImplementedError(
_("Host PowerOn is not supported by the Hyper-V driver"))
def get_host_ip_addr(self):
host_ip = CONF.my_ip
if not host_ip:
# Return the first available address
host_ip = self._hostutils.get_local_ips()[0]
LOG.debug("Host IP address is: %s", host_ip)
return host_ip
def get_host_uptime(self):
"""Returns the host uptime."""
tick_count64 = self._hostutils.get_host_tick_count64()
# format the string to match libvirt driver uptime
# Libvirt uptime returns a combination of the following
# - current host time
# - time since host is up
# - number of logged in users
# - cpu load
# Since the Windows function GetTickCount64 returns only
# the time since the host is up, returning 0s for cpu load
# and number of logged in users.
# This is done to ensure the format of the returned
# value is the same as in libvirt
return "%s up %s, 0 users, load average: 0, 0, 0" % (
str(time.strftime("%H:%M:%S")),
str(datetime.timedelta(milliseconds=long(tick_count64))))
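
The NOTE in _get_hypervisor_version above packs the Windows version into a
single integer; a worked example of that arithmetic with a hypothetical
version string, matching the 6003 <-> '6.3' mapping from the comment:

    # Worked example of the encoding used by _get_hypervisor_version.
    version_str = "6.3.9600"      # hypothetical Win32_OperatingSystem.Version
    major, minor = version_str.split('.')[:2]
    hypervisor_version = int(major) * 1000 + int(minor)
    print(hypervisor_version)     # 6003, later read back by the scheduler as '6.3'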

hyperv/nova/hostutils.py (new file, 116 lines)

@@ -0,0 +1,116 @@
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ctypes
import socket
import sys
if sys.platform == 'win32':
import wmi
from nova.i18n import _
from nova.virt.hyperv import constants
class HostUtils(object):
_HOST_FORCED_REBOOT = 6
_HOST_FORCED_SHUTDOWN = 12
_DEFAULT_VM_GENERATION = constants.IMAGE_PROP_VM_GEN_1
def __init__(self):
if sys.platform == 'win32':
self._conn_cimv2 = wmi.WMI(privileges=["Shutdown"])
def get_cpus_info(self):
cpus = self._conn_cimv2.query("SELECT * FROM Win32_Processor "
"WHERE ProcessorType = 3")
cpus_list = []
for cpu in cpus:
cpu_info = {'Architecture': cpu.Architecture,
'Name': cpu.Name,
'Manufacturer': cpu.Manufacturer,
'NumberOfCores': cpu.NumberOfCores,
'NumberOfLogicalProcessors':
cpu.NumberOfLogicalProcessors}
cpus_list.append(cpu_info)
return cpus_list
def is_cpu_feature_present(self, feature_key):
return ctypes.windll.kernel32.IsProcessorFeaturePresent(feature_key)
def get_memory_info(self):
"""Returns a tuple with total visible memory and free physical memory
expressed in kB.
"""
mem_info = self._conn_cimv2.query("SELECT TotalVisibleMemorySize, "
"FreePhysicalMemory "
"FROM win32_operatingsystem")[0]
return (long(mem_info.TotalVisibleMemorySize),
long(mem_info.FreePhysicalMemory))
def get_volume_info(self, drive):
"""Returns a tuple with total size and free space
expressed in bytes.
"""
logical_disk = self._conn_cimv2.query("SELECT Size, FreeSpace "
"FROM win32_logicaldisk "
"WHERE DeviceID='%s'"
% drive)[0]
return (long(logical_disk.Size), long(logical_disk.FreeSpace))
def check_min_windows_version(self, major, minor, build=0):
version_str = self.get_windows_version()
return map(int, version_str.split('.')) >= [major, minor, build]
def get_windows_version(self):
return self._conn_cimv2.Win32_OperatingSystem()[0].Version
def get_local_ips(self):
addr_info = socket.getaddrinfo(socket.gethostname(), None, 0, 0, 0)
# Returns IPv4 and IPv6 addresses, ordered by protocol family
addr_info.sort()
return [a[4][0] for a in addr_info]
def get_host_tick_count64(self):
return ctypes.windll.kernel32.GetTickCount64()
def host_power_action(self, action):
win32_os = self._conn_cimv2.Win32_OperatingSystem()[0]
if action == constants.HOST_POWER_ACTION_SHUTDOWN:
win32_os.Win32Shutdown(self._HOST_FORCED_SHUTDOWN)
elif action == constants.HOST_POWER_ACTION_REBOOT:
win32_os.Win32Shutdown(self._HOST_FORCED_REBOOT)
else:
raise NotImplementedError(
_("Host %(action)s is not supported by the Hyper-V driver") %
{"action": action})
def get_supported_vm_types(self):
"""Get the supported Hyper-V VM generations.
Hyper-V Generation 2 VMs are supported in Windows 8.1,
Windows Server / Hyper-V Server 2012 R2 or newer.
:returns: array of supported VM generations (ex. ['hyperv-gen1'])
"""
if self.check_min_windows_version(6, 3):
return [constants.IMAGE_PROP_VM_GEN_1,
constants.IMAGE_PROP_VM_GEN_2]
else:
return [constants.IMAGE_PROP_VM_GEN_1]
def get_default_vm_generation(self):
return self._DEFAULT_VM_GENERATION
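
check_min_windows_version above compares the dotted Windows version against a
[major, minor, build] list; the sketch below reproduces that comparison with a
hypothetical version string, which is the same test used by
get_supported_vm_types to decide whether Generation 2 VMs are offered:

    # Hypothetical version string; the real value comes from Win32_OperatingSystem.
    version_str = "6.3.9600"
    parsed = [int(part) for part in version_str.split('.')]

    supports_gen2 = parsed >= [6, 3, 0]   # equivalent to check_min_windows_version(6, 3)
    print(supports_gen2)                  # True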

hyperv/nova/imagecache.py (new file, 140 lines)

@@ -0,0 +1,140 @@
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Image caching and management.
"""
import os
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from nova.i18n import _
from nova import utils
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmutils
from nova.virt import images
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('use_cow_images', 'nova.virt.driver')
class ImageCache(object):
def __init__(self):
self._pathutils = utilsfactory.get_pathutils()
self._vhdutils = utilsfactory.get_vhdutils()
def _get_root_vhd_size_gb(self, instance):
if instance.old_flavor:
return instance.old_flavor.root_gb
else:
return instance.root_gb
def _resize_and_cache_vhd(self, instance, vhd_path):
vhd_info = self._vhdutils.get_vhd_info(vhd_path)
vhd_size = vhd_info['MaxInternalSize']
root_vhd_size_gb = self._get_root_vhd_size_gb(instance)
root_vhd_size = root_vhd_size_gb * units.Gi
root_vhd_internal_size = (
self._vhdutils.get_internal_vhd_size_by_file_size(
vhd_path, root_vhd_size))
if root_vhd_internal_size < vhd_size:
raise vmutils.HyperVException(
_("Cannot resize the image to a size smaller than the VHD "
"max. internal size: %(vhd_size)s. Requested disk size: "
"%(root_vhd_size)s") %
{'vhd_size': vhd_size, 'root_vhd_size': root_vhd_size}
)
if root_vhd_internal_size > vhd_size:
path_parts = os.path.splitext(vhd_path)
resized_vhd_path = '%s_%s%s' % (path_parts[0],
root_vhd_size_gb,
path_parts[1])
@utils.synchronized(resized_vhd_path)
def copy_and_resize_vhd():
if not self._pathutils.exists(resized_vhd_path):
try:
LOG.debug("Copying VHD %(vhd_path)s to "
"%(resized_vhd_path)s",
{'vhd_path': vhd_path,
'resized_vhd_path': resized_vhd_path})
self._pathutils.copyfile(vhd_path, resized_vhd_path)
LOG.debug("Resizing VHD %(resized_vhd_path)s to new "
"size %(root_vhd_size)s",
{'resized_vhd_path': resized_vhd_path,
'root_vhd_size': root_vhd_size})
self._vhdutils.resize_vhd(resized_vhd_path,
root_vhd_internal_size,
is_file_max_size=False)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(resized_vhd_path):
self._pathutils.remove(resized_vhd_path)
copy_and_resize_vhd()
return resized_vhd_path
def get_cached_image(self, context, instance):
image_id = instance.image_ref
base_vhd_dir = self._pathutils.get_base_vhd_dir()
base_vhd_path = os.path.join(base_vhd_dir, image_id)
@utils.synchronized(base_vhd_path)
def fetch_image_if_not_existing():
vhd_path = None
for format_ext in ['vhd', 'vhdx']:
test_path = base_vhd_path + '.' + format_ext
if self._pathutils.exists(test_path):
vhd_path = test_path
break
if not vhd_path:
try:
images.fetch(context, image_id, base_vhd_path,
instance.user_id,
instance.project_id)
format_ext = self._vhdutils.get_vhd_format(base_vhd_path)
vhd_path = base_vhd_path + '.' + format_ext.lower()
self._pathutils.rename(base_vhd_path, vhd_path)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(base_vhd_path):
self._pathutils.remove(base_vhd_path)
return vhd_path
vhd_path = fetch_image_if_not_existing()
if CONF.use_cow_images and vhd_path.split('.')[-1].lower() == 'vhd':
# Resize the base VHD image as it's not possible to resize a
# differencing VHD. This does not apply to VHDX images.
resized_vhd_path = self._resize_and_cache_vhd(instance, vhd_path)
if resized_vhd_path:
return resized_vhd_path
return vhd_path
def get_image_details(self, context, instance):
image_id = instance.image_ref
return images.get_info(context, image_id)
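
When a cached base VHD needs to be grown for a flavor, _resize_and_cache_vhd
above derives a per-size file name next to the original; a small illustration
of that naming scheme with a made-up cache path:

    # Shows the "<image>_<root_gb><ext>" naming used for resized cached images.
    import os

    vhd_path = 'C:\\images\\1d3bf7a7-5b23-4b58-a80d-68ab980313e4.vhd'  # made up
    root_vhd_size_gb = 20

    path_parts = os.path.splitext(vhd_path)
    resized_vhd_path = '%s_%s%s' % (path_parts[0], root_vhd_size_gb, path_parts[1])
    print(resized_vhd_path)  # ...\1d3bf7a7-5b23-4b58-a80d-68ab980313e4_20.vhd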

hyperv/nova/ioutils.py (new file, 69 lines)

@@ -0,0 +1,69 @@
# Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
from eventlet import patcher
from oslo_log import log as logging
from nova.i18n import _LE
LOG = logging.getLogger(__name__)
native_threading = patcher.original('threading')
class IOThread(native_threading.Thread):
def __init__(self, src, dest, max_bytes):
super(IOThread, self).__init__()
self.setDaemon(True)
self._src = src
self._dest = dest
self._dest_archive = dest + '.1'
self._max_bytes = max_bytes
self._stopped = native_threading.Event()
def run(self):
try:
self._copy()
except IOError as err:
# Invalid argument error means that the vm console pipe was closed,
# probably because the vm was stopped. The worker can stop its execution.
if err.errno != errno.EINVAL:
LOG.error(_LE("Error writing vm console log file from "
"serial console pipe. Error: %s") % err)
def _copy(self):
with open(self._src, 'rb') as src:
with open(self._dest, 'ab', 0) as dest:
dest.seek(0, os.SEEK_END)
log_size = dest.tell()
while (not self._stopped.isSet()):
# Read one byte at a time to avoid blocking.
data = src.read(1)
dest.write(data)
log_size += len(data)
if (log_size >= self._max_bytes):
dest.close()
if os.path.exists(self._dest_archive):
os.remove(self._dest_archive)
os.rename(self._dest, self._dest_archive)
dest = open(self._dest, 'ab', 0)
log_size = 0
def join(self):
self._stopped.set()
super(IOThread, self).join()
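
A hedged usage sketch for the IOThread worker above; the pipe and log paths
are placeholders, and in the driver the source is the VM's named serial
console pipe:

    # IOThread copies the console pipe into a log file and rotates it to
    # "<dest>.1" once max_bytes is exceeded. Paths below are placeholders.
    console_pipe = r'\\.\pipe\fake-instance-serial'
    console_log = r'C:\OpenStack\Log\instance-0001-console.log'

    worker = IOThread(console_pipe, console_log, max_bytes=512 * 1024)
    worker.start()
    # ... later, once the instance is stopped:
    worker.join()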


@@ -0,0 +1,130 @@
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for live migration VM operations.
"""
import functools
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from nova.i18n import _
from nova.virt import configdrive
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmops
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('config_drive_cdrom', 'nova.virt.hyperv.vmops', 'hyperv')
def check_os_version_requirement(function):
@functools.wraps(function)
def wrapper(self, *args, **kwds):
if not self._livemigrutils:
raise NotImplementedError(_('Live migration is supported '
'starting with Hyper-V Server '
'2012'))
return function(self, *args, **kwds)
return wrapper
class LiveMigrationOps(object):
def __init__(self):
# Live migration is supported starting from Hyper-V Server 2012
if utilsfactory.get_hostutils().check_min_windows_version(6, 2):
self._livemigrutils = utilsfactory.get_livemigrationutils()
else:
self._livemigrutils = None
self._pathutils = utilsfactory.get_pathutils()
self._vmops = vmops.VMOps()
self._volumeops = volumeops.VolumeOps()
self._imagecache = imagecache.ImageCache()
@check_os_version_requirement
def live_migration(self, context, instance_ref, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
LOG.debug("live_migration called", instance=instance_ref)
instance_name = instance_ref["name"]
try:
self._vmops.copy_vm_console_logs(instance_name, dest)
if (configdrive.required_by(instance_ref) and
CONF.hyperv.config_drive_cdrom):
self._pathutils.copy_configdrive(instance_name, dest)
self._livemigrutils.live_migrate_vm(instance_name,
dest)
except Exception:
with excutils.save_and_reraise_exception():
LOG.debug("Calling live migration recover_method "
"for instance: %s", instance_name)
recover_method(context, instance_ref, dest, block_migration)
LOG.debug("Calling live migration post_method for instance: %s",
instance_name)
post_method(context, instance_ref, dest, block_migration)
@check_os_version_requirement
def pre_live_migration(self, context, instance, block_device_info,
network_info):
LOG.debug("pre_live_migration called", instance=instance)
self._livemigrutils.check_live_migration_config()
if CONF.use_cow_images:
boot_from_volume = self._volumeops.ebs_root_in_block_devices(
block_device_info)
if not boot_from_volume and instance.image_ref:
self._imagecache.get_cached_image(context, instance)
self._volumeops.initialize_volumes_connection(block_device_info)
@check_os_version_requirement
def post_live_migration(self, context, instance, block_device_info):
self._volumeops.disconnect_volumes(block_device_info)
@check_os_version_requirement
def post_live_migration_at_destination(self, ctxt, instance_ref,
network_info, block_migration):
LOG.debug("post_live_migration_at_destination called",
instance=instance_ref)
self._vmops.log_vm_serial_output(instance_ref['name'],
instance_ref['uuid'])
@check_os_version_requirement
def check_can_live_migrate_destination(self, ctxt, instance_ref,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
LOG.debug("check_can_live_migrate_destination called", instance_ref)
return {}
@check_os_version_requirement
def check_can_live_migrate_destination_cleanup(self, ctxt,
dest_check_data):
LOG.debug("check_can_live_migrate_destination_cleanup called")
@check_os_version_requirement
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
LOG.debug("check_can_live_migrate_source called", instance_ref)
return dest_check_data


@@ -0,0 +1,252 @@
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
if sys.platform == 'win32':
import wmi
from oslo_log import log as logging
from nova import exception
from nova.i18n import _, _LE
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import vmutilsv2
from nova.virt.hyperv import volumeutilsv2
LOG = logging.getLogger(__name__)
class LiveMigrationUtils(object):
def __init__(self):
self._vmutils = vmutilsv2.VMUtilsV2()
self._volutils = volumeutilsv2.VolumeUtilsV2()
def _get_conn_v2(self, host='localhost'):
try:
return wmi.WMI(moniker='//%s/root/virtualization/v2' % host)
except wmi.x_wmi as ex:
LOG.exception(_LE('Get version 2 connection error'))
if ex.com_error.hresult == -2147217394:
msg = (_('Live migration is not supported on target host "%s"')
% host)
elif ex.com_error.hresult == -2147023174:
msg = (_('Target live migration host "%s" is unreachable')
% host)
else:
msg = _('Live migration failed: %s') % ex.message
raise vmutils.HyperVException(msg)
def check_live_migration_config(self):
conn_v2 = self._get_conn_v2()
migration_svc = conn_v2.Msvm_VirtualSystemMigrationService()[0]
vsmssds = migration_svc.associators(
wmi_association_class='Msvm_ElementSettingData',
wmi_result_class='Msvm_VirtualSystemMigrationServiceSettingData')
vsmssd = vsmssds[0]
if not vsmssd.EnableVirtualSystemMigration:
raise vmutils.HyperVException(
_('Live migration is not enabled on this host'))
if not migration_svc.MigrationServiceListenerIPAddressList:
raise vmutils.HyperVException(
_('Live migration networks are not configured on this host'))
def _get_vm(self, conn_v2, vm_name):
vms = conn_v2.Msvm_ComputerSystem(ElementName=vm_name)
n = len(vms)
if not n:
raise exception.NotFound(_('VM not found: %s') % vm_name)
elif n > 1:
raise vmutils.HyperVException(_('Duplicate VM name found: %s')
% vm_name)
return vms[0]
def _destroy_planned_vm(self, conn_v2_remote, planned_vm):
LOG.debug("Destroying existing remote planned VM: %s",
planned_vm.ElementName)
vs_man_svc = conn_v2_remote.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.DestroySystem(planned_vm.path_())
self._vmutils.check_ret_val(ret_val, job_path)
def _check_existing_planned_vm(self, conn_v2_remote, vm):
# Make sure that there's not yet a remote planned VM on the target
# host for this VM
planned_vms = conn_v2_remote.Msvm_PlannedComputerSystem(Name=vm.Name)
if planned_vms:
self._destroy_planned_vm(conn_v2_remote, planned_vms[0])
def _create_remote_planned_vm(self, conn_v2_local, conn_v2_remote,
vm, rmt_ip_addr_list, dest_host):
# Staged
vsmsd = conn_v2_local.query("select * from "
"Msvm_VirtualSystemMigrationSettingData "
"where MigrationType = 32770")[0]
vsmsd.DestinationIPAddressList = rmt_ip_addr_list
migration_setting_data = vsmsd.GetText_(1)
LOG.debug("Creating remote planned VM for VM: %s",
vm.ElementName)
migr_svc = conn_v2_local.Msvm_VirtualSystemMigrationService()[0]
(job_path, ret_val) = migr_svc.MigrateVirtualSystemToHost(
ComputerSystem=vm.path_(),
DestinationHost=dest_host,
MigrationSettingData=migration_setting_data)
self._vmutils.check_ret_val(ret_val, job_path)
return conn_v2_remote.Msvm_PlannedComputerSystem(Name=vm.Name)[0]
def _get_physical_disk_paths(self, vm_name):
ide_ctrl_path = self._vmutils.get_vm_ide_controller(vm_name, 0)
if ide_ctrl_path:
ide_paths = self._vmutils.get_controller_volume_paths(
ide_ctrl_path)
else:
ide_paths = {}
scsi_ctrl_path = self._vmutils.get_vm_scsi_controller(vm_name)
scsi_paths = self._vmutils.get_controller_volume_paths(scsi_ctrl_path)
return dict(ide_paths.items() + scsi_paths.items())
def _get_remote_disk_data(self, vmutils_remote, disk_paths, dest_host):
volutils_remote = volumeutilsv2.VolumeUtilsV2(dest_host)
disk_paths_remote = {}
for (rasd_rel_path, disk_path) in disk_paths.items():
target = self._volutils.get_target_from_disk_path(disk_path)
if target:
(target_iqn, target_lun) = target
dev_num = volutils_remote.get_device_number_for_target(
target_iqn, target_lun)
disk_path_remote = (
vmutils_remote.get_mounted_disk_by_drive_number(dev_num))
disk_paths_remote[rasd_rel_path] = disk_path_remote
else:
LOG.debug("Could not retrieve iSCSI target "
"from disk path: %s", disk_path)
return disk_paths_remote
def _update_planned_vm_disk_resources(self, vmutils_remote, conn_v2_remote,
planned_vm, vm_name,
disk_paths_remote):
vm_settings = planned_vm.associators(
wmi_association_class='Msvm_SettingsDefineState',
wmi_result_class='Msvm_VirtualSystemSettingData')[0]
updated_resource_setting_data = []
sasds = vm_settings.associators(
wmi_association_class='Msvm_VirtualSystemSettingDataComponent')
for sasd in sasds:
if (sasd.ResourceType == 17 and sasd.ResourceSubType ==
"Microsoft:Hyper-V:Physical Disk Drive" and
sasd.HostResource):
# Replace the local disk target with the correct remote one
old_disk_path = sasd.HostResource[0]
new_disk_path = disk_paths_remote.pop(sasd.path().RelPath)
LOG.debug("Replacing host resource "
"%(old_disk_path)s with "
"%(new_disk_path)s on planned VM %(vm_name)s",
{'old_disk_path': old_disk_path,
'new_disk_path': new_disk_path,
'vm_name': vm_name})
sasd.HostResource = [new_disk_path]
updated_resource_setting_data.append(sasd.GetText_(1))
LOG.debug("Updating remote planned VM disk paths for VM: %s",
vm_name)
vsmsvc = conn_v2_remote.Msvm_VirtualSystemManagementService()[0]
(res_settings, job_path, ret_val) = vsmsvc.ModifyResourceSettings(
ResourceSettings=updated_resource_setting_data)
vmutils_remote.check_ret_val(ret_val, job_path)
def _get_vhd_setting_data(self, vm):
vm_settings = vm.associators(
wmi_association_class='Msvm_SettingsDefineState',
wmi_result_class='Msvm_VirtualSystemSettingData')[0]
new_resource_setting_data = []
sasds = vm_settings.associators(
wmi_association_class='Msvm_VirtualSystemSettingDataComponent',
wmi_result_class='Msvm_StorageAllocationSettingData')
for sasd in sasds:
if (sasd.ResourceType == 31 and sasd.ResourceSubType ==
"Microsoft:Hyper-V:Virtual Hard Disk"):
new_resource_setting_data.append(sasd.GetText_(1))
return new_resource_setting_data
def _live_migrate_vm(self, conn_v2_local, vm, planned_vm, rmt_ip_addr_list,
new_resource_setting_data, dest_host):
# VirtualSystemAndStorage
vsmsd = conn_v2_local.query("select * from "
"Msvm_VirtualSystemMigrationSettingData "
"where MigrationType = 32771")[0]
vsmsd.DestinationIPAddressList = rmt_ip_addr_list
if planned_vm:
vsmsd.DestinationPlannedVirtualSystemId = planned_vm.Name
migration_setting_data = vsmsd.GetText_(1)
migr_svc = conn_v2_local.Msvm_VirtualSystemMigrationService()[0]
LOG.debug("Starting live migration for VM: %s", vm.ElementName)
(job_path, ret_val) = migr_svc.MigrateVirtualSystemToHost(
ComputerSystem=vm.path_(),
DestinationHost=dest_host,
MigrationSettingData=migration_setting_data,
NewResourceSettingData=new_resource_setting_data)
self._vmutils.check_ret_val(ret_val, job_path)
def _get_remote_ip_address_list(self, conn_v2_remote, dest_host):
LOG.debug("Getting live migration networks for remote host: %s",
dest_host)
migr_svc_rmt = conn_v2_remote.Msvm_VirtualSystemMigrationService()[0]
return migr_svc_rmt.MigrationServiceListenerIPAddressList
def live_migrate_vm(self, vm_name, dest_host):
self.check_live_migration_config()
conn_v2_local = self._get_conn_v2()
conn_v2_remote = self._get_conn_v2(dest_host)
vm = self._get_vm(conn_v2_local, vm_name)
self._check_existing_planned_vm(conn_v2_remote, vm)
rmt_ip_addr_list = self._get_remote_ip_address_list(conn_v2_remote,
dest_host)
planned_vm = None
disk_paths = self._get_physical_disk_paths(vm_name)
if disk_paths:
vmutils_remote = vmutilsv2.VMUtilsV2(dest_host)
disk_paths_remote = self._get_remote_disk_data(vmutils_remote,
disk_paths,
dest_host)
planned_vm = self._create_remote_planned_vm(conn_v2_local,
conn_v2_remote,
vm, rmt_ip_addr_list,
dest_host)
self._update_planned_vm_disk_resources(vmutils_remote,
conn_v2_remote, planned_vm,
vm_name, disk_paths_remote)
new_resource_setting_data = self._get_vhd_setting_data(vm)
self._live_migrate_vm(conn_v2_local, vm, planned_vm, rmt_ip_addr_list,
new_resource_setting_data, dest_host)
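
_get_conn_v2 above branches on two signed COM error codes; converting them to
unsigned hex makes them easier to match against Windows documentation. A quick
sketch (the error-name comments reflect common Windows meanings and are not
stated in the code above):

    # Show the signed HRESULTs tested in _get_conn_v2 as unsigned hex values.
    for hresult in (-2147217394, -2147023174):
        print(hex(hresult & 0xFFFFFFFF))
    # -2147217394 -> 0x8004100e  (invalid WMI namespace, i.e. no virtualization/v2)
    # -2147023174 -> 0x800706ba  (the RPC server is unavailable)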

hyperv/nova/migrationops.py (new file, 296 lines)

@@ -0,0 +1,296 @@
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for migration / resize operations.
"""
import os
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from nova import exception
from nova.i18n import _, _LE
from nova.virt import configdrive
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmops
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
class MigrationOps(object):
def __init__(self):
self._hostutils = utilsfactory.get_hostutils()
self._vmutils = utilsfactory.get_vmutils()
self._vhdutils = utilsfactory.get_vhdutils()
self._pathutils = utilsfactory.get_pathutils()
self._volumeops = volumeops.VolumeOps()
self._vmops = vmops.VMOps()
self._imagecache = imagecache.ImageCache()
def _migrate_disk_files(self, instance_name, disk_files, dest):
# TODO(mikal): it would be nice if this method took a full instance,
# because it could then be passed to the log messages below.
same_host = False
if dest in self._hostutils.get_local_ips():
same_host = True
LOG.debug("Migration target is the source host")
else:
LOG.debug("Migration target host: %s", dest)
instance_path = self._pathutils.get_instance_dir(instance_name)
revert_path = self._pathutils.get_instance_migr_revert_dir(
instance_name, remove_dir=True)
dest_path = None
try:
if same_host:
# Since source and target are the same, we copy the files to
# a temporary location before moving them into place
dest_path = '%s_tmp' % instance_path
if self._pathutils.exists(dest_path):
self._pathutils.rmtree(dest_path)
self._pathutils.makedirs(dest_path)
else:
dest_path = self._pathutils.get_instance_dir(
instance_name, dest, remove_dir=True)
for disk_file in disk_files:
# Skip the config drive as the instance is already configured
if os.path.basename(disk_file).lower() != 'configdrive.vhd':
LOG.debug('Copying disk "%(disk_file)s" to '
'"%(dest_path)s"',
{'disk_file': disk_file, 'dest_path': dest_path})
self._pathutils.copy(disk_file, dest_path)
self._pathutils.rename(instance_path, revert_path)
if same_host:
self._pathutils.rename(dest_path, instance_path)
except Exception:
with excutils.save_and_reraise_exception():
self._cleanup_failed_disk_migration(instance_path, revert_path,
dest_path)
def _cleanup_failed_disk_migration(self, instance_path,
revert_path, dest_path):
try:
if dest_path and self._pathutils.exists(dest_path):
self._pathutils.rmtree(dest_path)
if self._pathutils.exists(revert_path):
self._pathutils.rename(revert_path, instance_path)
except Exception as ex:
# Log and ignore this exception
LOG.exception(ex)
LOG.error(_LE("Cannot cleanup migration files"))
def _check_target_flavor(self, instance, flavor):
new_root_gb = flavor['root_gb']
curr_root_gb = instance.root_gb
if new_root_gb < curr_root_gb:
raise exception.InstanceFaultRollback(
vmutils.VHDResizeException(
_("Cannot resize the root disk to a smaller size. "
"Current size: %(curr_root_gb)s GB. Requested size: "
"%(new_root_gb)s GB") %
{'curr_root_gb': curr_root_gb,
'new_root_gb': new_root_gb}))
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None, timeout=0,
retry_interval=0):
LOG.debug("migrate_disk_and_power_off called", instance=instance)
self._check_target_flavor(instance, flavor)
self._vmops.power_off(instance, timeout, retry_interval)
(disk_files,
volume_drives) = self._vmutils.get_vm_storage_paths(instance.name)
if disk_files:
self._migrate_disk_files(instance.name, disk_files, dest)
self._vmops.destroy(instance, destroy_disks=False)
# disk_info is not used
return ""
def confirm_migration(self, migration, instance, network_info):
LOG.debug("confirm_migration called", instance=instance)
self._pathutils.get_instance_migr_revert_dir(instance.name,
remove_dir=True)
def _revert_migration_files(self, instance_name):
instance_path = self._pathutils.get_instance_dir(
instance_name, create_dir=False, remove_dir=True)
revert_path = self._pathutils.get_instance_migr_revert_dir(
instance_name)
self._pathutils.rename(revert_path, instance_path)
def _check_and_attach_config_drive(self, instance, vm_gen):
if configdrive.required_by(instance):
configdrive_path = self._pathutils.lookup_configdrive_path(
instance.name)
if configdrive_path:
self._vmops.attach_config_drive(instance, configdrive_path,
vm_gen)
else:
raise vmutils.HyperVException(
_("Config drive is required by instance: %s, "
"but it does not exist.") % instance.name)
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug("finish_revert_migration called", instance=instance)
instance_name = instance.name
self._revert_migration_files(instance_name)
if self._volumeops.ebs_root_in_block_devices(block_device_info):
root_vhd_path = None
else:
root_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name)
eph_vhd_path = self._pathutils.lookup_ephemeral_vhd_path(instance_name)
image_meta = self._imagecache.get_image_details(context, instance)
vm_gen = self._vmops.get_image_vm_generation(root_vhd_path, image_meta)
self._vmops.create_instance(instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path, vm_gen)
self._check_and_attach_config_drive(instance, vm_gen)
if power_on:
self._vmops.power_on(instance)
def _merge_base_vhd(self, diff_vhd_path, base_vhd_path):
base_vhd_copy_path = os.path.join(os.path.dirname(diff_vhd_path),
os.path.basename(base_vhd_path))
try:
LOG.debug('Copying base disk %(base_vhd_path)s to '
'%(base_vhd_copy_path)s',
{'base_vhd_path': base_vhd_path,
'base_vhd_copy_path': base_vhd_copy_path})
self._pathutils.copyfile(base_vhd_path, base_vhd_copy_path)
LOG.debug("Reconnecting copied base VHD "
"%(base_vhd_copy_path)s and diff "
"VHD %(diff_vhd_path)s",
{'base_vhd_copy_path': base_vhd_copy_path,
'diff_vhd_path': diff_vhd_path})
self._vhdutils.reconnect_parent_vhd(diff_vhd_path,
base_vhd_copy_path)
LOG.debug("Merging base disk %(base_vhd_copy_path)s and "
"diff disk %(diff_vhd_path)s",
{'base_vhd_copy_path': base_vhd_copy_path,
'diff_vhd_path': diff_vhd_path})
self._vhdutils.merge_vhd(diff_vhd_path, base_vhd_copy_path)
# Replace the differential VHD with the merged one
self._pathutils.rename(base_vhd_copy_path, diff_vhd_path)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(base_vhd_copy_path):
self._pathutils.remove(base_vhd_copy_path)
def _check_resize_vhd(self, vhd_path, vhd_info, new_size):
curr_size = vhd_info['MaxInternalSize']
if new_size < curr_size:
raise vmutils.VHDResizeException(_("Cannot resize a VHD "
"to a smaller size"))
elif new_size > curr_size:
self._resize_vhd(vhd_path, new_size)
def _resize_vhd(self, vhd_path, new_size):
if vhd_path.split('.')[-1].lower() == "vhd":
LOG.debug("Getting parent disk info for disk: %s", vhd_path)
base_disk_path = self._vhdutils.get_vhd_parent_path(vhd_path)
if base_disk_path:
# A differential VHD cannot be resized. This limitation
# does not apply to the VHDX format.
self._merge_base_vhd(vhd_path, base_disk_path)
LOG.debug("Resizing disk \"%(vhd_path)s\" to new max "
"size %(new_size)s",
{'vhd_path': vhd_path, 'new_size': new_size})
self._vhdutils.resize_vhd(vhd_path, new_size)
def _check_base_disk(self, context, instance, diff_vhd_path,
src_base_disk_path):
base_vhd_path = self._imagecache.get_cached_image(context, instance)
# If the location of the base VHD differs between the source
# and target hosts, we need to reconnect the base disk
if src_base_disk_path.lower() != base_vhd_path.lower():
LOG.debug("Reconnecting copied base VHD "
"%(base_vhd_path)s and diff "
"VHD %(diff_vhd_path)s",
{'base_vhd_path': base_vhd_path,
'diff_vhd_path': diff_vhd_path})
self._vhdutils.reconnect_parent_vhd(diff_vhd_path,
base_vhd_path)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None, power_on=True):
LOG.debug("finish_migration called", instance=instance)
instance_name = instance.name
if self._volumeops.ebs_root_in_block_devices(block_device_info):
root_vhd_path = None
else:
root_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name)
if not root_vhd_path:
raise vmutils.HyperVException(_("Cannot find boot VHD "
"file for instance: %s") %
instance_name)
root_vhd_info = self._vhdutils.get_vhd_info(root_vhd_path)
src_base_disk_path = root_vhd_info.get("ParentPath")
if src_base_disk_path:
self._check_base_disk(context, instance, root_vhd_path,
src_base_disk_path)
if resize_instance:
new_size = instance.root_gb * units.Gi
self._check_resize_vhd(root_vhd_path, root_vhd_info, new_size)
eph_vhd_path = self._pathutils.lookup_ephemeral_vhd_path(instance_name)
if resize_instance:
new_size = instance.get('ephemeral_gb', 0) * units.Gi
if not eph_vhd_path:
if new_size:
eph_vhd_path = self._vmops.create_ephemeral_vhd(instance)
else:
eph_vhd_info = self._vhdutils.get_vhd_info(eph_vhd_path)
self._check_resize_vhd(eph_vhd_path, eph_vhd_info, new_size)
vm_gen = self._vmops.get_image_vm_generation(root_vhd_path, image_meta)
self._vmops.create_instance(instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path, vm_gen)
self._check_and_attach_config_drive(instance, vm_gen)
if power_on:
self._vmops.power_on(instance)
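
_check_resize_vhd above only ever grows a disk; a worked example of the byte
size comparison it performs, using oslo_utils.units and hypothetical sizes:

    # Sizes are compared in bytes; shrinking is rejected, growing triggers a resize.
    from oslo_utils import units

    curr_max_internal_size = 10 * units.Gi   # hypothetical current VHD size
    new_size = 20 * units.Gi                 # hypothetical new flavor root_gb in bytes

    if new_size < curr_max_internal_size:
        # stands in for vmutils.VHDResizeException in the code above
        raise Exception("Cannot resize a VHD to a smaller size")
    elif new_size > curr_max_internal_size:
        print("Resizing disk to %d bytes" % new_size)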


@@ -0,0 +1,68 @@
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for network related operations.
"""
import sys
import uuid
if sys.platform == 'win32':
import wmi
from nova.i18n import _
from nova.virt.hyperv import vmutils
class NetworkUtils(object):
def __init__(self):
if sys.platform == 'win32':
self._conn = wmi.WMI(moniker='//./root/virtualization')
def get_external_vswitch(self, vswitch_name):
if vswitch_name:
vswitches = self._conn.Msvm_VirtualSwitch(ElementName=vswitch_name)
else:
# Find the vswitch that is connected to the first physical nic.
ext_port = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0]
port = ext_port.associators(wmi_result_class='Msvm_SwitchPort')[0]
vswitches = port.associators(wmi_result_class='Msvm_VirtualSwitch')
if not len(vswitches):
raise vmutils.HyperVException(_('vswitch "%s" not found')
% vswitch_name)
return vswitches[0].path_()
def create_vswitch_port(self, vswitch_path, port_name):
switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
# Create a port on the vswitch.
(new_port, ret_val) = switch_svc.CreateSwitchPort(
Name=str(uuid.uuid4()),
FriendlyName=port_name,
ScopeOfResidence="",
VirtualSwitch=vswitch_path)
if ret_val != 0:
raise vmutils.HyperVException(_("Failed to create vswitch port "
"%(port_name)s on switch "
"%(vswitch_path)s") %
{'port_name': port_name,
'vswitch_path': vswitch_path})
return new_port
def vswitch_port_needed(self):
# NOTE(alexpilotti): In WMI V2 the vswitch_path is set in the VM
# setting data without the need for a vswitch port.
return True
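A hedged usage sketch of the V1 class above (a Windows host with the legacy root/virtualization namespace is assumed; the switch and port names are only examples). In the V1 model a switch port must be created and handed to the NIC, which is what vswitch_port_needed() signals:

    netutils = NetworkUtils()
    vswitch_path = netutils.get_external_vswitch('external')   # example name
    if netutils.vswitch_port_needed():
        vswitch_data = netutils.create_vswitch_port(vswitch_path, 'demo-port')
    else:
        vswitch_data = vswitch_path
    # vswitch_data is then passed to VMUtils.set_nic_connection()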

View File

@@ -0,0 +1,63 @@
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for network related operations.
Based on the "root/virtualization/v2" namespace available starting with
Hyper-V Server / Windows Server 2012.
"""
import sys
if sys.platform == 'win32':
import wmi
from nova.i18n import _
from nova.virt.hyperv import networkutils
from nova.virt.hyperv import vmutils
class NetworkUtilsV2(networkutils.NetworkUtils):
def __init__(self):
if sys.platform == 'win32':
self._conn = wmi.WMI(moniker='//./root/virtualization/v2')
def get_external_vswitch(self, vswitch_name):
if vswitch_name:
vswitches = self._conn.Msvm_VirtualEthernetSwitch(
ElementName=vswitch_name)
if not len(vswitches):
raise vmutils.HyperVException(_('vswitch "%s" not found')
% vswitch_name)
else:
# Find the vswitch that is connected to the first physical nic.
ext_port = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0]
lep = ext_port.associators(wmi_result_class='Msvm_LANEndpoint')[0]
lep1 = lep.associators(wmi_result_class='Msvm_LANEndpoint')[0]
esw = lep1.associators(
wmi_result_class='Msvm_EthernetSwitchPort')[0]
vswitches = esw.associators(
wmi_result_class='Msvm_VirtualEthernetSwitch')
if not len(vswitches):
raise vmutils.HyperVException(_('No external vswitch found'))
return vswitches[0].path_()
def create_vswitch_port(self, vswitch_path, port_name):
raise NotImplementedError()
def vswitch_port_needed(self):
return False
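By contrast, the V2 class needs no intermediate port object; a hedged sketch (Hyper-V / Windows Server 2012 or newer assumed):

    netutils = NetworkUtilsV2()
    vswitch_data = netutils.get_external_vswitch(None)   # first external switch
    assert netutils.vswitch_port_needed() is False
    # The switch path itself is handed to set_nic_connection() by the VIF driver.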

252
hyperv/nova/pathutils.py Normal file
View File

@@ -0,0 +1,252 @@
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import sys
if sys.platform == 'win32':
import wmi
from oslo_config import cfg
from oslo_log import log as logging
from nova.i18n import _
from nova import utils
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
LOG = logging.getLogger(__name__)
hyperv_opts = [
cfg.StrOpt('instances_path_share',
default="",
               help='The name of a Windows share mapped to the '
'"instances_path" dir and used by the resize feature '
'to copy files to the target host. If left blank, an '
'administrative share will be used, looking for the same '
'"instances_path" used locally'),
]
CONF = cfg.CONF
CONF.register_opts(hyperv_opts, 'hyperv')
CONF.import_opt('instances_path', 'nova.compute.manager')
ERROR_INVALID_NAME = 123
class PathUtils(object):
def __init__(self):
self._smb_conn = wmi.WMI(moniker=r"root\Microsoft\Windows\SMB")
def open(self, path, mode):
"""Wrapper on __builtin__.open used to simplify unit testing."""
import __builtin__
return __builtin__.open(path, mode)
def exists(self, path):
return os.path.exists(path)
def makedirs(self, path):
os.makedirs(path)
def remove(self, path):
os.remove(path)
def rename(self, src, dest):
os.rename(src, dest)
def copyfile(self, src, dest):
self.copy(src, dest)
def copy(self, src, dest):
# With large files this is 2x-3x faster than shutil.copy(src, dest),
# especially when copying to a UNC target.
# shutil.copyfileobj(...) with a proper buffer is better than
# shutil.copy(...) but still 20% slower than a shell copy.
# It can be replaced with Win32 API calls to avoid the process
# spawning overhead.
output, ret = utils.execute('cmd.exe', '/C', 'copy', '/Y', src, dest)
if ret:
raise IOError(_('The file copy from %(src)s to %(dest)s failed')
% {'src': src, 'dest': dest})
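The comment above compares against buffered Python copies; a minimal sketch of that slower alternative (the buffer size is an assumption), shown only to illustrate the trade-off:

    import shutil

    def _buffered_copy(src, dest, buf_size=16 * 1024 * 1024):
        # Still roughly 20% slower than the shell copy above on UNC targets.
        with open(src, 'rb') as fsrc, open(dest, 'wb') as fdst:
            shutil.copyfileobj(fsrc, fdst, buf_size)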
def rmtree(self, path):
shutil.rmtree(path)
def get_instances_dir(self, remote_server=None):
local_instance_path = os.path.normpath(CONF.instances_path)
if remote_server:
if CONF.hyperv.instances_path_share:
path = CONF.hyperv.instances_path_share
else:
# Use an administrative share
path = local_instance_path.replace(':', '$')
return ('\\\\%(remote_server)s\\%(path)s' %
{'remote_server': remote_server, 'path': path})
else:
return local_instance_path
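A worked example of the remote path construction above (server name and local path are illustrative). With no instances_path_share configured, the administrative share of the local drive is used:

    remote_server = 'hyperv-02'                        # illustrative
    local_instance_path = r'C:\OpenStack\Instances'    # CONF.instances_path
    path = local_instance_path.replace(':', '$')
    remote_path = ('\\\\%(remote_server)s\\%(path)s' %
                   {'remote_server': remote_server, 'path': path})
    # remote_path == r'\\hyperv-02\C$\OpenStack\Instances'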
def _check_create_dir(self, path):
if not self.exists(path):
LOG.debug('Creating directory: %s', path)
self.makedirs(path)
def _check_remove_dir(self, path):
if self.exists(path):
LOG.debug('Removing directory: %s', path)
self.rmtree(path)
def _get_instances_sub_dir(self, dir_name, remote_server=None,
create_dir=True, remove_dir=False):
instances_path = self.get_instances_dir(remote_server)
path = os.path.join(instances_path, dir_name)
try:
if remove_dir:
self._check_remove_dir(path)
if create_dir:
self._check_create_dir(path)
return path
except WindowsError as ex:
if ex.winerror == ERROR_INVALID_NAME:
raise vmutils.HyperVException(_(
"Cannot access \"%(instances_path)s\", make sure the "
"path exists and that you have the proper permissions. "
"In particular Nova-Compute must not be executed with the "
"builtin SYSTEM account or other accounts unable to "
"authenticate on a remote host.") %
{'instances_path': instances_path})
raise
def get_instance_migr_revert_dir(self, instance_name, create_dir=False,
remove_dir=False):
dir_name = '%s_revert' % instance_name
return self._get_instances_sub_dir(dir_name, None, create_dir,
remove_dir)
def get_instance_dir(self, instance_name, remote_server=None,
create_dir=True, remove_dir=False):
return self._get_instances_sub_dir(instance_name, remote_server,
create_dir, remove_dir)
def _lookup_vhd_path(self, instance_name, vhd_path_func):
vhd_path = None
for format_ext in ['vhd', 'vhdx']:
test_path = vhd_path_func(instance_name, format_ext)
if self.exists(test_path):
vhd_path = test_path
break
return vhd_path
def lookup_root_vhd_path(self, instance_name):
return self._lookup_vhd_path(instance_name, self.get_root_vhd_path)
def lookup_configdrive_path(self, instance_name):
configdrive_path = None
for format_ext in constants.DISK_FORMAT_MAP:
test_path = self.get_configdrive_path(instance_name, format_ext)
if self.exists(test_path):
configdrive_path = test_path
break
return configdrive_path
def lookup_ephemeral_vhd_path(self, instance_name):
return self._lookup_vhd_path(instance_name,
self.get_ephemeral_vhd_path)
def get_root_vhd_path(self, instance_name, format_ext):
instance_path = self.get_instance_dir(instance_name)
return os.path.join(instance_path, 'root.' + format_ext.lower())
def get_configdrive_path(self, instance_name, format_ext,
remote_server=None):
instance_path = self.get_instance_dir(instance_name, remote_server)
return os.path.join(instance_path, 'configdrive.' + format_ext.lower())
def get_ephemeral_vhd_path(self, instance_name, format_ext):
instance_path = self.get_instance_dir(instance_name)
return os.path.join(instance_path, 'ephemeral.' + format_ext.lower())
def get_base_vhd_dir(self):
return self._get_instances_sub_dir('_base')
def get_export_dir(self, instance_name):
dir_name = os.path.join('export', instance_name)
return self._get_instances_sub_dir(dir_name, create_dir=True,
remove_dir=True)
def get_vm_console_log_paths(self, vm_name, remote_server=None):
instance_dir = self.get_instance_dir(vm_name,
remote_server)
console_log_path = os.path.join(instance_dir, 'console.log')
return console_log_path, console_log_path + '.1'
def check_smb_mapping(self, smbfs_share):
mappings = self._smb_conn.Msft_SmbMapping(RemotePath=smbfs_share)
if not mappings:
return False
if os.path.exists(smbfs_share):
LOG.debug('Share already mounted: %s', smbfs_share)
return True
else:
            LOG.debug('Share exists but is unavailable: %s', smbfs_share)
self.unmount_smb_share(smbfs_share, force=True)
return False
def mount_smb_share(self, smbfs_share, username=None, password=None):
try:
LOG.debug('Mounting share: %s', smbfs_share)
self._smb_conn.Msft_SmbMapping.Create(RemotePath=smbfs_share,
UserName=username,
Password=password)
except wmi.x_wmi as exc:
            err_msg = (_(
                'Unable to mount SMBFS share: %(smbfs_share)s '
                'WMI exception: %(wmi_exc)s') % {'smbfs_share': smbfs_share,
                                                 'wmi_exc': exc})
raise vmutils.HyperVException(err_msg)
def unmount_smb_share(self, smbfs_share, force=False):
mappings = self._smb_conn.Msft_SmbMapping(RemotePath=smbfs_share)
if not mappings:
LOG.debug('Share %s is not mounted. Skipping unmount.',
smbfs_share)
for mapping in mappings:
# Due to a bug in the WMI module, getting the output of
# methods returning None will raise an AttributeError
try:
mapping.Remove(Force=force)
except AttributeError:
pass
except wmi.x_wmi:
# If this fails, a 'Generic Failure' exception is raised.
# This happens even if we unforcefully unmount an in-use
# share, for which reason we'll simply ignore it in this
# case.
if force:
raise vmutils.HyperVException(
_("Could not unmount share: %s"), smbfs_share)
def copy_configdrive(self, instance_name, dest_host):
local_configdrive_path = self.get_configdrive_path(
instance_name, constants.DVD_FORMAT)
remote_configdrive_path = self.get_configdrive_path(
instance_name, constants.DVD_FORMAT, remote_server=dest_host)
self.copyfile(local_configdrive_path,
remote_configdrive_path)

View File

@@ -0,0 +1,41 @@
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from nova.console import type as ctype
from nova.virt.hyperv import hostops
from nova.virt.hyperv import utilsfactory
LOG = logging.getLogger(__name__)
class RDPConsoleOps(object):
def __init__(self):
self._hostops = hostops.HostOps()
self._vmutils = utilsfactory.get_vmutils()
self._rdpconsoleutils = utilsfactory.get_rdpconsoleutils()
def get_rdp_console(self, instance):
LOG.debug("get_rdp_console called", instance=instance)
host = self._hostops.get_host_ip_addr()
port = self._rdpconsoleutils.get_rdp_console_port()
vm_id = self._vmutils.get_vm_id(instance.name)
LOG.debug("RDP console: %(host)s:%(port)s, %(vm_id)s",
{"host": host, "port": port, "vm_id": vm_id})
return ctype.ConsoleRDP(
host=host, port=port, internal_access_path=vm_id)

View File

@@ -0,0 +1,21 @@
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class RDPConsoleUtils(object):
_DEFAULT_HYPERV_RDP_PORT = 2179
def get_rdp_console_port(self):
return self._DEFAULT_HYPERV_RDP_PORT

View File

@@ -0,0 +1,31 @@
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from nova.virt.hyperv import rdpconsoleutils
if sys.platform == 'win32':
import wmi
class RDPConsoleUtilsV2(rdpconsoleutils.RDPConsoleUtils):
def __init__(self):
if sys.platform == 'win32':
self._conn = wmi.WMI(moniker='//./root/virtualization/v2')
def get_rdp_console_port(self):
rdp_setting_data = self._conn.Msvm_TerminalServiceSettingData()[0]
return rdp_setting_data.ListenerPort

123
hyperv/nova/snapshotops.py Normal file
View File

@@ -0,0 +1,123 @@
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for VM snapshot operations.
"""
import os
from oslo_config import cfg
from oslo_log import log as logging
from nova.compute import task_states
from nova.i18n import _LW
from nova.image import glance
from nova.virt.hyperv import utilsfactory
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class SnapshotOps(object):
def __init__(self):
self._pathutils = utilsfactory.get_pathutils()
self._vmutils = utilsfactory.get_vmutils()
self._vhdutils = utilsfactory.get_vhdutils()
def _save_glance_image(self, context, image_id, image_vhd_path):
(glance_image_service,
image_id) = glance.get_remote_image_service(context, image_id)
image_metadata = {"is_public": False,
"disk_format": "vhd",
"container_format": "bare",
"properties": {}}
with self._pathutils.open(image_vhd_path, 'rb') as f:
glance_image_service.update(context, image_id, image_metadata, f)
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance."""
instance_name = instance.name
LOG.debug("Creating snapshot for instance %s", instance_name)
snapshot_path = self._vmutils.take_vm_snapshot(instance_name)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
export_dir = None
try:
src_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name)
LOG.debug("Getting info for VHD %s", src_vhd_path)
src_base_disk_path = self._vhdutils.get_vhd_parent_path(
src_vhd_path)
export_dir = self._pathutils.get_export_dir(instance_name)
dest_vhd_path = os.path.join(export_dir, os.path.basename(
src_vhd_path))
LOG.debug('Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s',
{'src_vhd_path': src_vhd_path,
'dest_vhd_path': dest_vhd_path})
self._pathutils.copyfile(src_vhd_path, dest_vhd_path)
image_vhd_path = None
if not src_base_disk_path:
image_vhd_path = dest_vhd_path
else:
basename = os.path.basename(src_base_disk_path)
dest_base_disk_path = os.path.join(export_dir, basename)
                LOG.debug('Copying base disk %(src_base_disk_path)s to '
                          '%(dest_base_disk_path)s',
                          {'src_base_disk_path': src_base_disk_path,
                           'dest_base_disk_path': dest_base_disk_path})
self._pathutils.copyfile(src_base_disk_path,
dest_base_disk_path)
LOG.debug("Reconnecting copied base VHD "
"%(dest_base_disk_path)s and diff "
"VHD %(dest_vhd_path)s",
{'dest_base_disk_path': dest_base_disk_path,
'dest_vhd_path': dest_vhd_path})
self._vhdutils.reconnect_parent_vhd(dest_vhd_path,
dest_base_disk_path)
LOG.debug("Merging base disk %(dest_base_disk_path)s and "
"diff disk %(dest_vhd_path)s",
{'dest_base_disk_path': dest_base_disk_path,
'dest_vhd_path': dest_vhd_path})
self._vhdutils.merge_vhd(dest_vhd_path, dest_base_disk_path)
image_vhd_path = dest_base_disk_path
LOG.debug("Updating Glance image %(image_id)s with content from "
"merged disk %(image_vhd_path)s",
{'image_id': image_id, 'image_vhd_path': image_vhd_path})
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
self._save_glance_image(context, image_id, image_vhd_path)
LOG.debug("Snapshot image %(image_id)s updated for VM "
"%(instance_name)s",
{'image_id': image_id, 'instance_name': instance_name})
finally:
try:
LOG.debug("Removing snapshot %s", image_id)
self._vmutils.remove_vm_snapshot(snapshot_path)
except Exception as ex:
LOG.exception(ex)
LOG.warning(_LW('Failed to remove snapshot for VM %s'),
instance_name)
if export_dir:
LOG.debug('Removing directory: %s', export_dir)
self._pathutils.rmtree(export_dir)
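For clarity, a minimal sketch (not part of the module) of the update_task_state callback contract exercised by snapshot() above:

    from nova.compute import task_states

    recorded = []

    def update_task_state(task_state, expected_state=None):
        recorded.append((task_state, expected_state))

    # snapshot() first reports (IMAGE_PENDING_UPLOAD, None) right after the
    # Hyper-V snapshot is taken, then (IMAGE_UPLOADING, IMAGE_PENDING_UPLOAD)
    # just before the merged VHD is streamed to Glance.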

106
hyperv/nova/utilsfactory.py Normal file
View File

@@ -0,0 +1,106 @@
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from nova.i18n import _
from nova.virt.hyperv import hostutils
from nova.virt.hyperv import livemigrationutils
from nova.virt.hyperv import networkutils
from nova.virt.hyperv import networkutilsv2
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import rdpconsoleutils
from nova.virt.hyperv import rdpconsoleutilsv2
from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vhdutilsv2
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import vmutilsv2
from nova.virt.hyperv import volumeutils
from nova.virt.hyperv import volumeutilsv2
hyper_opts = [
cfg.BoolOpt('force_hyperv_utils_v1',
default=False,
help='Force V1 WMI utility classes'),
cfg.BoolOpt('force_volumeutils_v1',
default=False,
help='Force V1 volume utility class'),
]
CONF = cfg.CONF
CONF.register_opts(hyper_opts, 'hyperv')
LOG = logging.getLogger(__name__)
def _get_class(v1_class, v2_class, force_v1_flag):
# V2 classes are supported starting from Hyper-V Server 2012 and
# Windows Server 2012 (kernel version 6.2)
if not force_v1_flag and get_hostutils().check_min_windows_version(6, 2):
cls = v2_class
else:
cls = v1_class
LOG.debug("Loading class: %(module_name)s.%(class_name)s",
{'module_name': cls.__module__, 'class_name': cls.__name__})
return cls
def _get_virt_utils_class(v1_class, v2_class):
# The "root/virtualization" WMI namespace is no longer supported on
# Windows Server / Hyper-V Server 2012 R2 / Windows 8.1
# (kernel version 6.3) or above.
if (CONF.hyperv.force_hyperv_utils_v1 and
get_hostutils().check_min_windows_version(6, 3)):
raise vmutils.HyperVException(
_('The "force_hyperv_utils_v1" option cannot be set to "True" '
'on Windows Server / Hyper-V Server 2012 R2 or above as the WMI '
'"root/virtualization" namespace is no longer supported.'))
return _get_class(v1_class, v2_class, CONF.hyperv.force_hyperv_utils_v1)
def get_vmutils(host='.'):
return _get_virt_utils_class(vmutils.VMUtils, vmutilsv2.VMUtilsV2)(host)
def get_vhdutils():
return _get_virt_utils_class(vhdutils.VHDUtils, vhdutilsv2.VHDUtilsV2)()
def get_networkutils():
return _get_virt_utils_class(networkutils.NetworkUtils,
networkutilsv2.NetworkUtilsV2)()
def get_hostutils():
return hostutils.HostUtils()
def get_pathutils():
return pathutils.PathUtils()
def get_volumeutils():
return _get_class(volumeutils.VolumeUtils, volumeutilsv2.VolumeUtilsV2,
CONF.hyperv.force_volumeutils_v1)()
def get_livemigrationutils():
return livemigrationutils.LiveMigrationUtils()
def get_rdpconsoleutils():
return _get_virt_utils_class(rdpconsoleutils.RDPConsoleUtils,
rdpconsoleutilsv2.RDPConsoleUtilsV2)()
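A hedged usage sketch of the factory above, assuming a Hyper-V / Windows Server 2012 or newer host (kernel 6.2+), so the V2 classes are returned unless the force flags are set:

    from nova.virt.hyperv import utilsfactory

    vmutils = utilsfactory.get_vmutils()          # VMUtilsV2 on kernel >= 6.2
    vhdutils = utilsfactory.get_vhdutils()        # VHDUtilsV2 on kernel >= 6.2
    volumeutils = utilsfactory.get_volumeutils()  # honours force_volumeutils_v1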

212
hyperv/nova/vhdutils.py Normal file
View File

@@ -0,0 +1,212 @@
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VHD related operations.
Official VHD format specs can be retrieved at:
http://technet.microsoft.com/en-us/library/bb676673.aspx
See "Download the Specifications Without Registering"
Official VHDX format specs can be retrieved at:
http://www.microsoft.com/en-us/download/details.aspx?id=34750
"""
import struct
import sys
if sys.platform == 'win32':
import wmi
from xml.etree import ElementTree
from nova.i18n import _
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
VHD_HEADER_SIZE_FIX = 512
VHD_BAT_ENTRY_SIZE = 4
VHD_DYNAMIC_DISK_HEADER_SIZE = 1024
VHD_HEADER_SIZE_DYNAMIC = 512
VHD_FOOTER_SIZE_DYNAMIC = 512
VHD_BLK_SIZE_OFFSET = 544
VHD_SIGNATURE = 'conectix'
VHDX_SIGNATURE = 'vhdxfile'
class VHDUtils(object):
def __init__(self):
self._vmutils = vmutils.VMUtils()
if sys.platform == 'win32':
self._conn = wmi.WMI(moniker='//./root/virtualization')
def validate_vhd(self, vhd_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(job_path, ret_val) = image_man_svc.ValidateVirtualHardDisk(
Path=vhd_path)
self._vmutils.check_ret_val(ret_val, job_path)
def create_dynamic_vhd(self, path, max_internal_size, format):
if format != constants.DISK_FORMAT_VHD:
raise vmutils.HyperVException(_("Unsupported disk format: %s") %
format)
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(job_path, ret_val) = image_man_svc.CreateDynamicVirtualHardDisk(
Path=path, MaxInternalSize=max_internal_size)
self._vmutils.check_ret_val(ret_val, job_path)
def create_differencing_vhd(self, path, parent_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(job_path, ret_val) = image_man_svc.CreateDifferencingVirtualHardDisk(
Path=path, ParentPath=parent_path)
self._vmutils.check_ret_val(ret_val, job_path)
def reconnect_parent_vhd(self, child_vhd_path, parent_vhd_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(job_path, ret_val) = image_man_svc.ReconnectParentVirtualHardDisk(
ChildPath=child_vhd_path,
ParentPath=parent_vhd_path,
Force=True)
self._vmutils.check_ret_val(ret_val, job_path)
def merge_vhd(self, src_vhd_path, dest_vhd_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(job_path, ret_val) = image_man_svc.MergeVirtualHardDisk(
SourcePath=src_vhd_path,
DestinationPath=dest_vhd_path)
self._vmutils.check_ret_val(ret_val, job_path)
def _get_resize_method(self):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
return image_man_svc.ExpandVirtualHardDisk
def resize_vhd(self, vhd_path, new_max_size, is_file_max_size=True):
if is_file_max_size:
new_internal_max_size = self.get_internal_vhd_size_by_file_size(
vhd_path, new_max_size)
else:
new_internal_max_size = new_max_size
resize = self._get_resize_method()
(job_path, ret_val) = resize(
Path=vhd_path, MaxInternalSize=new_internal_max_size)
self._vmutils.check_ret_val(ret_val, job_path)
def get_internal_vhd_size_by_file_size(self, vhd_path, new_vhd_file_size):
"""Fixed VHD size = Data Block size + 512 bytes
| Dynamic_VHD_size = Dynamic Disk Header
| + Copy of hard disk footer
| + Hard Disk Footer
| + Data Block
| + BAT
| Dynamic Disk header fields
| Copy of hard disk footer (512 bytes)
| Dynamic Disk Header (1024 bytes)
| BAT (Block Allocation table)
| Data Block 1
| Data Block 2
| Data Block n
| Hard Disk Footer (512 bytes)
| Default block size is 2M
| BAT entry size is 4byte
"""
base_vhd_info = self.get_vhd_info(vhd_path)
vhd_type = base_vhd_info['Type']
if vhd_type == constants.VHD_TYPE_FIXED:
vhd_header_size = VHD_HEADER_SIZE_FIX
return new_vhd_file_size - vhd_header_size
elif vhd_type == constants.VHD_TYPE_DYNAMIC:
bs = self._get_vhd_dynamic_blk_size(vhd_path)
bes = VHD_BAT_ENTRY_SIZE
ddhs = VHD_DYNAMIC_DISK_HEADER_SIZE
hs = VHD_HEADER_SIZE_DYNAMIC
fs = VHD_FOOTER_SIZE_DYNAMIC
max_internal_size = (new_vhd_file_size -
(hs + ddhs + fs)) * bs / (bes + bs)
return max_internal_size
else:
vhd_parent = self.get_vhd_parent_path(vhd_path)
return self.get_internal_vhd_size_by_file_size(vhd_parent,
new_vhd_file_size)
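A worked instance of the dynamic VHD formula above (the sizes are illustrative; 2 MB is the default block size mentioned in the docstring):

    bs = 2 * 1024 * 1024                  # default data block size (2 MiB)
    bes = VHD_BAT_ENTRY_SIZE              # 4 bytes per BAT entry
    overhead = (VHD_HEADER_SIZE_DYNAMIC + VHD_DYNAMIC_DISK_HEADER_SIZE +
                VHD_FOOTER_SIZE_DYNAMIC)  # 2048 bytes
    new_vhd_file_size = 1024 ** 3         # desired .vhd size on disk (1 GiB)
    max_internal_size = (new_vhd_file_size - overhead) * bs // (bes + bs)
    # Slightly below 1 GiB: the difference covers the 4-byte BAT entry carried
    # by every 2 MiB data block plus the 2 KiB of fixed headers.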
def _get_vhd_dynamic_blk_size(self, vhd_path):
blk_size_offset = VHD_BLK_SIZE_OFFSET
try:
with open(vhd_path, "rb") as f:
f.seek(blk_size_offset)
version = f.read(4)
except IOError:
raise vmutils.HyperVException(_("Unable to obtain block size from"
" VHD %(vhd_path)s") %
{"vhd_path": vhd_path})
return struct.unpack('>i', version)[0]
def get_vhd_parent_path(self, vhd_path):
return self.get_vhd_info(vhd_path).get("ParentPath")
def get_vhd_info(self, vhd_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(vhd_info,
job_path,
ret_val) = image_man_svc.GetVirtualHardDiskInfo(vhd_path)
self._vmutils.check_ret_val(ret_val, job_path)
vhd_info_dict = {}
et = ElementTree.fromstring(vhd_info)
for item in et.findall("PROPERTY"):
name = item.attrib["NAME"]
value_text = item.find("VALUE").text
if name == "ParentPath":
vhd_info_dict[name] = value_text
elif name in ["FileSize", "MaxInternalSize"]:
vhd_info_dict[name] = long(value_text)
elif name in ["InSavedState", "InUse"]:
vhd_info_dict[name] = bool(value_text)
elif name == "Type":
vhd_info_dict[name] = int(value_text)
return vhd_info_dict
def get_vhd_format(self, path):
with open(path, 'rb') as f:
# Read header
if f.read(8) == VHDX_SIGNATURE:
return constants.DISK_FORMAT_VHDX
# Read footer
f.seek(0, 2)
file_size = f.tell()
if file_size >= 512:
f.seek(-512, 2)
if f.read(8) == VHD_SIGNATURE:
return constants.DISK_FORMAT_VHD
raise vmutils.HyperVException(_('Unsupported virtual disk format'))
def get_best_supported_vhd_format(self):
return constants.DISK_FORMAT_VHD

242
hyperv/nova/vhdutilsv2.py Normal file
View File

@@ -0,0 +1,242 @@
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VHD related operations.
Based on the "root/virtualization/v2" namespace available starting with
Hyper-V Server / Windows Server 2012.
"""
import struct
import sys
if sys.platform == 'win32':
import wmi
from xml.etree import ElementTree
from oslo_utils import units
from nova.i18n import _
from nova.virt.hyperv import constants
from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import vmutilsv2
VHDX_BAT_ENTRY_SIZE = 8
VHDX_HEADER_OFFSETS = [64 * units.Ki, 128 * units.Ki]
VHDX_HEADER_SECTION_SIZE = units.Mi
VHDX_LOG_LENGTH_OFFSET = 68
VHDX_METADATA_SIZE_OFFSET = 64
VHDX_REGION_TABLE_OFFSET = 192 * units.Ki
VHDX_BS_METADATA_ENTRY_OFFSET = 48
class VHDUtilsV2(vhdutils.VHDUtils):
_VHD_TYPE_DYNAMIC = 3
_VHD_TYPE_DIFFERENCING = 4
_vhd_format_map = {
constants.DISK_FORMAT_VHD: 2,
constants.DISK_FORMAT_VHDX: 3,
}
def __init__(self):
self._vmutils = vmutilsv2.VMUtilsV2()
if sys.platform == 'win32':
self._conn = wmi.WMI(moniker='//./root/virtualization/v2')
def create_dynamic_vhd(self, path, max_internal_size, format):
vhd_format = self._vhd_format_map.get(format)
if not vhd_format:
raise vmutils.HyperVException(_("Unsupported disk format: %s") %
format)
self._create_vhd(self._VHD_TYPE_DYNAMIC, vhd_format, path,
max_internal_size=max_internal_size)
def create_differencing_vhd(self, path, parent_path):
# Although this method can take a size argument in case of VHDX
# images, avoid it as the underlying Win32 is currently not
# resizing the disk properly. This can be reconsidered once the
# Win32 issue is fixed.
parent_vhd_info = self.get_vhd_info(parent_path)
self._create_vhd(self._VHD_TYPE_DIFFERENCING,
parent_vhd_info["Format"],
path, parent_path=parent_path)
def _create_vhd(self, vhd_type, format, path, max_internal_size=None,
parent_path=None):
vhd_info = self._conn.Msvm_VirtualHardDiskSettingData.new()
vhd_info.Type = vhd_type
vhd_info.Format = format
vhd_info.Path = path
vhd_info.ParentPath = parent_path
if max_internal_size:
vhd_info.MaxInternalSize = max_internal_size
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(job_path, ret_val) = image_man_svc.CreateVirtualHardDisk(
VirtualDiskSettingData=vhd_info.GetText_(1))
self._vmutils.check_ret_val(ret_val, job_path)
def reconnect_parent_vhd(self, child_vhd_path, parent_vhd_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
vhd_info_xml = self._get_vhd_info_xml(image_man_svc, child_vhd_path)
et = ElementTree.fromstring(vhd_info_xml)
item = et.find(".//PROPERTY[@NAME='ParentPath']/VALUE")
if item is not None:
item.text = parent_vhd_path
else:
msg = (_("Failed to reconnect image %(child_vhd_path)s to "
"parent %(parent_vhd_path)s. The child image has no "
"parent path property.") %
{'child_vhd_path': child_vhd_path,
'parent_vhd_path': parent_vhd_path})
raise vmutils.HyperVException(msg)
vhd_info_xml = ElementTree.tostring(et)
(job_path, ret_val) = image_man_svc.SetVirtualHardDiskSettingData(
VirtualDiskSettingData=vhd_info_xml)
self._vmutils.check_ret_val(ret_val, job_path)
def _get_resize_method(self):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
return image_man_svc.ResizeVirtualHardDisk
def get_internal_vhd_size_by_file_size(self, vhd_path,
new_vhd_file_size):
"""VHDX Size = Header (1 MB)
+ Log
+ Metadata Region
+ BAT
+ Payload Blocks
Chunk size = maximum number of bytes described by a SB block
= 2 ** 23 * LogicalSectorSize
"""
vhd_format = self.get_vhd_format(vhd_path)
if vhd_format == constants.DISK_FORMAT_VHD:
return super(VHDUtilsV2,
self).get_internal_vhd_size_by_file_size(
vhd_path, new_vhd_file_size)
else:
vhd_info = self.get_vhd_info(vhd_path)
vhd_type = vhd_info['Type']
if vhd_type == self._VHD_TYPE_DIFFERENCING:
vhd_parent = self.get_vhd_parent_path(vhd_path)
return self.get_internal_vhd_size_by_file_size(vhd_parent,
new_vhd_file_size)
else:
try:
with open(vhd_path, 'rb') as f:
hs = VHDX_HEADER_SECTION_SIZE
bes = VHDX_BAT_ENTRY_SIZE
lss = vhd_info['LogicalSectorSize']
bs = self._get_vhdx_block_size(f)
ls = self._get_vhdx_log_size(f)
ms = self._get_vhdx_metadata_size_and_offset(f)[0]
chunk_ratio = (1 << 23) * lss / bs
size = new_vhd_file_size
max_internal_size = (bs * chunk_ratio * (size - hs -
ls - ms - bes - bes / chunk_ratio) / (bs *
chunk_ratio + bes * chunk_ratio + bes))
return max_internal_size - (max_internal_size % bs)
except IOError as ex:
raise vmutils.HyperVException(_("Unable to obtain "
"internal size from VHDX: "
"%(vhd_path)s. Exception: "
"%(ex)s") %
{"vhd_path": vhd_path,
"ex": ex})
def _get_vhdx_current_header_offset(self, vhdx_file):
sequence_numbers = []
for offset in VHDX_HEADER_OFFSETS:
vhdx_file.seek(offset + 8)
sequence_numbers.append(struct.unpack('<Q',
vhdx_file.read(8))[0])
current_header = sequence_numbers.index(max(sequence_numbers))
return VHDX_HEADER_OFFSETS[current_header]
def _get_vhdx_log_size(self, vhdx_file):
current_header_offset = self._get_vhdx_current_header_offset(vhdx_file)
offset = current_header_offset + VHDX_LOG_LENGTH_OFFSET
vhdx_file.seek(offset)
log_size = struct.unpack('<I', vhdx_file.read(4))[0]
return log_size
def _get_vhdx_metadata_size_and_offset(self, vhdx_file):
offset = VHDX_METADATA_SIZE_OFFSET + VHDX_REGION_TABLE_OFFSET
vhdx_file.seek(offset)
metadata_offset = struct.unpack('<Q', vhdx_file.read(8))[0]
metadata_size = struct.unpack('<I', vhdx_file.read(4))[0]
return metadata_size, metadata_offset
def _get_vhdx_block_size(self, vhdx_file):
metadata_offset = self._get_vhdx_metadata_size_and_offset(vhdx_file)[1]
offset = metadata_offset + VHDX_BS_METADATA_ENTRY_OFFSET
vhdx_file.seek(offset)
file_parameter_offset = struct.unpack('<I', vhdx_file.read(4))[0]
vhdx_file.seek(file_parameter_offset + metadata_offset)
block_size = struct.unpack('<I', vhdx_file.read(4))[0]
return block_size
def _get_vhd_info_xml(self, image_man_svc, vhd_path):
(job_path,
ret_val,
vhd_info_xml) = image_man_svc.GetVirtualHardDiskSettingData(vhd_path)
self._vmutils.check_ret_val(ret_val, job_path)
return vhd_info_xml.encode('utf8', 'xmlcharrefreplace')
def get_vhd_info(self, vhd_path):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
vhd_info_xml = self._get_vhd_info_xml(image_man_svc, vhd_path)
vhd_info_dict = {}
et = ElementTree.fromstring(vhd_info_xml)
for item in et.findall("PROPERTY"):
name = item.attrib["NAME"]
value_item = item.find("VALUE")
if value_item is None:
value_text = None
else:
value_text = value_item.text
if name in ["Path", "ParentPath"]:
vhd_info_dict[name] = value_text
elif name in ["BlockSize", "LogicalSectorSize",
"PhysicalSectorSize", "MaxInternalSize"]:
vhd_info_dict[name] = long(value_text)
elif name in ["Type", "Format"]:
vhd_info_dict[name] = int(value_text)
return vhd_info_dict
def get_best_supported_vhd_format(self):
return constants.DISK_FORMAT_VHDX

82
hyperv/nova/vif.py Normal file
View File

@@ -0,0 +1,82 @@
# Copyright 2013 Cloudbase Solutions Srl
# Copyright 2013 Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_config import cfg
from oslo_log import log as logging
from nova.virt.hyperv import utilsfactory
hyperv_opts = [
cfg.StrOpt('vswitch_name',
help='External virtual switch Name, '
'if not provided, the first external virtual '
'switch is used'),
]
CONF = cfg.CONF
CONF.register_opts(hyperv_opts, 'hyperv')
LOG = logging.getLogger(__name__)
class HyperVBaseVIFDriver(object):
@abc.abstractmethod
def plug(self, instance, vif):
pass
@abc.abstractmethod
def unplug(self, instance, vif):
pass
class HyperVNeutronVIFDriver(HyperVBaseVIFDriver):
"""Neutron VIF driver."""
def plug(self, instance, vif):
# Neutron takes care of plugging the port
pass
def unplug(self, instance, vif):
# Neutron takes care of unplugging the port
pass
class HyperVNovaNetworkVIFDriver(HyperVBaseVIFDriver):
"""Nova network VIF driver."""
def __init__(self):
self._vmutils = utilsfactory.get_vmutils()
self._netutils = utilsfactory.get_networkutils()
def plug(self, instance, vif):
vswitch_path = self._netutils.get_external_vswitch(
CONF.hyperv.vswitch_name)
vm_name = instance.name
LOG.debug('Creating vswitch port for instance: %s', vm_name)
if self._netutils.vswitch_port_needed():
vswitch_data = self._netutils.create_vswitch_port(vswitch_path,
vm_name)
else:
vswitch_data = vswitch_path
self._vmutils.set_nic_connection(vm_name, vif['id'], vswitch_data)
def unplug(self, instance, vif):
# TODO(alepilotti) Not implemented
pass
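A hedged sketch of driving the nova-network driver above; the instance stand-in and the vif dict contain only the fields plug() actually reads, and the values are illustrative:

    import collections

    FakeInstance = collections.namedtuple('FakeInstance', ['name'])
    vif = {'id': 'port-id-example', 'address': 'fa:16:3e:11:22:33'}
    HyperVNovaNetworkVIFDriver().plug(FakeInstance('instance-00000001'), vif)
    # Connects the instance NIC to CONF.hyperv.vswitch_name, or to the first
    # external vswitch when the option is left unset.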

690
hyperv/nova/vmops.py Normal file
View File

@@ -0,0 +1,690 @@
# Copyright (c) 2010 Cloud.com, Inc
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for basic VM operations.
"""
import functools
import os
import time
from eventlet import timeout as etimeout
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import units
from oslo_utils import uuidutils
from nova.api.metadata import base as instance_metadata
from nova import exception
from nova.i18n import _, _LI, _LE, _LW
from nova.openstack.common import fileutils
from nova.openstack.common import loopingcall
from nova import utils
from nova.virt import configdrive
from nova.virt import hardware
from nova.virt.hyperv import constants
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import ioutils
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
hyperv_opts = [
cfg.BoolOpt('limit_cpu_features',
default=False,
help='Required for live migration among '
'hosts with different CPU features'),
cfg.BoolOpt('config_drive_inject_password',
default=False,
help='Sets the admin password in the config drive image'),
cfg.StrOpt('qemu_img_cmd',
default="qemu-img.exe",
help='Path of qemu-img command which is used to convert '
'between different image types'),
cfg.BoolOpt('config_drive_cdrom',
default=False,
help='Attaches the Config Drive image as a cdrom drive '
'instead of a disk drive'),
cfg.BoolOpt('enable_instance_metrics_collection',
default=False,
                help='Enables metrics collection for an instance by using '
                     'Hyper-V\'s metric APIs. Collected data can be retrieved '
'by other apps and services, e.g.: Ceilometer. '
'Requires Hyper-V / Windows Server 2012 and above'),
cfg.FloatOpt('dynamic_memory_ratio',
default=1.0,
help='Enables dynamic memory allocation (ballooning) when '
'set to a value greater than 1. The value expresses '
'the ratio between the total RAM assigned to an '
'instance and its startup RAM amount. For example a '
'ratio of 2.0 for an instance with 1024MB of RAM '
'implies 512MB of RAM allocated at startup'),
cfg.IntOpt('wait_soft_reboot_seconds',
default=60,
help='Number of seconds to wait for instance to shut down after'
' soft reboot request is made. We fall back to hard reboot'
' if instance does not shutdown within this window.'),
]
CONF = cfg.CONF
CONF.register_opts(hyperv_opts, 'hyperv')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('network_api_class', 'nova.network')
SHUTDOWN_TIME_INCREMENT = 5
REBOOT_TYPE_SOFT = 'SOFT'
REBOOT_TYPE_HARD = 'HARD'
VM_GENERATIONS = {
constants.IMAGE_PROP_VM_GEN_1: constants.VM_GEN_1,
constants.IMAGE_PROP_VM_GEN_2: constants.VM_GEN_2
}
VM_GENERATIONS_CONTROLLER_TYPES = {
constants.VM_GEN_1: constants.CTRL_TYPE_IDE,
constants.VM_GEN_2: constants.CTRL_TYPE_SCSI
}
def check_admin_permissions(function):
@functools.wraps(function)
def wrapper(self, *args, **kwds):
# Make sure the windows account has the required admin permissions.
self._vmutils.check_admin_permissions()
return function(self, *args, **kwds)
return wrapper
class VMOps(object):
_vif_driver_class_map = {
'nova.network.neutronv2.api.API':
'nova.virt.hyperv.vif.HyperVNeutronVIFDriver',
'nova.network.api.API':
'nova.virt.hyperv.vif.HyperVNovaNetworkVIFDriver',
}
# The console log is stored in two files, each should have at most half of
# the maximum console log size.
_MAX_CONSOLE_LOG_FILE_SIZE = units.Mi / 2
def __init__(self):
self._vmutils = utilsfactory.get_vmutils()
self._vhdutils = utilsfactory.get_vhdutils()
self._pathutils = utilsfactory.get_pathutils()
self._hostutils = utilsfactory.get_hostutils()
self._volumeops = volumeops.VolumeOps()
self._imagecache = imagecache.ImageCache()
self._vif_driver = None
self._load_vif_driver_class()
self._vm_log_writers = {}
def _load_vif_driver_class(self):
try:
class_name = self._vif_driver_class_map[CONF.network_api_class]
self._vif_driver = importutils.import_object(class_name)
except KeyError:
raise TypeError(_("VIF driver not found for "
"network_api_class: %s") %
CONF.network_api_class)
def list_instance_uuids(self):
instance_uuids = []
for (instance_name, notes) in self._vmutils.list_instance_notes():
if notes and uuidutils.is_uuid_like(notes[0]):
instance_uuids.append(str(notes[0]))
else:
LOG.debug("Notes not found or not resembling a GUID for "
"instance: %s" % instance_name)
return instance_uuids
def list_instances(self):
return self._vmutils.list_instances()
def get_info(self, instance):
"""Get information about the VM."""
LOG.debug("get_info called for instance", instance=instance)
instance_name = instance.name
if not self._vmutils.vm_exists(instance_name):
raise exception.InstanceNotFound(instance_id=instance.uuid)
info = self._vmutils.get_vm_summary_info(instance_name)
state = constants.HYPERV_POWER_STATE[info['EnabledState']]
return hardware.InstanceInfo(state=state,
max_mem_kb=info['MemoryUsage'],
mem_kb=info['MemoryUsage'],
num_cpu=info['NumberOfProcessors'],
cpu_time_ns=info['UpTime'])
def _create_root_vhd(self, context, instance):
base_vhd_path = self._imagecache.get_cached_image(context, instance)
base_vhd_info = self._vhdutils.get_vhd_info(base_vhd_path)
base_vhd_size = base_vhd_info['MaxInternalSize']
format_ext = base_vhd_path.split('.')[-1]
root_vhd_path = self._pathutils.get_root_vhd_path(instance.name,
format_ext)
root_vhd_size = instance.root_gb * units.Gi
try:
if CONF.use_cow_images:
LOG.debug("Creating differencing VHD. Parent: "
"%(base_vhd_path)s, Target: %(root_vhd_path)s",
{'base_vhd_path': base_vhd_path,
'root_vhd_path': root_vhd_path},
instance=instance)
self._vhdutils.create_differencing_vhd(root_vhd_path,
base_vhd_path)
vhd_type = self._vhdutils.get_vhd_format(base_vhd_path)
if vhd_type == constants.DISK_FORMAT_VHD:
                    # Differencing VHD images cannot be resized, but the
                    # cached base image has already been resized. VHDX
                    # differencing images do support resizing, so the root
                    # image will be resized below instead, if needed.
return root_vhd_path
else:
LOG.debug("Copying VHD image %(base_vhd_path)s to target: "
"%(root_vhd_path)s",
{'base_vhd_path': base_vhd_path,
'root_vhd_path': root_vhd_path},
instance=instance)
self._pathutils.copyfile(base_vhd_path, root_vhd_path)
root_vhd_internal_size = (
self._vhdutils.get_internal_vhd_size_by_file_size(
base_vhd_path, root_vhd_size))
if self._is_resize_needed(root_vhd_path, base_vhd_size,
root_vhd_internal_size,
instance):
self._vhdutils.resize_vhd(root_vhd_path,
root_vhd_internal_size,
is_file_max_size=False)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(root_vhd_path):
self._pathutils.remove(root_vhd_path)
return root_vhd_path
def _is_resize_needed(self, vhd_path, old_size, new_size, instance):
if new_size < old_size:
error_msg = _("Cannot resize a VHD to a smaller size, the"
" original size is %(old_size)s, the"
" newer size is %(new_size)s"
) % {'old_size': old_size,
'new_size': new_size}
raise vmutils.VHDResizeException(error_msg)
elif new_size > old_size:
LOG.debug("Resizing VHD %(vhd_path)s to new "
"size %(new_size)s" %
{'new_size': new_size,
'vhd_path': vhd_path},
instance=instance)
return True
return False
def create_ephemeral_vhd(self, instance):
eph_vhd_size = instance.get('ephemeral_gb', 0) * units.Gi
if eph_vhd_size:
vhd_format = self._vhdutils.get_best_supported_vhd_format()
eph_vhd_path = self._pathutils.get_ephemeral_vhd_path(
instance.name, vhd_format)
self._vhdutils.create_dynamic_vhd(eph_vhd_path, eph_vhd_size,
vhd_format)
return eph_vhd_path
@check_admin_permissions
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info=None):
"""Create a new VM and start it."""
LOG.info(_LI("Spawning new instance"), instance=instance)
instance_name = instance.name
if self._vmutils.vm_exists(instance_name):
raise exception.InstanceExists(name=instance_name)
# Make sure we're starting with a clean slate.
self._delete_disk_files(instance_name)
if self._volumeops.ebs_root_in_block_devices(block_device_info):
root_vhd_path = None
else:
root_vhd_path = self._create_root_vhd(context, instance)
eph_vhd_path = self.create_ephemeral_vhd(instance)
vm_gen = self.get_image_vm_generation(root_vhd_path, image_meta)
try:
self.create_instance(instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path, vm_gen)
if configdrive.required_by(instance):
configdrive_path = self._create_config_drive(instance,
injected_files,
admin_password,
network_info)
self.attach_config_drive(instance, configdrive_path, vm_gen)
self.power_on(instance)
except Exception:
with excutils.save_and_reraise_exception():
self.destroy(instance)
def create_instance(self, instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path, vm_gen):
instance_name = instance.name
self._vmutils.create_vm(instance_name,
instance.memory_mb,
instance.vcpus,
CONF.hyperv.limit_cpu_features,
CONF.hyperv.dynamic_memory_ratio,
vm_gen,
[instance.uuid])
self._vmutils.create_scsi_controller(instance_name)
controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]
ctrl_disk_addr = 0
if root_vhd_path:
self._attach_drive(instance_name, root_vhd_path, 0, ctrl_disk_addr,
controller_type)
ctrl_disk_addr += 1
if eph_vhd_path:
self._attach_drive(instance_name, eph_vhd_path, 0, ctrl_disk_addr,
controller_type)
        # If ebs_root is False, the first volume will be attached to the SCSI
        # controller. Generation 2 VMs only have a SCSI controller.
ebs_root = vm_gen is not constants.VM_GEN_2 and root_vhd_path is None
self._volumeops.attach_volumes(block_device_info,
instance_name,
ebs_root)
for vif in network_info:
LOG.debug('Creating nic for instance', instance=instance)
self._vmutils.create_nic(instance_name,
vif['id'],
vif['address'])
self._vif_driver.plug(instance, vif)
if CONF.hyperv.enable_instance_metrics_collection:
self._vmutils.enable_vm_metrics_collection(instance_name)
self._create_vm_com_port_pipe(instance)
def _attach_drive(self, instance_name, path, drive_addr, ctrl_disk_addr,
controller_type, drive_type=constants.DISK):
if controller_type == constants.CTRL_TYPE_SCSI:
self._vmutils.attach_scsi_drive(instance_name, path, drive_type)
else:
self._vmutils.attach_ide_drive(instance_name, path, drive_addr,
ctrl_disk_addr, drive_type)
def get_image_vm_generation(self, root_vhd_path, image_meta):
image_props = image_meta['properties']
default_vm_gen = self._hostutils.get_default_vm_generation()
image_prop_vm = image_props.get(constants.IMAGE_PROP_VM_GEN,
default_vm_gen)
if image_prop_vm not in self._hostutils.get_supported_vm_types():
            LOG.error(_LE('Requested VM Generation %s is not supported on '
                          'this OS.'), image_prop_vm)
raise vmutils.HyperVException(
_('Requested VM Generation %s is not supported on this '
'OS.') % image_prop_vm)
vm_gen = VM_GENERATIONS[image_prop_vm]
if (vm_gen != constants.VM_GEN_1 and root_vhd_path and
self._vhdutils.get_vhd_format(
root_vhd_path) == constants.DISK_FORMAT_VHD):
LOG.error(_LE('Requested VM Generation %s, but provided VHD '
'instead of VHDX.'), vm_gen)
raise vmutils.HyperVException(
_('Requested VM Generation %s, but provided VHD instead of '
'VHDX.') % vm_gen)
return vm_gen
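An illustrative image_meta fragment for the checks above, using only the constants referenced by the code; a VHDX root disk is assumed, since generation 2 VMs reject plain VHD images:

    image_meta = {'properties': {constants.IMAGE_PROP_VM_GEN:
                                 constants.IMAGE_PROP_VM_GEN_2}}
    # get_image_vm_generation(root_vhdx_path, image_meta) -> constants.VM_GEN_2
    # With a .vhd root disk the same call raises HyperVException instead.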
def _create_config_drive(self, instance, injected_files, admin_password,
network_info):
if CONF.config_drive_format != 'iso9660':
raise vmutils.UnsupportedConfigDriveFormatException(
_('Invalid config_drive_format "%s"') %
CONF.config_drive_format)
LOG.info(_LI('Using config drive for instance'), instance=instance)
extra_md = {}
if admin_password and CONF.hyperv.config_drive_inject_password:
extra_md['admin_pass'] = admin_password
inst_md = instance_metadata.InstanceMetadata(instance,
content=injected_files,
extra_md=extra_md,
network_info=network_info)
instance_path = self._pathutils.get_instance_dir(
instance.name)
configdrive_path_iso = os.path.join(instance_path, 'configdrive.iso')
LOG.info(_LI('Creating config drive at %(path)s'),
{'path': configdrive_path_iso}, instance=instance)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
try:
cdb.make_drive(configdrive_path_iso)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Creating config drive failed with '
'error: %s'),
e, instance=instance)
if not CONF.hyperv.config_drive_cdrom:
configdrive_path = os.path.join(instance_path,
'configdrive.vhd')
utils.execute(CONF.hyperv.qemu_img_cmd,
'convert',
'-f',
'raw',
'-O',
'vpc',
configdrive_path_iso,
configdrive_path,
attempts=1)
self._pathutils.remove(configdrive_path_iso)
else:
configdrive_path = configdrive_path_iso
return configdrive_path
def attach_config_drive(self, instance, configdrive_path, vm_gen):
configdrive_ext = configdrive_path[(configdrive_path.rfind('.') + 1):]
        # Attach the config drive here; if its file format is not listed in
        # constants.DISK_FORMAT_MAP, bail out with InvalidDiskFormat.
try:
drive_type = constants.DISK_FORMAT_MAP[configdrive_ext]
controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]
self._attach_drive(instance.name, configdrive_path, 1, 0,
controller_type, drive_type)
except KeyError:
raise exception.InvalidDiskFormat(disk_format=configdrive_ext)
def _delete_disk_files(self, instance_name):
self._pathutils.get_instance_dir(instance_name,
create_dir=False,
remove_dir=True)
def destroy(self, instance, network_info=None, block_device_info=None,
destroy_disks=True):
instance_name = instance.name
LOG.info(_LI("Got request to destroy instance"), instance=instance)
try:
if self._vmutils.vm_exists(instance_name):
# Stop the VM first.
self.power_off(instance)
self._vmutils.destroy_vm(instance_name)
self._volumeops.disconnect_volumes(block_device_info)
else:
LOG.debug("Instance not found", instance=instance)
if destroy_disks:
self._delete_disk_files(instance_name)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Failed to destroy instance: %s'),
instance_name)
def reboot(self, instance, network_info, reboot_type):
"""Reboot the specified instance."""
LOG.debug("Rebooting instance", instance=instance)
if reboot_type == REBOOT_TYPE_SOFT:
if self._soft_shutdown(instance):
self.power_on(instance)
return
self._set_vm_state(instance,
constants.HYPERV_VM_STATE_REBOOT)
def _soft_shutdown(self, instance,
timeout=CONF.hyperv.wait_soft_reboot_seconds,
retry_interval=SHUTDOWN_TIME_INCREMENT):
"""Perform a soft shutdown on the VM.
:return: True if the instance was shutdown within time limit,
False otherwise.
"""
LOG.debug("Performing Soft shutdown on instance", instance=instance)
while timeout > 0:
# Perform a soft shutdown on the instance.
# Wait maximum timeout for the instance to be shutdown.
# If it was not shutdown, retry until it succeeds or a maximum of
# time waited is equal to timeout.
wait_time = min(retry_interval, timeout)
try:
LOG.debug("Soft shutdown instance, timeout remaining: %d",
timeout, instance=instance)
self._vmutils.soft_shutdown_vm(instance.name)
if self._wait_for_power_off(instance.name, wait_time):
LOG.info(_LI("Soft shutdown succeeded."),
instance=instance)
return True
except vmutils.HyperVException as e:
# Exception is raised when trying to shutdown the instance
# while it is still booting.
LOG.debug("Soft shutdown failed: %s", e, instance=instance)
time.sleep(wait_time)
timeout -= retry_interval
LOG.warning(_LW("Timed out while waiting for soft shutdown."),
instance=instance)
return False
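The retry arithmetic above with the module defaults, for reference (wait_soft_reboot_seconds and SHUTDOWN_TIME_INCREMENT):

    timeout = 60          # CONF.hyperv.wait_soft_reboot_seconds default
    retry_interval = 5    # SHUTDOWN_TIME_INCREMENT
    max_attempts = timeout // retry_interval   # at most 12 soft shutdown
                                               # requests before giving up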
def pause(self, instance):
"""Pause VM instance."""
LOG.debug("Pause instance", instance=instance)
self._set_vm_state(instance,
constants.HYPERV_VM_STATE_PAUSED)
def unpause(self, instance):
"""Unpause paused VM instance."""
LOG.debug("Unpause instance", instance=instance)
self._set_vm_state(instance,
constants.HYPERV_VM_STATE_ENABLED)
def suspend(self, instance):
"""Suspend the specified instance."""
LOG.debug("Suspend instance", instance=instance)
self._set_vm_state(instance,
constants.HYPERV_VM_STATE_SUSPENDED)
def resume(self, instance):
"""Resume the suspended VM instance."""
LOG.debug("Resume instance", instance=instance)
self._set_vm_state(instance,
constants.HYPERV_VM_STATE_ENABLED)
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance."""
LOG.debug("Power off instance", instance=instance)
if retry_interval <= 0:
retry_interval = SHUTDOWN_TIME_INCREMENT
if timeout and self._soft_shutdown(instance, timeout, retry_interval):
return
self._set_vm_state(instance,
constants.HYPERV_VM_STATE_DISABLED)
def power_on(self, instance, block_device_info=None):
"""Power on the specified instance."""
LOG.debug("Power on instance", instance=instance)
if block_device_info:
self._volumeops.fix_instance_volume_disk_paths(instance.name,
block_device_info)
self._set_vm_state(instance, constants.HYPERV_VM_STATE_ENABLED)
def _set_vm_state(self, instance, req_state):
instance_name = instance.name
instance_uuid = instance.uuid
try:
self._vmutils.set_vm_state(instance_name, req_state)
if req_state in (constants.HYPERV_VM_STATE_DISABLED,
constants.HYPERV_VM_STATE_REBOOT):
self._delete_vm_console_log(instance)
if req_state in (constants.HYPERV_VM_STATE_ENABLED,
constants.HYPERV_VM_STATE_REBOOT):
self.log_vm_serial_output(instance_name,
instance_uuid)
LOG.debug("Successfully changed state of VM %(instance_name)s"
" to: %(req_state)s", {'instance_name': instance_name,
'req_state': req_state})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to change vm state of %(instance_name)s"
" to %(req_state)s"),
{'instance_name': instance_name,
'req_state': req_state})
def _get_vm_state(self, instance_name):
summary_info = self._vmutils.get_vm_summary_info(instance_name)
return summary_info['EnabledState']
def _wait_for_power_off(self, instance_name, time_limit):
"""Waiting for a VM to be in a disabled state.
:return: True if the instance is shutdown within time_limit,
False otherwise.
"""
desired_vm_states = [constants.HYPERV_VM_STATE_DISABLED]
def _check_vm_status(instance_name):
if self._get_vm_state(instance_name) in desired_vm_states:
raise loopingcall.LoopingCallDone()
periodic_call = loopingcall.FixedIntervalLoopingCall(_check_vm_status,
instance_name)
try:
# add a timeout to the periodic call.
periodic_call.start(interval=SHUTDOWN_TIME_INCREMENT)
etimeout.with_timeout(time_limit, periodic_call.wait)
except etimeout.Timeout:
# VM did not shutdown in the expected time_limit.
return False
finally:
# stop the periodic call, in case of exceptions or Timeout.
periodic_call.stop()
return True
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""Resume guest state when a host is booted."""
self.power_on(instance, block_device_info)
def log_vm_serial_output(self, instance_name, instance_uuid):
        # Uses a thread that runs in the background, reading the console
        # output from the corresponding named pipe and writing it to a file.
console_log_path = self._pathutils.get_vm_console_log_paths(
instance_name)[0]
pipe_path = r'\\.\pipe\%s' % instance_uuid
vm_log_writer = ioutils.IOThread(pipe_path, console_log_path,
self._MAX_CONSOLE_LOG_FILE_SIZE)
self._vm_log_writers[instance_uuid] = vm_log_writer
vm_log_writer.start()
def get_console_output(self, instance):
console_log_paths = (
self._pathutils.get_vm_console_log_paths(instance.name))
try:
instance_log = ''
# Start with the oldest console log file.
for console_log_path in console_log_paths[::-1]:
if os.path.exists(console_log_path):
with open(console_log_path, 'rb') as fp:
instance_log += fp.read()
return instance_log
except IOError as err:
msg = _("Could not get instance console log. Error: %s") % err
            raise vmutils.HyperVException(msg)
def _delete_vm_console_log(self, instance):
console_log_files = self._pathutils.get_vm_console_log_paths(
instance.name)
vm_log_writer = self._vm_log_writers.get(instance.uuid)
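        # Stop the console log writer thread first, so the log files are no
        # longer in use when they are deleted below.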
if vm_log_writer:
vm_log_writer.join()
for log_file in console_log_files:
fileutils.delete_if_exists(log_file)
def copy_vm_console_logs(self, vm_name, dest_host):
local_log_paths = self._pathutils.get_vm_console_log_paths(
vm_name)
remote_log_paths = self._pathutils.get_vm_console_log_paths(
vm_name, remote_server=dest_host)
for local_log_path, remote_log_path in zip(local_log_paths,
remote_log_paths):
if self._pathutils.exists(local_log_path):
self._pathutils.copy(local_log_path,
remote_log_path)
def _create_vm_com_port_pipe(self, instance):
# Creates a pipe to the COM 0 serial port of the specified vm.
pipe_path = r'\\.\pipe\%s' % instance.uuid
self._vmutils.get_vm_serial_port_connection(
instance.name, update_connection=pipe_path)
def restart_vm_log_writers(self):
# Restart the VM console log writers after nova compute restarts.
active_instances = self._vmutils.get_active_instances()
for instance_name in active_instances:
instance_path = self._pathutils.get_instance_dir(instance_name)
# Skip instances that are not created by Nova
if not os.path.exists(instance_path):
continue
vm_serial_conn = self._vmutils.get_vm_serial_port_connection(
instance_name)
if vm_serial_conn:
instance_uuid = os.path.basename(vm_serial_conn)
self.log_vm_serial_output(instance_name, instance_uuid)

763
hyperv/nova/vmutils.py Normal file
View File

@@ -0,0 +1,763 @@
# Copyright (c) 2010 Cloud.com, Inc
# Copyright 2012 Cloudbase Solutions Srl / Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VM related operations on Hyper-V.
"""
import sys
import time
import uuid
if sys.platform == 'win32':
import wmi
from oslo_config import cfg
from oslo_log import log as logging
from nova import exception
from nova.i18n import _, _LW
from nova.virt.hyperv import constants
from nova.virt.hyperv import hostutils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# TODO(alexpilotti): Move the exceptions to a separate module
# TODO(alexpilotti): Add more domain exceptions
class HyperVException(exception.NovaException):
def __init__(self, message=None):
super(HyperVException, self).__init__(message)
# TODO(alexpilotti): Add a storage exception base class
class VHDResizeException(HyperVException):
    def __init__(self, message=None):
        super(VHDResizeException, self).__init__(message)
class HyperVAuthorizationException(HyperVException):
    def __init__(self, message=None):
        super(HyperVAuthorizationException, self).__init__(message)
class UnsupportedConfigDriveFormatException(HyperVException):
    def __init__(self, message=None):
        super(UnsupportedConfigDriveFormatException, self).__init__(message)
class VMUtils(object):
# These constants can be overridden by inherited classes
_PHYS_DISK_RES_SUB_TYPE = 'Microsoft Physical Disk Drive'
_DISK_DRIVE_RES_SUB_TYPE = 'Microsoft Synthetic Disk Drive'
_DVD_DRIVE_RES_SUB_TYPE = 'Microsoft Synthetic DVD Drive'
_HARD_DISK_RES_SUB_TYPE = 'Microsoft Virtual Hard Disk'
_DVD_DISK_RES_SUB_TYPE = 'Microsoft Virtual CD/DVD Disk'
_IDE_CTRL_RES_SUB_TYPE = 'Microsoft Emulated IDE Controller'
_SCSI_CTRL_RES_SUB_TYPE = 'Microsoft Synthetic SCSI Controller'
_SERIAL_PORT_RES_SUB_TYPE = 'Microsoft Serial Port'
_SETTINGS_DEFINE_STATE_CLASS = 'Msvm_SettingsDefineState'
_VIRTUAL_SYSTEM_SETTING_DATA_CLASS = 'Msvm_VirtualSystemSettingData'
_RESOURCE_ALLOC_SETTING_DATA_CLASS = 'Msvm_ResourceAllocationSettingData'
_PROCESSOR_SETTING_DATA_CLASS = 'Msvm_ProcessorSettingData'
_MEMORY_SETTING_DATA_CLASS = 'Msvm_MemorySettingData'
_STORAGE_ALLOC_SETTING_DATA_CLASS = _RESOURCE_ALLOC_SETTING_DATA_CLASS
_SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS = \
'Msvm_SyntheticEthernetPortSettingData'
_AFFECTED_JOB_ELEMENT_CLASS = "Msvm_AffectedJobElement"
_SHUTDOWN_COMPONENT = "Msvm_ShutdownComponent"
_VIRTUAL_SYSTEM_CURRENT_SETTINGS = 3
_AUTOMATIC_STARTUP_ACTION_NONE = 0
_vm_power_states_map = {constants.HYPERV_VM_STATE_ENABLED: 2,
constants.HYPERV_VM_STATE_DISABLED: 3,
constants.HYPERV_VM_STATE_SHUTTING_DOWN: 4,
constants.HYPERV_VM_STATE_REBOOT: 10,
constants.HYPERV_VM_STATE_PAUSED: 32768,
constants.HYPERV_VM_STATE_SUSPENDED: 32769}
def __init__(self, host='.'):
self._enabled_states_map = {v: k for k, v in
self._vm_power_states_map.iteritems()}
if sys.platform == 'win32':
self._init_hyperv_wmi_conn(host)
self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)
        # On versions of Hyper-V prior to 2012, trying to directly set
        # properties in default setting data WMI objects results in an
        # exception.
self._clone_wmi_objs = False
if sys.platform == 'win32':
            host_utils = hostutils.HostUtils()
            self._clone_wmi_objs = not host_utils.check_min_windows_version(
                6, 2)
def _init_hyperv_wmi_conn(self, host):
self._conn = wmi.WMI(moniker='//%s/root/virtualization' % host)
def list_instance_notes(self):
instance_notes = []
for vs in self._conn.Msvm_VirtualSystemSettingData(
['ElementName', 'Notes'],
SettingType=self._VIRTUAL_SYSTEM_CURRENT_SETTINGS):
instance_notes.append((vs.ElementName,
[v for v in vs.Notes.split('\n') if v]))
return instance_notes
def list_instances(self):
"""Return the names of all the instances known to Hyper-V."""
return [v.ElementName for v in
self._conn.Msvm_VirtualSystemSettingData(
['ElementName'],
SettingType=self._VIRTUAL_SYSTEM_CURRENT_SETTINGS)]
def get_vm_summary_info(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vmsettings = vm.associators(
wmi_association_class=self._SETTINGS_DEFINE_STATE_CLASS,
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
settings_paths = [v.path_() for v in vmsettings]
# See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
(ret_val, summary_info) = vs_man_svc.GetSummaryInformation(
[constants.VM_SUMMARY_NUM_PROCS,
constants.VM_SUMMARY_ENABLED_STATE,
constants.VM_SUMMARY_MEMORY_USAGE,
constants.VM_SUMMARY_UPTIME],
settings_paths)
if ret_val:
raise HyperVException(_('Cannot get VM summary data for: %s')
% vm_name)
si = summary_info[0]
memory_usage = None
if si.MemoryUsage is not None:
memory_usage = long(si.MemoryUsage)
up_time = None
if si.UpTime is not None:
up_time = long(si.UpTime)
# Nova requires a valid state to be returned. Hyper-V has more
# states than Nova, typically intermediate ones and since there is
# no direct mapping for those, ENABLED is the only reasonable option
# considering that in all the non mappable states the instance
# is running.
enabled_state = self._enabled_states_map.get(si.EnabledState,
constants.HYPERV_VM_STATE_ENABLED)
summary_info_dict = {'NumberOfProcessors': si.NumberOfProcessors,
'EnabledState': enabled_state,
'MemoryUsage': memory_usage,
'UpTime': up_time}
return summary_info_dict
def _lookup_vm_check(self, vm_name):
vm = self._lookup_vm(vm_name)
if not vm:
raise exception.NotFound(_('VM not found: %s') % vm_name)
return vm
def _lookup_vm(self, vm_name):
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
n = len(vms)
if n == 0:
return None
elif n > 1:
raise HyperVException(_('Duplicate VM name found: %s') % vm_name)
else:
return vms[0]
def vm_exists(self, vm_name):
return self._lookup_vm(vm_name) is not None
def get_vm_id(self, vm_name):
vm = self._lookup_vm_check(vm_name)
return vm.Name
def _get_vm_setting_data(self, vm):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
# Avoid snapshots
        return [s for s in vmsettings if
                s.SettingType == self._VIRTUAL_SYSTEM_CURRENT_SETTINGS][0]
def _set_vm_memory(self, vm, vmsetting, memory_mb, dynamic_memory_ratio):
mem_settings = vmsetting.associators(
wmi_result_class=self._MEMORY_SETTING_DATA_CLASS)[0]
max_mem = long(memory_mb)
mem_settings.Limit = max_mem
if dynamic_memory_ratio > 1:
mem_settings.DynamicMemoryEnabled = True
# Must be a multiple of 2
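            # (shifting right then left by one bit rounds the value down to
            # an even number)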
reserved_mem = min(
long(max_mem / dynamic_memory_ratio) >> 1 << 1,
max_mem)
else:
mem_settings.DynamicMemoryEnabled = False
reserved_mem = max_mem
mem_settings.Reservation = reserved_mem
# Start with the minimum memory
mem_settings.VirtualQuantity = reserved_mem
self._modify_virt_resource(mem_settings, vm.path_())
def _set_vm_vcpus(self, vm, vmsetting, vcpus_num, limit_cpu_features):
procsetting = vmsetting.associators(
wmi_result_class=self._PROCESSOR_SETTING_DATA_CLASS)[0]
vcpus = long(vcpus_num)
procsetting.VirtualQuantity = vcpus
procsetting.Reservation = vcpus
procsetting.Limit = 100000 # static assignment to 100%
procsetting.LimitProcessorFeatures = limit_cpu_features
self._modify_virt_resource(procsetting, vm.path_())
def update_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features,
dynamic_memory_ratio):
vm = self._lookup_vm_check(vm_name)
vmsetting = self._get_vm_setting_data(vm)
self._set_vm_memory(vm, vmsetting, memory_mb, dynamic_memory_ratio)
self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
def check_admin_permissions(self):
if not self._conn.Msvm_VirtualSystemManagementService():
msg = _("The Windows account running nova-compute on this Hyper-V"
" host doesn't have the required permissions to create or"
" operate the virtual machine.")
raise HyperVAuthorizationException(msg)
def create_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features,
dynamic_memory_ratio, vm_gen, notes=None):
"""Creates a VM."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
LOG.debug('Creating VM %s', vm_name)
vm = self._create_vm_obj(vs_man_svc, vm_name, vm_gen, notes,
dynamic_memory_ratio)
vmsetting = self._get_vm_setting_data(vm)
LOG.debug('Setting memory for vm %s', vm_name)
self._set_vm_memory(vm, vmsetting, memory_mb, dynamic_memory_ratio)
LOG.debug('Set vCPUs for vm %s', vm_name)
self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
def _create_vm_obj(self, vs_man_svc, vm_name, vm_gen, notes,
dynamic_memory_ratio):
vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
vs_gs_data.ElementName = vm_name
# Don't start automatically on host boot
vs_gs_data.AutomaticStartupAction = self._AUTOMATIC_STARTUP_ACTION_NONE
(vm_path,
job_path,
ret_val) = vs_man_svc.DefineVirtualSystem([], None,
vs_gs_data.GetText_(1))
self.check_ret_val(ret_val, job_path)
vm = self._get_wmi_obj(vm_path)
if notes:
vmsetting = self._get_vm_setting_data(vm)
vmsetting.Notes = '\n'.join(notes)
self._modify_virtual_system(vs_man_svc, vm_path, vmsetting)
return self._get_wmi_obj(vm_path)
def _modify_virtual_system(self, vs_man_svc, vm_path, vmsetting):
(job_path, ret_val) = vs_man_svc.ModifyVirtualSystem(
ComputerSystem=vm_path,
SystemSettingData=vmsetting.GetText_(1))[1:]
self.check_ret_val(ret_val, job_path)
def get_vm_scsi_controller(self, vm_name):
vm = self._lookup_vm_check(vm_name)
return self._get_vm_scsi_controller(vm)
def _get_vm_scsi_controller(self, vm):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
res = [r for r in rasds
if r.ResourceSubType == self._SCSI_CTRL_RES_SUB_TYPE][0]
return res.path_()
def _get_vm_ide_controller(self, vm, ctrller_addr):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
ide_ctrls = [r for r in rasds
if r.ResourceSubType == self._IDE_CTRL_RES_SUB_TYPE
and r.Address == str(ctrller_addr)]
return ide_ctrls[0].path_() if ide_ctrls else None
def get_vm_ide_controller(self, vm_name, ctrller_addr):
vm = self._lookup_vm_check(vm_name)
return self._get_vm_ide_controller(vm, ctrller_addr)
def get_attached_disks(self, scsi_controller_path):
volumes = self._conn.query(
self._get_attached_disks_query_string(scsi_controller_path))
return volumes
def _get_attached_disks_query_string(self, scsi_controller_path):
return ("SELECT * FROM %(class_name)s WHERE ("
"ResourceSubType='%(res_sub_type)s' OR "
"ResourceSubType='%(res_sub_type_virt)s') AND "
"Parent='%(parent)s'" % {
'class_name': self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
'res_sub_type': self._PHYS_DISK_RES_SUB_TYPE,
'res_sub_type_virt': self._DISK_DRIVE_RES_SUB_TYPE,
'parent': scsi_controller_path.replace("'", "''")})
def _get_new_setting_data(self, class_name):
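        # The default (template) setting data objects are identified by an
        # InstanceID ending in '\Default'.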
obj = self._conn.query("SELECT * FROM %s WHERE InstanceID "
"LIKE '%%\\Default'" % class_name)[0]
return self._check_clone_wmi_obj(class_name, obj)
def _get_new_resource_setting_data(self, resource_sub_type,
class_name=None):
if class_name is None:
class_name = self._RESOURCE_ALLOC_SETTING_DATA_CLASS
obj = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = "
"'%(res_sub_type)s' AND "
"InstanceID LIKE '%%\\Default'" %
{"class_name": class_name,
"res_sub_type": resource_sub_type})[0]
return self._check_clone_wmi_obj(class_name, obj)
def _check_clone_wmi_obj(self, class_name, obj):
if self._clone_wmi_objs:
return self._clone_wmi_obj(class_name, obj)
else:
return obj
def _clone_wmi_obj(self, class_name, obj):
wmi_class = getattr(self._conn, class_name)
new_obj = wmi_class.new()
# Copy the properties from the original.
for prop in obj._properties:
value = obj.Properties_.Item(prop).Value
new_obj.Properties_.Item(prop).Value = value
return new_obj
def attach_scsi_drive(self, vm_name, path, drive_type=constants.DISK):
vm = self._lookup_vm_check(vm_name)
ctrller_path = self._get_vm_scsi_controller(vm)
drive_addr = self.get_free_controller_slot(ctrller_path)
self.attach_drive(vm_name, path, ctrller_path, drive_addr, drive_type)
def attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
drive_type=constants.DISK):
vm = self._lookup_vm_check(vm_name)
ctrller_path = self._get_vm_ide_controller(vm, ctrller_addr)
self.attach_drive(vm_name, path, ctrller_path, drive_addr, drive_type)
def attach_drive(self, vm_name, path, ctrller_path, drive_addr,
drive_type=constants.DISK):
"""Create a drive and attach it to the vm."""
vm = self._lookup_vm_check(vm_name)
if drive_type == constants.DISK:
res_sub_type = self._DISK_DRIVE_RES_SUB_TYPE
elif drive_type == constants.DVD:
res_sub_type = self._DVD_DRIVE_RES_SUB_TYPE
drive = self._get_new_resource_setting_data(res_sub_type)
# Set the ctrller as parent.
drive.Parent = ctrller_path
drive.Address = drive_addr
# Add the cloned disk drive object to the vm.
new_resources = self._add_virt_resource(drive, vm.path_())
drive_path = new_resources[0]
if drive_type == constants.DISK:
res_sub_type = self._HARD_DISK_RES_SUB_TYPE
elif drive_type == constants.DVD:
res_sub_type = self._DVD_DISK_RES_SUB_TYPE
res = self._get_new_resource_setting_data(res_sub_type)
# Set the new drive as the parent.
res.Parent = drive_path
res.Connection = [path]
# Add the new vhd object as a virtual hard disk to the vm.
self._add_virt_resource(res, vm.path_())
def create_scsi_controller(self, vm_name):
"""Create an iscsi controller ready to mount volumes."""
vm = self._lookup_vm_check(vm_name)
scsicontrl = self._get_new_resource_setting_data(
self._SCSI_CTRL_RES_SUB_TYPE)
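        # Use a freshly generated GUID wrapped in curly braces, the format
        # used by the WMI setting data for identifiers.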
scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
self._add_virt_resource(scsicontrl, vm.path_())
def attach_volume_to_controller(self, vm_name, controller_path, address,
mounted_disk_path):
"""Attach a volume to a controller."""
vm = self._lookup_vm_check(vm_name)
diskdrive = self._get_new_resource_setting_data(
self._PHYS_DISK_RES_SUB_TYPE)
diskdrive.Address = address
diskdrive.Parent = controller_path
diskdrive.HostResource = [mounted_disk_path]
self._add_virt_resource(diskdrive, vm.path_())
def _get_disk_resource_address(self, disk_resource):
return disk_resource.Address
def set_disk_host_resource(self, vm_name, controller_path, address,
mounted_disk_path):
disk_found = False
vm = self._lookup_vm_check(vm_name)
(disk_resources, volume_resources) = self._get_vm_disks(vm)
for disk_resource in disk_resources + volume_resources:
if (disk_resource.Parent == controller_path and
self._get_disk_resource_address(disk_resource) ==
str(address)):
if (disk_resource.HostResource and
disk_resource.HostResource[0] != mounted_disk_path):
LOG.debug('Updating disk host resource "%(old)s" to '
'"%(new)s"' %
{'old': disk_resource.HostResource[0],
'new': mounted_disk_path})
disk_resource.HostResource = [mounted_disk_path]
self._modify_virt_resource(disk_resource, vm.path_())
disk_found = True
break
if not disk_found:
LOG.warning(_LW('Disk not found on controller '
'"%(controller_path)s" with '
'address "%(address)s"'),
{'controller_path': controller_path,
'address': address})
def set_nic_connection(self, vm_name, nic_name, vswitch_conn_data):
nic_data = self._get_nic_data_by_name(nic_name)
nic_data.Connection = [vswitch_conn_data]
vm = self._lookup_vm_check(vm_name)
self._modify_virt_resource(nic_data, vm.path_())
def _get_nic_data_by_name(self, name):
return self._conn.Msvm_SyntheticEthernetPortSettingData(
ElementName=name)[0]
def create_nic(self, vm_name, nic_name, mac_address):
"""Create a (synthetic) nic and attach it to the vm."""
# Create a new nic
new_nic_data = self._get_new_setting_data(
self._SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS)
# Configure the nic
new_nic_data.ElementName = nic_name
new_nic_data.Address = mac_address.replace(':', '')
new_nic_data.StaticMacAddress = 'True'
new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
# Add the new nic to the vm
vm = self._lookup_vm_check(vm_name)
self._add_virt_resource(new_nic_data, vm.path_())
def soft_shutdown_vm(self, vm_name):
vm = self._lookup_vm_check(vm_name)
shutdown_component = vm.associators(
wmi_result_class=self._SHUTDOWN_COMPONENT)
if not shutdown_component:
# If no shutdown_component is found, it means the VM is already
# in a shutdown state.
return
reason = 'Soft shutdown requested by OpenStack Nova.'
(ret_val, ) = shutdown_component[0].InitiateShutdown(Force=False,
Reason=reason)
self.check_ret_val(ret_val, None)
def set_vm_state(self, vm_name, req_state):
"""Set the desired state of the VM."""
vm = self._lookup_vm_check(vm_name)
(job_path,
ret_val) = vm.RequestStateChange(self._vm_power_states_map[req_state])
# Invalid state for current operation (32775) typically means that
# the VM is already in the state requested
self.check_ret_val(ret_val, job_path, [0, 32775])
LOG.debug("Successfully changed vm state of %(vm_name)s "
"to %(req_state)s",
{'vm_name': vm_name, 'req_state': req_state})
def _get_disk_resource_disk_path(self, disk_resource):
return disk_resource.Connection
def get_vm_storage_paths(self, vm_name):
vm = self._lookup_vm_check(vm_name)
(disk_resources, volume_resources) = self._get_vm_disks(vm)
volume_drives = []
for volume_resource in volume_resources:
drive_path = volume_resource.HostResource[0]
volume_drives.append(drive_path)
disk_files = []
for disk_resource in disk_resources:
disk_files.extend(
[c for c in self._get_disk_resource_disk_path(disk_resource)])
return (disk_files, volume_drives)
def _get_vm_disks(self, vm):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._STORAGE_ALLOC_SETTING_DATA_CLASS)
disk_resources = [r for r in rasds if
r.ResourceSubType in
[self._HARD_DISK_RES_SUB_TYPE,
self._DVD_DISK_RES_SUB_TYPE]]
if (self._RESOURCE_ALLOC_SETTING_DATA_CLASS !=
self._STORAGE_ALLOC_SETTING_DATA_CLASS):
rasds = vmsettings[0].associators(
wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
volume_resources = [r for r in rasds if
r.ResourceSubType == self._PHYS_DISK_RES_SUB_TYPE]
return (disk_resources, volume_resources)
def destroy_vm(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
# Remove the VM. Does not destroy disks.
(job_path, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
self.check_ret_val(ret_val, job_path)
def check_ret_val(self, ret_val, job_path, success_values=[0]):
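        # WMI_JOB_STATUS_STARTED means the operation continues asynchronously,
        # in which case the returned job has to be polled until completion.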
if ret_val == constants.WMI_JOB_STATUS_STARTED:
return self._wait_for_job(job_path)
elif ret_val not in success_values:
raise HyperVException(_('Operation failed with return value: %s')
% ret_val)
def _wait_for_job(self, job_path):
"""Poll WMI job state and wait for completion."""
job = self._get_wmi_obj(job_path)
while job.JobState == constants.WMI_JOB_STATE_RUNNING:
time.sleep(0.1)
job = self._get_wmi_obj(job_path)
if job.JobState != constants.WMI_JOB_STATE_COMPLETED:
job_state = job.JobState
if job.path().Class == "Msvm_ConcreteJob":
err_sum_desc = job.ErrorSummaryDescription
err_desc = job.ErrorDescription
err_code = job.ErrorCode
raise HyperVException(_("WMI job failed with status "
"%(job_state)d. Error details: "
"%(err_sum_desc)s - %(err_desc)s - "
"Error code: %(err_code)d") %
{'job_state': job_state,
'err_sum_desc': err_sum_desc,
'err_desc': err_desc,
'err_code': err_code})
else:
(error, ret_val) = job.GetError()
if not ret_val and error:
raise HyperVException(_("WMI job failed with status "
"%(job_state)d. Error details: "
"%(error)s") %
{'job_state': job_state,
'error': error})
else:
raise HyperVException(_("WMI job failed with status "
"%d. No error "
"description available") %
job_state)
desc = job.Description
elap = job.ElapsedTime
LOG.debug("WMI job succeeded: %(desc)s, Elapsed=%(elap)s",
{'desc': desc, 'elap': elap})
return job
def _get_wmi_obj(self, path):
return wmi.WMI(moniker=path.replace('\\', '/'))
def _add_virt_resource(self, res_setting_data, vm_path):
"""Adds a new resource to the VM."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
res_xml = [res_setting_data.GetText_(1)]
(job_path,
new_resources,
ret_val) = vs_man_svc.AddVirtualSystemResources(res_xml, vm_path)
self.check_ret_val(ret_val, job_path)
return new_resources
def _modify_virt_resource(self, res_setting_data, vm_path):
"""Updates a VM resource."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
ResourceSettingData=[res_setting_data.GetText_(1)],
ComputerSystem=vm_path)
self.check_ret_val(ret_val, job_path)
def _remove_virt_resource(self, res_setting_data, vm_path):
"""Removes a VM resource."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
res_path = [res_setting_data.path_()]
(job_path, ret_val) = vs_man_svc.RemoveVirtualSystemResources(res_path,
vm_path)
self.check_ret_val(ret_val, job_path)
def take_vm_snapshot(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val,
snp_setting_data) = vs_man_svc.CreateVirtualSystemSnapshot(vm.path_())
self.check_ret_val(ret_val, job_path)
job_wmi_path = job_path.replace('\\', '/')
job = wmi.WMI(moniker=job_wmi_path)
snp_setting_data = job.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)[0]
return snp_setting_data.path_()
def remove_vm_snapshot(self, snapshot_path):
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.RemoveVirtualSystemSnapshot(
snapshot_path)
self.check_ret_val(ret_val, job_path)
def detach_vm_disk(self, vm_name, disk_path, is_physical=True):
vm = self._lookup_vm_check(vm_name)
disk_resource = self._get_mounted_disk_resource_from_path(disk_path,
is_physical)
if disk_resource:
parent = self._conn.query("SELECT * FROM "
"Msvm_ResourceAllocationSettingData "
"WHERE __PATH = '%s'" %
disk_resource.Parent)[0]
self._remove_virt_resource(disk_resource, vm.path_())
if not is_physical:
self._remove_virt_resource(parent, vm.path_())
def _get_mounted_disk_resource_from_path(self, disk_path, is_physical):
if is_physical:
class_name = self._RESOURCE_ALLOC_SETTING_DATA_CLASS
res_sub_type = self._PHYS_DISK_RES_SUB_TYPE
else:
class_name = self._STORAGE_ALLOC_SETTING_DATA_CLASS
res_sub_type = self._HARD_DISK_RES_SUB_TYPE
disk_resources = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = "
"'%(res_sub_type)s'" %
{"class_name": class_name,
"res_sub_type": res_sub_type})
for disk_resource in disk_resources:
if disk_resource.HostResource:
if disk_resource.HostResource[0].lower() == disk_path.lower():
return disk_resource
def get_mounted_disk_by_drive_number(self, device_number):
mounted_disks = self._conn.query("SELECT * FROM Msvm_DiskDrive "
"WHERE DriveNumber=" +
str(device_number))
if len(mounted_disks):
return mounted_disks[0].path_()
def get_controller_volume_paths(self, controller_path):
disks = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = '%(res_sub_type)s' "
"AND Parent='%(parent)s'" %
{"class_name":
self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
"res_sub_type":
self._PHYS_DISK_RES_SUB_TYPE,
"parent":
controller_path})
disk_data = {}
for disk in disks:
if disk.HostResource:
disk_data[disk.path().RelPath] = disk.HostResource[0]
return disk_data
def get_free_controller_slot(self, scsi_controller_path):
attached_disks = self.get_attached_disks(scsi_controller_path)
used_slots = [int(disk.AddressOnParent) for disk in attached_disks]
for slot in xrange(constants.SCSI_CONTROLLER_SLOTS_NUMBER):
if slot not in used_slots:
return slot
raise HyperVException(_("Exceeded the maximum number of slots"))
def enable_vm_metrics_collection(self, vm_name):
raise NotImplementedError(_("Metrics collection is not supported on "
"this version of Hyper-V"))
def get_vm_serial_port_connection(self, vm_name, update_connection=None):
vm = self._lookup_vm_check(vm_name)
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
serial_port = (
[r for r in rasds if
r.ResourceSubType == self._SERIAL_PORT_RES_SUB_TYPE][0])
if update_connection:
serial_port.Connection = [update_connection]
self._modify_virt_resource(serial_port, vm.path_())
if len(serial_port.Connection) > 0:
return serial_port.Connection[0]
def get_active_instances(self):
"""Return the names of all the active instances known to Hyper-V."""
vm_names = self.list_instances()
vms = [self._lookup_vm(vm_name) for vm_name in vm_names]
active_vm_names = [v.ElementName for v in vms
if v.EnabledState == constants.HYPERV_VM_STATE_ENABLED]
return active_vm_names

300
hyperv/nova/vmutilsv2.py Normal file
View File

@@ -0,0 +1,300 @@
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VM related operations.
Based on the "root/virtualization/v2" namespace available starting with
Hyper-V Server / Windows Server 2012.
"""
import sys
import uuid
if sys.platform == 'win32':
import wmi
from oslo_config import cfg
from oslo_log import log as logging
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class VMUtilsV2(vmutils.VMUtils):
_PHYS_DISK_RES_SUB_TYPE = 'Microsoft:Hyper-V:Physical Disk Drive'
_DISK_DRIVE_RES_SUB_TYPE = 'Microsoft:Hyper-V:Synthetic Disk Drive'
_DVD_DRIVE_RES_SUB_TYPE = 'Microsoft:Hyper-V:Synthetic DVD Drive'
_SCSI_RES_SUBTYPE = 'Microsoft:Hyper-V:Synthetic SCSI Controller'
_HARD_DISK_RES_SUB_TYPE = 'Microsoft:Hyper-V:Virtual Hard Disk'
_DVD_DISK_RES_SUB_TYPE = 'Microsoft:Hyper-V:Virtual CD/DVD Disk'
_IDE_CTRL_RES_SUB_TYPE = 'Microsoft:Hyper-V:Emulated IDE Controller'
_SCSI_CTRL_RES_SUB_TYPE = 'Microsoft:Hyper-V:Synthetic SCSI Controller'
_SERIAL_PORT_RES_SUB_TYPE = 'Microsoft:Hyper-V:Serial Port'
_VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'
_VIRTUAL_SYSTEM_SUBTYPE_GEN2 = 'Microsoft:Hyper-V:SubType:2'
_SNAPSHOT_FULL = 2
_METRIC_AGGR_CPU_AVG = 'Aggregated Average CPU Utilization'
_METRIC_AGGR_MEMORY_AVG = 'Aggregated Average Memory Utilization'
_METRIC_ENABLED = 2
_STORAGE_ALLOC_SETTING_DATA_CLASS = 'Msvm_StorageAllocationSettingData'
_ETHERNET_PORT_ALLOCATION_SETTING_DATA_CLASS = \
'Msvm_EthernetPortAllocationSettingData'
_AUTOMATIC_STARTUP_ACTION_NONE = 2
_vm_power_states_map = {constants.HYPERV_VM_STATE_ENABLED: 2,
constants.HYPERV_VM_STATE_DISABLED: 3,
constants.HYPERV_VM_STATE_SHUTTING_DOWN: 4,
constants.HYPERV_VM_STATE_REBOOT: 11,
constants.HYPERV_VM_STATE_PAUSED: 9,
constants.HYPERV_VM_STATE_SUSPENDED: 6}
def __init__(self, host='.'):
super(VMUtilsV2, self).__init__(host)
def _init_hyperv_wmi_conn(self, host):
self._conn = wmi.WMI(moniker='//%s/root/virtualization/v2' % host)
def list_instance_notes(self):
instance_notes = []
for vs in self._conn.Msvm_VirtualSystemSettingData(
['ElementName', 'Notes'],
VirtualSystemType=self._VIRTUAL_SYSTEM_TYPE_REALIZED):
instance_notes.append((vs.ElementName, [v for v in vs.Notes if v]))
return instance_notes
def list_instances(self):
"""Return the names of all the instances known to Hyper-V."""
return [v.ElementName for v in
self._conn.Msvm_VirtualSystemSettingData(
['ElementName'],
VirtualSystemType=self._VIRTUAL_SYSTEM_TYPE_REALIZED)]
def _create_vm_obj(self, vs_man_svc, vm_name, vm_gen, notes,
dynamic_memory_ratio):
vs_data = self._conn.Msvm_VirtualSystemSettingData.new()
vs_data.ElementName = vm_name
vs_data.Notes = notes
# Don't start automatically on host boot
vs_data.AutomaticStartupAction = self._AUTOMATIC_STARTUP_ACTION_NONE
# vNUMA and dynamic memory are mutually exclusive
if dynamic_memory_ratio > 1:
vs_data.VirtualNumaEnabled = False
if vm_gen == constants.VM_GEN_2:
vs_data.VirtualSystemSubType = self._VIRTUAL_SYSTEM_SUBTYPE_GEN2
vs_data.SecureBootEnabled = False
(job_path,
vm_path,
ret_val) = vs_man_svc.DefineSystem(ResourceSettings=[],
ReferenceConfiguration=None,
SystemSettings=vs_data.GetText_(1))
job = self.check_ret_val(ret_val, job_path)
if not vm_path and job:
vm_path = job.associators(self._AFFECTED_JOB_ELEMENT_CLASS)[0]
return self._get_wmi_obj(vm_path)
def _get_vm_setting_data(self, vm):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
# Avoid snapshots
return [s for s in vmsettings if
s.VirtualSystemType == self._VIRTUAL_SYSTEM_TYPE_REALIZED][0]
def _get_attached_disks_query_string(self, scsi_controller_path):
# DVD Drives can be attached to SCSI as well, if the VM Generation is 2
return ("SELECT * FROM Msvm_ResourceAllocationSettingData WHERE ("
"ResourceSubType='%(res_sub_type)s' OR "
"ResourceSubType='%(res_sub_type_virt)s' OR "
"ResourceSubType='%(res_sub_type_dvd)s') AND "
"Parent = '%(parent)s'" % {
'res_sub_type': self._PHYS_DISK_RES_SUB_TYPE,
'res_sub_type_virt': self._DISK_DRIVE_RES_SUB_TYPE,
'res_sub_type_dvd': self._DVD_DRIVE_RES_SUB_TYPE,
'parent': scsi_controller_path.replace("'", "''")})
def attach_drive(self, vm_name, path, ctrller_path, drive_addr,
drive_type=constants.DISK):
"""Create a drive and attach it to the vm."""
vm = self._lookup_vm_check(vm_name)
if drive_type == constants.DISK:
res_sub_type = self._DISK_DRIVE_RES_SUB_TYPE
elif drive_type == constants.DVD:
res_sub_type = self._DVD_DRIVE_RES_SUB_TYPE
drive = self._get_new_resource_setting_data(res_sub_type)
# Set the ctrller as parent.
drive.Parent = ctrller_path
drive.Address = drive_addr
drive.AddressOnParent = drive_addr
# Add the cloned disk drive object to the vm.
new_resources = self._add_virt_resource(drive, vm.path_())
drive_path = new_resources[0]
if drive_type == constants.DISK:
res_sub_type = self._HARD_DISK_RES_SUB_TYPE
elif drive_type == constants.DVD:
res_sub_type = self._DVD_DISK_RES_SUB_TYPE
res = self._get_new_resource_setting_data(
res_sub_type, self._STORAGE_ALLOC_SETTING_DATA_CLASS)
res.Parent = drive_path
res.HostResource = [path]
self._add_virt_resource(res, vm.path_())
def attach_volume_to_controller(self, vm_name, controller_path, address,
mounted_disk_path):
"""Attach a volume to a controller."""
vm = self._lookup_vm_check(vm_name)
diskdrive = self._get_new_resource_setting_data(
self._PHYS_DISK_RES_SUB_TYPE)
diskdrive.AddressOnParent = address
diskdrive.Parent = controller_path
diskdrive.HostResource = [mounted_disk_path]
self._add_virt_resource(diskdrive, vm.path_())
def _get_disk_resource_address(self, disk_resource):
return disk_resource.AddressOnParent
def create_scsi_controller(self, vm_name):
"""Create an iscsi controller ready to mount volumes."""
scsicontrl = self._get_new_resource_setting_data(
self._SCSI_RES_SUBTYPE)
scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
vm = self._lookup_vm_check(vm_name)
self._add_virt_resource(scsicontrl, vm.path_())
def _get_disk_resource_disk_path(self, disk_resource):
return disk_resource.HostResource
def destroy_vm(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
# Remove the VM. It does not destroy any associated virtual disk.
(job_path, ret_val) = vs_man_svc.DestroySystem(vm.path_())
self.check_ret_val(ret_val, job_path)
def _add_virt_resource(self, res_setting_data, vm_path):
"""Adds a new resource to the VM."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
res_xml = [res_setting_data.GetText_(1)]
(job_path,
new_resources,
ret_val) = vs_man_svc.AddResourceSettings(vm_path, res_xml)
self.check_ret_val(ret_val, job_path)
return new_resources
def _modify_virt_resource(self, res_setting_data, vm_path):
"""Updates a VM resource."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path,
out_res_setting_data,
ret_val) = vs_man_svc.ModifyResourceSettings(
ResourceSettings=[res_setting_data.GetText_(1)])
self.check_ret_val(ret_val, job_path)
def _remove_virt_resource(self, res_setting_data, vm_path):
"""Removes a VM resource."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
res_path = [res_setting_data.path_()]
(job_path, ret_val) = vs_man_svc.RemoveResourceSettings(res_path)
self.check_ret_val(ret_val, job_path)
def get_vm_state(self, vm_name):
settings = self.get_vm_summary_info(vm_name)
return settings['EnabledState']
def take_vm_snapshot(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_snap_svc = self._conn.Msvm_VirtualSystemSnapshotService()[0]
(job_path, snp_setting_data, ret_val) = vs_snap_svc.CreateSnapshot(
AffectedSystem=vm.path_(),
SnapshotType=self._SNAPSHOT_FULL)
self.check_ret_val(ret_val, job_path)
job_wmi_path = job_path.replace('\\', '/')
job = wmi.WMI(moniker=job_wmi_path)
snp_setting_data = job.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)[0]
return snp_setting_data.path_()
def remove_vm_snapshot(self, snapshot_path):
vs_snap_svc = self._conn.Msvm_VirtualSystemSnapshotService()[0]
(job_path, ret_val) = vs_snap_svc.DestroySnapshot(snapshot_path)
self.check_ret_val(ret_val, job_path)
def set_nic_connection(self, vm_name, nic_name, vswitch_conn_data):
nic_data = self._get_nic_data_by_name(nic_name)
eth_port_data = self._get_new_setting_data(
self._ETHERNET_PORT_ALLOCATION_SETTING_DATA_CLASS)
eth_port_data.HostResource = [vswitch_conn_data]
eth_port_data.Parent = nic_data.path_()
vm = self._lookup_vm_check(vm_name)
self._add_virt_resource(eth_port_data, vm.path_())
def enable_vm_metrics_collection(self, vm_name):
metric_names = [self._METRIC_AGGR_CPU_AVG,
self._METRIC_AGGR_MEMORY_AVG]
vm = self._lookup_vm_check(vm_name)
metric_svc = self._conn.Msvm_MetricService()[0]
(disks, volumes) = self._get_vm_disks(vm)
filtered_disks = [d for d in disks if
                          d.ResourceSubType != self._DVD_DISK_RES_SUB_TYPE]
        # Enable metrics for each disk.
for disk in filtered_disks:
self._enable_metrics(metric_svc, disk)
for metric_name in metric_names:
metric_def = self._conn.CIM_BaseMetricDefinition(Name=metric_name)
if not metric_def:
LOG.debug("Metric not found: %s", metric_name)
else:
self._enable_metrics(metric_svc, vm, metric_def[0].path_())
def _enable_metrics(self, metric_svc, element, definition_path=None):
metric_svc.ControlMetrics(
Subject=element.path_(),
Definition=definition_path,
MetricCollectionEnabled=self._METRIC_ENABLED)

431
hyperv/nova/volumeops.py Normal file
View File

@@ -0,0 +1,431 @@
# Copyright 2012 Pedro Navarro Perez
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for Storage-related functions (attach, detach, etc).
"""
import collections
import os
import re
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from nova import exception
from nova.i18n import _, _LE, _LW
from nova.virt import driver
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmutils
LOG = logging.getLogger(__name__)
hyper_volumeops_opts = [
cfg.IntOpt('volume_attach_retry_count',
default=10,
help='The number of times to retry to attach a volume'),
cfg.IntOpt('volume_attach_retry_interval',
default=5,
help='Interval between volume attachment attempts, in seconds'),
cfg.IntOpt('mounted_disk_query_retry_count',
default=10,
help='The number of times to retry checking for a disk mounted '
'via iSCSI.'),
cfg.IntOpt('mounted_disk_query_retry_interval',
default=5,
help='Interval between checks for a mounted iSCSI '
'disk, in seconds.'),
]
CONF = cfg.CONF
CONF.register_opts(hyper_volumeops_opts, 'hyperv')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_block_storage_ip', 'nova.netconf')
class VolumeOps(object):
"""Management class for Volume-related tasks
"""
def __init__(self):
self._vmutils = utilsfactory.get_vmutils()
self._volutils = utilsfactory.get_volumeutils()
self._initiator = None
self._default_root_device = 'vda'
self.volume_drivers = {'smbfs': SMBFSVolumeDriver(),
'iscsi': ISCSIVolumeDriver()}
def _get_volume_driver(self, driver_type=None, connection_info=None):
if connection_info:
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
return self.volume_drivers[driver_type]
def attach_volumes(self, block_device_info, instance_name, ebs_root):
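        # When ebs_root is True, the first entry in the mapping is the boot
        # (root) volume and is attached to the IDE controller instead of the
        # SCSI controller.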
mapping = driver.block_device_info_get_mapping(block_device_info)
if ebs_root:
self.attach_volume(mapping[0]['connection_info'],
instance_name, True)
mapping = mapping[1:]
for vol in mapping:
self.attach_volume(vol['connection_info'], instance_name)
def disconnect_volumes(self, block_device_info):
mapping = driver.block_device_info_get_mapping(block_device_info)
block_devices = self._group_block_devices_by_type(
mapping)
for driver_type, block_device_mapping in block_devices.items():
volume_driver = self._get_volume_driver(driver_type)
volume_driver.disconnect_volumes(block_device_mapping)
def attach_volume(self, connection_info, instance_name, ebs_root=False):
volume_driver = self._get_volume_driver(
connection_info=connection_info)
volume_driver.attach_volume(connection_info, instance_name, ebs_root)
def detach_volume(self, connection_info, instance_name):
volume_driver = self._get_volume_driver(
connection_info=connection_info)
volume_driver.detach_volume(connection_info, instance_name)
def ebs_root_in_block_devices(self, block_device_info):
if block_device_info:
root_device = block_device_info.get('root_device_name')
if not root_device:
root_device = self._default_root_device
return self._volutils.volume_in_mapping(root_device,
block_device_info)
def fix_instance_volume_disk_paths(self, instance_name, block_device_info):
mapping = driver.block_device_info_get_mapping(block_device_info)
if self.ebs_root_in_block_devices(block_device_info):
mapping = mapping[1:]
disk_address = 0
for vol in mapping:
connection_info = vol['connection_info']
volume_driver = self._get_volume_driver(
connection_info=connection_info)
volume_driver.fix_instance_volume_disk_path(
instance_name, connection_info, disk_address)
disk_address += 1
def get_volume_connector(self, instance):
if not self._initiator:
self._initiator = self._volutils.get_iscsi_initiator()
if not self._initiator:
LOG.warning(_LW('Could not determine iscsi initiator name'),
instance=instance)
return {
'ip': CONF.my_block_storage_ip,
'host': CONF.host,
'initiator': self._initiator,
}
def initialize_volumes_connection(self, block_device_info):
mapping = driver.block_device_info_get_mapping(block_device_info)
for vol in mapping:
connection_info = vol['connection_info']
volume_driver = self._get_volume_driver(
connection_info=connection_info)
volume_driver.initialize_volume_connection(connection_info)
def _group_block_devices_by_type(self, block_device_mapping):
block_devices = collections.defaultdict(list)
for volume in block_device_mapping:
connection_info = volume['connection_info']
volume_type = connection_info.get('driver_volume_type')
block_devices[volume_type].append(volume)
return block_devices
class ISCSIVolumeDriver(object):
def __init__(self):
self._vmutils = utilsfactory.get_vmutils()
self._volutils = utilsfactory.get_volumeutils()
def login_storage_target(self, connection_info):
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
target_portal = data['target_portal']
auth_method = data.get('auth_method')
auth_username = data.get('auth_username')
auth_password = data.get('auth_password')
if auth_method and auth_method.upper() != 'CHAP':
raise vmutils.HyperVException(
_("Cannot log in target %(target_iqn)s. Unsupported iSCSI "
"authentication method: %(auth_method)s.") %
{'target_iqn': target_iqn,
'auth_method': auth_method})
# Check if we already logged in
if self._volutils.get_device_number_for_target(target_iqn, target_lun):
LOG.debug("Already logged in on storage target. No need to "
"login. Portal: %(target_portal)s, "
"IQN: %(target_iqn)s, LUN: %(target_lun)s",
{'target_portal': target_portal,
'target_iqn': target_iqn, 'target_lun': target_lun})
else:
LOG.debug("Logging in on storage target. Portal: "
"%(target_portal)s, IQN: %(target_iqn)s, "
"LUN: %(target_lun)s",
{'target_portal': target_portal,
'target_iqn': target_iqn, 'target_lun': target_lun})
self._volutils.login_storage_target(target_lun, target_iqn,
target_portal, auth_username,
auth_password)
# Wait for the target to be mounted
self._get_mounted_disk_from_lun(target_iqn, target_lun, True)
def disconnect_volumes(self, block_device_mapping):
iscsi_targets = collections.defaultdict(int)
for vol in block_device_mapping:
target_iqn = vol['connection_info']['data']['target_iqn']
iscsi_targets[target_iqn] += 1
for target_iqn, disconnected_luns in iscsi_targets.items():
self.logout_storage_target(target_iqn, disconnected_luns)
def logout_storage_target(self, target_iqn, disconnected_luns_count=1):
total_available_luns = self._volutils.get_target_lun_count(
target_iqn)
if total_available_luns == disconnected_luns_count:
LOG.debug("Logging off storage target %s", target_iqn)
self._volutils.logout_storage_target(target_iqn)
else:
LOG.debug("Skipping disconnecting target %s as there "
"are LUNs still being used.", target_iqn)
def attach_volume(self, connection_info, instance_name, ebs_root=False):
"""Attach a volume to the SCSI controller or to the IDE controller if
ebs_root is True
"""
target_iqn = None
LOG.debug("Attach_volume: %(connection_info)s to %(instance_name)s",
{'connection_info': connection_info,
'instance_name': instance_name})
try:
self.login_storage_target(connection_info)
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
# Getting the mounted disk
mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
target_lun)
if ebs_root:
# Find the IDE controller for the vm.
ctrller_path = self._vmutils.get_vm_ide_controller(
instance_name, 0)
# Attaching to the first slot
slot = 0
else:
# Find the SCSI controller for the vm
ctrller_path = self._vmutils.get_vm_scsi_controller(
instance_name)
slot = self._vmutils.get_free_controller_slot(ctrller_path)
self._vmutils.attach_volume_to_controller(instance_name,
ctrller_path,
slot,
mounted_disk_path)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Unable to attach volume to instance %s'),
instance_name)
if target_iqn:
self.logout_storage_target(target_iqn)
def detach_volume(self, connection_info, instance_name):
"""Detach a volume to the SCSI controller."""
LOG.debug("Detach_volume: %(connection_info)s "
"from %(instance_name)s",
{'connection_info': connection_info,
'instance_name': instance_name})
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
# Getting the mounted disk
mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
target_lun)
LOG.debug("Detaching physical disk from instance: %s",
mounted_disk_path)
self._vmutils.detach_vm_disk(instance_name, mounted_disk_path)
self.logout_storage_target(target_iqn)
def _get_mounted_disk_from_lun(self, target_iqn, target_lun,
wait_for_device=False):
# The WMI query in get_device_number_for_target can incorrectly
# return no data when the system is under load. This issue can
# be avoided by adding a retry.
for i in xrange(CONF.hyperv.mounted_disk_query_retry_count):
device_number = self._volutils.get_device_number_for_target(
target_iqn, target_lun)
if device_number in (None, -1):
attempt = i + 1
LOG.debug('Attempt %d to get device_number '
'from get_device_number_for_target failed. '
'Retrying...', attempt)
time.sleep(CONF.hyperv.mounted_disk_query_retry_interval)
else:
break
if device_number in (None, -1):
raise exception.NotFound(_('Unable to find a mounted disk for '
'target_iqn: %s') % target_iqn)
LOG.debug('Device number: %(device_number)s, '
'target lun: %(target_lun)s',
{'device_number': device_number, 'target_lun': target_lun})
# Finding Mounted disk drive
for i in range(0, CONF.hyperv.volume_attach_retry_count):
mounted_disk_path = self._vmutils.get_mounted_disk_by_drive_number(
device_number)
if mounted_disk_path or not wait_for_device:
break
time.sleep(CONF.hyperv.volume_attach_retry_interval)
if not mounted_disk_path:
raise exception.NotFound(_('Unable to find a mounted disk for '
'target_iqn: %s. Please ensure that '
'the host\'s SAN policy is set to '
'"OfflineAll" or "OfflineShared"') %
target_iqn)
return mounted_disk_path
def get_target_from_disk_path(self, physical_drive_path):
return self._volutils.get_target_from_disk_path(physical_drive_path)
def fix_instance_volume_disk_path(self, instance_name, connection_info,
disk_address):
data = connection_info['data']
target_lun = data['target_lun']
target_iqn = data['target_iqn']
mounted_disk_path = self._get_mounted_disk_from_lun(
target_iqn, target_lun, True)
ctrller_path = self._vmutils.get_vm_scsi_controller(instance_name)
self._vmutils.set_disk_host_resource(
instance_name, ctrller_path, disk_address, mounted_disk_path)
def get_target_lun_count(self, target_iqn):
return self._volutils.get_target_lun_count(target_iqn)
def initialize_volume_connection(self, connection_info):
self.login_storage_target(connection_info)
class SMBFSVolumeDriver(object):
def __init__(self):
self._pathutils = utilsfactory.get_pathutils()
self._vmutils = utilsfactory.get_vmutils()
self._volutils = utilsfactory.get_volumeutils()
self._username_regex = re.compile(r'user(?:name)?=([^, ]+)')
self._password_regex = re.compile(r'pass(?:word)?=([^, ]+)')
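        # SMB credentials may be embedded in the connection 'options' string,
        # e.g. 'username=admin,password=secret'; 'guest' usernames are
        # ignored (see _parse_credentials).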
def attach_volume(self, connection_info, instance_name, ebs_root=False):
self.ensure_share_mounted(connection_info)
disk_path = self._get_disk_path(connection_info)
try:
if ebs_root:
ctrller_path = self._vmutils.get_vm_ide_controller(
instance_name, 0)
slot = 0
else:
ctrller_path = self._vmutils.get_vm_scsi_controller(
instance_name)
slot = self._vmutils.get_free_controller_slot(ctrller_path)
self._vmutils.attach_drive(instance_name,
disk_path,
ctrller_path,
slot)
except vmutils.HyperVException as exn:
LOG.exception(_LE('Attach volume failed: %s'), exn)
raise vmutils.HyperVException(_('Unable to attach volume '
'to instance %s') % instance_name)
def detach_volume(self, connection_info, instance_name):
LOG.debug("Detaching volume: %(connection_info)s "
"from %(instance_name)s",
{'connection_info': connection_info,
'instance_name': instance_name})
disk_path = self._get_disk_path(connection_info)
export_path = self._get_export_path(connection_info)
self._vmutils.detach_vm_disk(instance_name, disk_path,
is_physical=False)
self._pathutils.unmount_smb_share(export_path)
def disconnect_volumes(self, block_device_mapping):
export_paths = set()
for vol in block_device_mapping:
connection_info = vol['connection_info']
export_path = self._get_export_path(connection_info)
export_paths.add(export_path)
for export_path in export_paths:
self._pathutils.unmount_smb_share(export_path)
def _get_export_path(self, connection_info):
return connection_info['data']['export'].replace('/', '\\')
def _get_disk_path(self, connection_info):
export = self._get_export_path(connection_info)
disk_name = connection_info['data']['name']
disk_path = os.path.join(export, disk_name)
return disk_path
def ensure_share_mounted(self, connection_info):
export_path = self._get_export_path(connection_info)
if not self._pathutils.check_smb_mapping(export_path):
opts_str = connection_info['data'].get('options', '')
username, password = self._parse_credentials(opts_str)
self._pathutils.mount_smb_share(export_path,
username=username,
password=password)
def _parse_credentials(self, opts_str):
match = self._username_regex.findall(opts_str)
username = match[0] if match and match[0] != 'guest' else None
match = self._password_regex.findall(opts_str)
password = match[0] if match else None
return username, password
def fix_instance_volume_disk_path(self, instance_name, connection_info,
disk_address):
self.ensure_share_mounted(connection_info)
def initialize_volume_connection(self, connection_info):
self.ensure_share_mounted(connection_info)

121
hyperv/nova/volumeutils.py Normal file
View File

@@ -0,0 +1,121 @@
# Copyright 2012 Pedro Navarro Perez
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of volumes,
and storage repositories
Official Microsoft iSCSI Initiator and iSCSI command line interface
documentation can be retrieved at:
http://www.microsoft.com/en-us/download/details.aspx?id=34750
"""
import re
import time
from oslo_config import cfg
from oslo_log import log as logging
from nova.i18n import _
from nova import utils
from nova.virt.hyperv import basevolumeutils
from nova.virt.hyperv import vmutils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class VolumeUtils(basevolumeutils.BaseVolumeUtils):
def __init__(self):
super(VolumeUtils, self).__init__()
def execute(self, *args, **kwargs):
stdout_value, stderr_value = utils.execute(*args, **kwargs)
if stdout_value.find('The operation completed successfully') == -1:
raise vmutils.HyperVException(_('An error has occurred when '
'calling the iscsi initiator: %s')
% stdout_value)
return stdout_value
def _login_target_portal(self, target_portal):
(target_address,
target_port) = utils.parse_server_string(target_portal)
output = self.execute('iscsicli.exe', 'ListTargetPortals')
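        # Each known portal is expected to be reported on a line of the form
        # 'Address and Socket : <address> <port>'.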
pattern = r'Address and Socket *: (.*)'
portals = [addr.split() for addr in re.findall(pattern, output)]
LOG.debug("Ensuring connection to portal: %s" % target_portal)
if [target_address, str(target_port)] in portals:
self.execute('iscsicli.exe', 'RefreshTargetPortal',
target_address, target_port)
else:
# Adding target portal to iscsi initiator. Sending targets
self.execute('iscsicli.exe', 'AddTargetPortal',
target_address, target_port,
'*', '*', '*', '*', '*', '*', '*', '*', '*', '*', '*',
'*', '*')
def login_storage_target(self, target_lun, target_iqn, target_portal,
auth_username=None, auth_password=None):
"""Ensure that the target is logged in."""
self._login_target_portal(target_portal)
# Listing targets
self.execute('iscsicli.exe', 'ListTargets')
retry_count = CONF.hyperv.volume_attach_retry_count
# If the target is not connected, at least two iterations are needed:
# one for performing the login and another one for checking if the
# target was logged in successfully.
if retry_count < 2:
retry_count = 2
for attempt in xrange(retry_count):
try:
session_info = self.execute('iscsicli.exe', 'SessionList')
if session_info.find(target_iqn) == -1:
# Sending login
self.execute('iscsicli.exe', 'qlogintarget', target_iqn,
auth_username, auth_password)
else:
return
except vmutils.HyperVException as exc:
LOG.debug("Attempt %(attempt)d to connect to target "
"%(target_iqn)s failed. Retrying. "
"Exceptipn: %(exc)s ",
{'target_iqn': target_iqn,
'exc': exc,
'attempt': attempt})
time.sleep(CONF.hyperv.volume_attach_retry_interval)
raise vmutils.HyperVException(_('Failed to login target %s') %
target_iqn)
def logout_storage_target(self, target_iqn):
"""Logs out storage target through its session id."""
sessions = self._conn_wmi.query("SELECT * FROM "
"MSiSCSIInitiator_SessionClass "
"WHERE TargetName='%s'" % target_iqn)
for session in sessions:
self.execute_log_out(session.SessionId)
def execute_log_out(self, session_id):
"""Executes log out of the session described by its session ID."""
self.execute('iscsicli.exe', 'logouttarget', session_id)

131
hyperv/nova/volumeutilsv2.py Normal file
View File

@@ -0,0 +1,131 @@
# Copyright 2012 Pedro Navarro Perez
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of volumes
and storage repositories on Windows Server 2012 and above
"""
import sys
import time
if sys.platform == 'win32':
import wmi
from oslo_config import cfg
from oslo_log import log as logging
from nova.i18n import _
from nova import utils
from nova.virt.hyperv import basevolumeutils
from nova.virt.hyperv import vmutils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class VolumeUtilsV2(basevolumeutils.BaseVolumeUtils):
_CHAP_AUTH_TYPE = 'ONEWAYCHAP'
def __init__(self, host='.'):
super(VolumeUtilsV2, self).__init__(host)
storage_namespace = '//%s/root/microsoft/windows/storage' % host
if sys.platform == 'win32':
self._conn_storage = wmi.WMI(moniker=storage_namespace)
def _login_target_portal(self, target_portal):
(target_address,
target_port) = utils.parse_server_string(target_portal)
# Checking if the portal is already connected.
portal = self._conn_storage.query("SELECT * FROM "
"MSFT_iSCSITargetPortal "
"WHERE TargetPortalAddress='%s' "
"AND TargetPortalPortNumber='%s'"
% (target_address, target_port))
if portal:
portal[0].Update()
else:
# Adding target portal to iscsi initiator. Sending targets
portal = self._conn_storage.MSFT_iSCSITargetPortal
portal.New(TargetPortalAddress=target_address,
TargetPortalPortNumber=target_port)
def login_storage_target(self, target_lun, target_iqn, target_portal,
auth_username=None, auth_password=None):
"""Ensure that the target is logged in."""
self._login_target_portal(target_portal)
retry_count = CONF.hyperv.volume_attach_retry_count
# If the target is not connected, at least two iterations are needed:
# one for performing the login and another one for checking if the
# target was logged in successfully.
if retry_count < 2:
retry_count = 2
for attempt in xrange(retry_count):
target = self._conn_storage.query("SELECT * FROM MSFT_iSCSITarget "
"WHERE NodeAddress='%s' " %
target_iqn)
if target and target[0].IsConnected:
if attempt == 0:
# The target was already connected but an update may be
# required
target[0].Update()
return
try:
target = self._conn_storage.MSFT_iSCSITarget
auth = {}
if auth_username and auth_password:
auth['AuthenticationType'] = self._CHAP_AUTH_TYPE
auth['ChapUsername'] = auth_username
auth['ChapSecret'] = auth_password
target.Connect(NodeAddress=target_iqn,
IsPersistent=True, **auth)
time.sleep(CONF.hyperv.volume_attach_retry_interval)
except wmi.x_wmi as exc:
LOG.debug("Attempt %(attempt)d to connect to target "
"%(target_iqn)s failed. Retrying. "
"WMI exception: %(exc)s " %
{'target_iqn': target_iqn,
'exc': exc,
'attempt': attempt})
raise vmutils.HyperVException(_('Failed to login target %s') %
target_iqn)
def logout_storage_target(self, target_iqn):
"""Logs out storage target through its session id."""
targets = self._conn_storage.MSFT_iSCSITarget(NodeAddress=target_iqn)
if targets:
target = targets[0]
if target.IsConnected:
sessions = self._conn_storage.MSFT_iSCSISession(
TargetNodeAddress=target_iqn)
for session in sessions:
if session.IsPersistent:
session.Unregister()
target.Disconnect()
def execute_log_out(self, session_id):
sessions = self._conn_wmi.MSiSCSIInitiator_SessionClass(
SessionId=session_id)
if sessions:
self.logout_storage_target(sessions[0].TargetName)
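
A similarly hedged sketch of the CHAP path above; VolumeUtilsV2 is the class
defined in this file, while the credentials and addresses are made up:

    from nova.virt.hyperv import volumeutilsv2

    utils = volumeutilsv2.VolumeUtilsV2()
    # With both credentials supplied, Connect() is issued with one-way CHAP.
    utils.login_storage_target(target_lun=1,
                               target_iqn='iqn.2010-10.org.openstack:volume-1',
                               target_portal='192.168.1.10:3260',
                               auth_username='chap_user',
                               auth_password='chap_secret')
    utils.logout_storage_target('iqn.2010-10.org.openstack:volume-1')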

0
hyperv/tests/__init__.py Normal file
View File

View File

View File

@@ -0,0 +1,166 @@
# Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Stubouts, mocks and fixtures for the test suite
"""
import uuid
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova import utils
from nova.virt.hyperv import constants
def get_fake_instance_data(name, project_id, user_id):
return {'name': name,
'id': 1,
'uuid': str(uuid.uuid4()),
'project_id': project_id,
'user_id': user_id,
'image_ref': "1",
'kernel_id': "1",
'ramdisk_id': "1",
'mac_address': "de:ad:be:ef:be:ef",
'flavor':
{'name': 'm1.tiny',
'memory_mb': 512,
'vcpus': 1,
'root_gb': 1024,
'flavorid': 1,
'rxtx_factor': 1}
}
def get_fake_image_data(project_id, user_id):
return {'name': 'image1',
'id': 1,
'project_id': project_id,
'user_id': user_id,
'image_ref': "1",
'kernel_id': "1",
'ramdisk_id': "1",
'mac_address': "de:ad:be:ef:be:ef",
'flavor': 'm1.tiny',
'properties': {
constants.IMAGE_PROP_VM_GEN: constants.IMAGE_PROP_VM_GEN_1}
}
def get_fake_volume_info_data(target_portal, volume_id):
return {
'driver_volume_type': 'iscsi',
'data': {
'volume_id': volume_id,
'target_iqn': 'iqn.2010-10.org.openstack:volume-' + volume_id,
'target_portal': target_portal,
'target_lun': 1,
'auth_method': 'CHAP',
'auth_username': 'fake_username',
'auth_password': 'fake_password',
'target_discovered': False,
},
'mount_device': 'vda',
'delete_on_termination': False
}
def get_fake_block_device_info(target_portal, volume_id):
connection_info = get_fake_volume_info_data(target_portal, volume_id)
return {'block_device_mapping': [{'connection_info': connection_info}],
'root_device_name': 'fake_root_device_name',
'ephemerals': [],
'swap': None
}
def stub_out_db_instance_api(stubs):
"""Stubs out the db API for creating Instances."""
FLAVORS = {
'm1.tiny': dict(memory_mb=512, vcpus=1, root_gb=0, flavorid=1),
'm1.small': dict(memory_mb=2048, vcpus=1, root_gb=20, flavorid=2),
'm1.medium': dict(memory_mb=4096, vcpus=2, root_gb=40, flavorid=3),
'm1.large': dict(memory_mb=8192, vcpus=4, root_gb=80, flavorid=4),
'm1.xlarge': dict(memory_mb=16384, vcpus=8, root_gb=160, flavorid=5)}
class FakeModel(object):
"""Stubs out for model."""
def __init__(self, values):
self.values = values
def get(self, key, default=None):
if key in self.values:
return self.values[key]
else:
return default
def __getattr__(self, name):
return self.values[name]
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
self.values[key] = value
def __str__(self):
return str(self.values)
def fake_instance_create(context, values):
"""Stubs out the db.instance_create method."""
if 'flavor' not in values:
return
flavor = values['flavor']
base_options = {
'name': values['name'],
'id': values['id'],
'uuid': str(uuid.uuid4()),
'reservation_id': utils.generate_uid('r'),
'image_ref': values['image_ref'],
'kernel_id': values['kernel_id'],
'ramdisk_id': values['ramdisk_id'],
'vm_state': vm_states.BUILDING,
'task_state': task_states.SCHEDULING,
'user_id': values['user_id'],
'project_id': values['project_id'],
'flavor': flavor,
'memory_mb': flavor['memory_mb'],
'vcpus': flavor['vcpus'],
'mac_addresses': [{'address': values['mac_address']}],
'root_gb': flavor['root_gb'],
'system_metadata': {'image_shutdown_timeout': 0},
}
return FakeModel(base_options)
def fake_flavor_get_all(context, inactive=0, filters=None):
return FLAVORS.values()
def fake_flavor_get_by_name(context, name):
return FLAVORS[name]
def fake_block_device_mapping_get_all_by_instance(context, instance_uuid):
return {}
stubs.Set(db, 'instance_create', fake_instance_create)
stubs.Set(db, 'flavor_get_all', fake_flavor_get_all)
stubs.Set(db, 'flavor_get_by_name', fake_flavor_get_by_name)
stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_block_device_mapping_get_all_by_instance)
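
A sketch of how a test might consume these helpers; the module name db_fakes
and the stubs fixture provided by the nova test base class are assumptions,
since the file header is not shown above:

    from nova import test
    from nova.tests.unit.virt.hyperv import db_fakes  # assumed module name

    class FakeDbTestCase(test.NoDBTestCase):
        def setUp(self):
            super(FakeDbTestCase, self).setUp()
            # Swap the db API entry points for the fakes defined above.
            db_fakes.stub_out_db_instance_api(self.stubs)
            self.instance_data = db_fakes.get_fake_instance_data(
                'fake_vm', 'fake_project_id', 'fake_user_id')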

90
hyperv/tests/unit/fake.py Normal file
View File

@@ -0,0 +1,90 @@
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
import os
class PathUtils(object):
def open(self, path, mode):
return io.BytesIO(b'fake content')
def exists(self, path):
return False
def makedirs(self, path):
pass
def remove(self, path):
pass
def rename(self, src, dest):
pass
def copyfile(self, src, dest):
pass
def copy(self, src, dest):
pass
def rmtree(self, path):
pass
def get_instances_dir(self, remote_server=None):
return 'C:\\FakeInstancesPath\\'
def get_instance_migr_revert_dir(self, instance_name, create_dir=False,
remove_dir=False):
return os.path.join(self.get_instances_dir(), instance_name, '_revert')
def get_instance_dir(self, instance_name, remote_server=None,
create_dir=True, remove_dir=False):
return os.path.join(self.get_instances_dir(remote_server),
instance_name)
def lookup_root_vhd_path(self, instance_name):
instance_path = self.get_instance_dir(instance_name)
return os.path.join(instance_path, 'root.vhd')
def lookup_configdrive_path(self, instance_name):
instance_path = self.get_instance_dir(instance_name)
return os.path.join(instance_path, 'configdrive.iso')
def lookup_ephemeral_vhd_path(self, instance_name):
instance_path = self.get_instance_dir(instance_name)
if instance_path:
return os.path.join(instance_path, 'ephemeral.vhd')
def get_root_vhd_path(self, instance_name, format_ext):
instance_path = self.get_instance_dir(instance_name)
return os.path.join(instance_path, 'root.' + format_ext)
def get_ephemeral_vhd_path(self, instance_name, format_ext):
instance_path = self.get_instance_dir(instance_name)
return os.path.join(instance_path, 'ephemeral.' + format_ext.lower())
def get_base_vhd_dir(self):
return os.path.join(self.get_instances_dir(), '_base')
def get_export_dir(self, instance_name):
export_dir = os.path.join(self.get_instances_dir(), 'export',
instance_name)
return export_dir
def vhd_exists(self, path):
return False
def get_vm_console_log_paths(self, vm_name, remote_server=None):
return 'fake_vm_log_path'

View File

@@ -0,0 +1,33 @@
# Copyright 2014 Cloudbase Solutions Srl
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import test
class HyperVBaseTestCase(test.NoDBTestCase):
def setUp(self):
super(HyperVBaseTestCase, self).setUp()
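        # The hyperv modules only import wmi when sys.platform reports
        # 'win32', so both are faked here: the platform string is patched and
        # a stand-in wmi module is injected as a builtin, allowing the tests
        # to run on non-Windows hosts.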
wmi_patcher = mock.patch('__builtin__.wmi', create=True)
platform_patcher = mock.patch('sys.platform', 'win32')
platform_patcher.start()
wmi_patcher.start()
self.addCleanup(wmi_patcher.stop)
self.addCleanup(platform_patcher.stop)

View File

@@ -0,0 +1,188 @@
# Copyright 2014 Cloudbase Solutions Srl
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import test
from nova.virt.hyperv import basevolumeutils
def _exception_thrower():
raise Exception("Testing exception handling.")
class BaseVolumeUtilsTestCase(test.NoDBTestCase):
"""Unit tests for the Hyper-V BaseVolumeUtils class."""
_FAKE_COMPUTER_NAME = "fake_computer_name"
_FAKE_DOMAIN_NAME = "fake_domain_name"
_FAKE_INITIATOR_NAME = "fake_initiator_name"
_FAKE_INITIATOR_IQN_NAME = "iqn.1991-05.com.microsoft:fake_computer_name"
_FAKE_DISK_PATH = 'fake_path DeviceID="123\\\\2"'
_FAKE_MOUNT_DEVICE = '/dev/fake/mount'
_FAKE_DEVICE_NAME = '/dev/fake/path'
_FAKE_SWAP = {'device_name': _FAKE_DISK_PATH}
def setUp(self):
self._volutils = basevolumeutils.BaseVolumeUtils()
self._volutils._conn_wmi = mock.MagicMock()
self._volutils._conn_cimv2 = mock.MagicMock()
super(BaseVolumeUtilsTestCase, self).setUp()
def test_get_iscsi_initiator_ok(self):
self._check_get_iscsi_initiator(
mock.MagicMock(return_value=mock.sentinel.FAKE_KEY),
self._FAKE_INITIATOR_NAME)
def test_get_iscsi_initiator_exception(self):
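        # When the registry lookup raises, the fallback initiator name is
        # expected to be the default Microsoft-style iqn
        # (iqn.1991-05.com.microsoft:<computer_name>) suffixed with the
        # machine's domain.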
initiator_name = "%(iqn)s.%(domain)s" % {
'iqn': self._FAKE_INITIATOR_IQN_NAME,
'domain': self._FAKE_DOMAIN_NAME
}
self._check_get_iscsi_initiator(_exception_thrower, initiator_name)
def _check_get_iscsi_initiator(self, winreg_method, expected):
mock_computer = mock.MagicMock()
mock_computer.name = self._FAKE_COMPUTER_NAME
mock_computer.Domain = self._FAKE_DOMAIN_NAME
self._volutils._conn_cimv2.Win32_ComputerSystem.return_value = [
mock_computer]
with mock.patch.object(basevolumeutils,
'_winreg', create=True) as mock_winreg:
mock_winreg.OpenKey = winreg_method
mock_winreg.QueryValueEx = mock.MagicMock(return_value=[expected])
initiator_name = self._volutils.get_iscsi_initiator()
self.assertEqual(expected, initiator_name)
@mock.patch.object(basevolumeutils, 'driver')
def test_volume_in_mapping(self, mock_driver):
mock_driver.block_device_info_get_mapping.return_value = [
{'mount_device': self._FAKE_MOUNT_DEVICE}]
mock_driver.block_device_info_get_swap = mock.MagicMock(
return_value=self._FAKE_SWAP)
mock_driver.block_device_info_get_ephemerals = mock.MagicMock(
return_value=[{'device_name': self._FAKE_DEVICE_NAME}])
mock_driver.swap_is_usable = mock.MagicMock(return_value=True)
self.assertTrue(self._volutils.volume_in_mapping(
self._FAKE_MOUNT_DEVICE, mock.sentinel.FAKE_BLOCK_DEVICE_INFO))
def test_get_drive_number_from_disk_path(self):
fake_disk_path = (
'\\\\WIN-I5BTVHOIFGK\\root\\virtualization\\v2:Msvm_DiskDrive.'
'CreationClassName="Msvm_DiskDrive",DeviceID="Microsoft:353B3BE8-'
'310C-4cf4-839E-4E1B14616136\\\\1",SystemCreationClassName='
'"Msvm_ComputerSystem",SystemName="WIN-I5BTVHOIFGK"')
expected_disk_number = 1
ret_val = self._volutils._get_drive_number_from_disk_path(
fake_disk_path)
self.assertEqual(expected_disk_number, ret_val)
def test_get_drive_number_not_found(self):
fake_disk_path = 'fake_disk_path'
ret_val = self._volutils._get_drive_number_from_disk_path(
fake_disk_path)
self.assertFalse(ret_val)
@mock.patch.object(basevolumeutils.BaseVolumeUtils,
"_get_drive_number_from_disk_path")
def test_get_session_id_from_mounted_disk(self, mock_get_session_id):
mock_get_session_id.return_value = mock.sentinel.FAKE_DEVICE_NUMBER
mock_initiator_session = self._create_initiator_session()
mock_ses_class = self._volutils._conn_wmi.MSiSCSIInitiator_SessionClass
mock_ses_class.return_value = [mock_initiator_session]
session_id = self._volutils.get_session_id_from_mounted_disk(
self._FAKE_DISK_PATH)
self.assertEqual(mock.sentinel.FAKE_SESSION_ID, session_id)
def test_get_devices_for_target(self):
init_session = self._create_initiator_session()
mock_ses_class = self._volutils._conn_wmi.MSiSCSIInitiator_SessionClass
mock_ses_class.return_value = [init_session]
devices = self._volutils._get_devices_for_target(
mock.sentinel.FAKE_IQN)
self.assertEqual(init_session.Devices, devices)
def test_get_devices_for_target_not_found(self):
mock_ses_class = self._volutils._conn_wmi.MSiSCSIInitiator_SessionClass
mock_ses_class.return_value = []
devices = self._volutils._get_devices_for_target(
mock.sentinel.FAKE_IQN)
self.assertEqual(0, len(devices))
@mock.patch.object(basevolumeutils.BaseVolumeUtils,
'_get_devices_for_target')
def test_get_device_number_for_target(self, fake_get_devices):
init_session = self._create_initiator_session()
fake_get_devices.return_value = init_session.Devices
mock_ses_class = self._volutils._conn_wmi.MSiSCSIInitiator_SessionClass
mock_ses_class.return_value = [init_session]
device_number = self._volutils.get_device_number_for_target(
mock.sentinel.FAKE_IQN, mock.sentinel.FAKE_LUN)
self.assertEqual(mock.sentinel.FAKE_DEVICE_NUMBER, device_number)
@mock.patch.object(basevolumeutils.BaseVolumeUtils,
'_get_devices_for_target')
def test_get_target_lun_count(self, fake_get_devices):
init_session = self._create_initiator_session()
# Only disk devices are being counted.
disk_device = mock.Mock(DeviceType=self._volutils._FILE_DEVICE_DISK)
init_session.Devices.append(disk_device)
fake_get_devices.return_value = init_session.Devices
lun_count = self._volutils.get_target_lun_count(
mock.sentinel.FAKE_IQN)
self.assertEqual(1, lun_count)
@mock.patch.object(basevolumeutils.BaseVolumeUtils,
"_get_drive_number_from_disk_path")
def test_get_target_from_disk_path(self, mock_get_session_id):
mock_get_session_id.return_value = mock.sentinel.FAKE_DEVICE_NUMBER
init_sess = self._create_initiator_session()
mock_ses_class = self._volutils._conn_wmi.MSiSCSIInitiator_SessionClass
mock_ses_class.return_value = [init_sess]
(target_name, scsi_lun) = self._volutils.get_target_from_disk_path(
self._FAKE_DISK_PATH)
self.assertEqual(mock.sentinel.FAKE_TARGET_NAME, target_name)
self.assertEqual(mock.sentinel.FAKE_LUN, scsi_lun)
def _create_initiator_session(self):
device = mock.MagicMock()
device.ScsiLun = mock.sentinel.FAKE_LUN
device.DeviceNumber = mock.sentinel.FAKE_DEVICE_NUMBER
device.TargetName = mock.sentinel.FAKE_TARGET_NAME
init_session = mock.MagicMock()
init_session.Devices = [device]
init_session.SessionId = mock.sentinel.FAKE_SESSION_ID
return init_session

View File

@@ -0,0 +1,182 @@
# Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import units
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import constants
from nova.virt.hyperv import hostops
CONF = cfg.CONF
class HostOpsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V HostOps class."""
FAKE_ARCHITECTURE = 0
FAKE_NAME = 'fake_name'
FAKE_MANUFACTURER = 'FAKE_MANUFACTURER'
FAKE_NUM_CPUS = 1
FAKE_INSTANCE_DIR = "C:/fake/dir"
FAKE_LOCAL_IP = '10.11.12.13'
FAKE_TICK_COUNT = 1000000
def setUp(self):
super(HostOpsTestCase, self).setUp()
self._hostops = hostops.HostOps()
self._hostops._hostutils = mock.MagicMock()
self._hostops._pathutils = mock.MagicMock()
def test_get_cpu_info(self):
mock_processors = mock.MagicMock()
info = {'Architecture': self.FAKE_ARCHITECTURE,
'Name': self.FAKE_NAME,
'Manufacturer': self.FAKE_MANUFACTURER,
'NumberOfCores': self.FAKE_NUM_CPUS,
'NumberOfLogicalProcessors': self.FAKE_NUM_CPUS}
def getitem(key):
return info[key]
mock_processors.__getitem__.side_effect = getitem
self._hostops._hostutils.get_cpus_info.return_value = [mock_processors]
response = self._hostops._get_cpu_info()
self._hostops._hostutils.get_cpus_info.assert_called_once_with()
expected = [mock.call(fkey)
for fkey in constants.PROCESSOR_FEATURE.keys()]
        self._hostops._hostutils.is_cpu_feature_present.assert_has_calls(
            expected, any_order=True)
expected_response = self._get_mock_cpu_info()
self.assertEqual(expected_response, response)
def _get_mock_cpu_info(self):
return {'vendor': self.FAKE_MANUFACTURER,
'model': self.FAKE_NAME,
'arch': constants.WMI_WIN32_PROCESSOR_ARCHITECTURE[
self.FAKE_ARCHITECTURE],
'features': constants.PROCESSOR_FEATURE.values(),
'topology': {'cores': self.FAKE_NUM_CPUS,
'threads': self.FAKE_NUM_CPUS,
'sockets': self.FAKE_NUM_CPUS}}
def test_get_memory_info(self):
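        # hostutils reports memory in KB; _get_memory_info is expected to
        # convert to MB and return a (total, free, used) tuple, here
        # (2, 1, 1).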
self._hostops._hostutils.get_memory_info.return_value = (2 * units.Ki,
1 * units.Ki)
response = self._hostops._get_memory_info()
self._hostops._hostutils.get_memory_info.assert_called_once_with()
self.assertEqual((2, 1, 1), response)
def test_get_local_hdd_info_gb(self):
        self._hostops._pathutils.get_instances_dir.return_value = ''
self._hostops._hostutils.get_volume_info.return_value = (2 * units.Gi,
1 * units.Gi)
response = self._hostops._get_local_hdd_info_gb()
self._hostops._pathutils.get_instances_dir.assert_called_once_with()
self._hostops._hostutils.get_volume_info.assert_called_once_with('')
self.assertEqual((2, 1, 1), response)
def test_get_hypervisor_version(self):
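        # The Windows version string is expected to be flattened as
        # major * 1000 + minor: '6.3.9600' -> 6003 and '10.1.0' -> 10001.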
self._hostops._hostutils.get_windows_version.return_value = '6.3.9600'
response_lower = self._hostops._get_hypervisor_version()
self._hostops._hostutils.get_windows_version.return_value = '10.1.0'
response_higher = self._hostops._get_hypervisor_version()
self.assertEqual(6003, response_lower)
self.assertEqual(10001, response_higher)
@mock.patch.object(hostops.HostOps, '_get_cpu_info')
@mock.patch.object(hostops.HostOps, '_get_memory_info')
@mock.patch.object(hostops.HostOps, '_get_hypervisor_version')
@mock.patch.object(hostops.HostOps, '_get_local_hdd_info_gb')
@mock.patch('platform.node')
def test_get_available_resource(self, mock_node,
mock_get_local_hdd_info_gb,
mock_get_hypervisor_version,
mock_get_memory_info, mock_get_cpu_info):
mock_get_local_hdd_info_gb.return_value = (mock.sentinel.LOCAL_GB,
mock.sentinel.LOCAL_GB_FREE,
mock.sentinel.LOCAL_GB_USED)
mock_get_memory_info.return_value = (mock.sentinel.MEMORY_MB,
mock.sentinel.MEMORY_MB_FREE,
mock.sentinel.MEMORY_MB_USED)
mock_cpu_info = self._get_mock_cpu_info()
mock_get_cpu_info.return_value = mock_cpu_info
mock_get_hypervisor_version.return_value = mock.sentinel.VERSION
response = self._hostops.get_available_resource()
mock_get_memory_info.assert_called_once_with()
mock_get_cpu_info.assert_called_once_with()
mock_get_hypervisor_version.assert_called_once_with()
expected = {'supported_instances': '[["i686", "hyperv", "hvm"], '
'["x86_64", "hyperv", "hvm"]]',
'hypervisor_hostname': mock_node(),
'cpu_info': jsonutils.dumps(mock_cpu_info),
'hypervisor_version': mock.sentinel.VERSION,
'memory_mb': mock.sentinel.MEMORY_MB,
'memory_mb_used': mock.sentinel.MEMORY_MB_USED,
'local_gb': mock.sentinel.LOCAL_GB,
'local_gb_used': mock.sentinel.LOCAL_GB_USED,
'vcpus': self.FAKE_NUM_CPUS,
'vcpus_used': 0,
'hypervisor_type': 'hyperv',
'numa_topology': None,
}
self.assertEqual(expected, response)
def _test_host_power_action(self, action):
self._hostops._hostutils.host_power_action = mock.Mock()
self._hostops.host_power_action(action)
self._hostops._hostutils.host_power_action.assert_called_with(
action)
def test_host_power_action_shutdown(self):
self._test_host_power_action(constants.HOST_POWER_ACTION_SHUTDOWN)
def test_host_power_action_reboot(self):
self._test_host_power_action(constants.HOST_POWER_ACTION_REBOOT)
def test_host_power_action_exception(self):
self.assertRaises(NotImplementedError,
self._hostops.host_power_action,
constants.HOST_POWER_ACTION_STARTUP)
def test_get_host_ip_addr(self):
CONF.set_override('my_ip', None)
self._hostops._hostutils.get_local_ips.return_value = [
self.FAKE_LOCAL_IP]
response = self._hostops.get_host_ip_addr()
self._hostops._hostutils.get_local_ips.assert_called_once_with()
self.assertEqual(self.FAKE_LOCAL_IP, response)
@mock.patch('time.strftime')
def test_get_host_uptime(self, mock_time):
self._hostops._hostutils.get_host_tick_count64.return_value = (
self.FAKE_TICK_COUNT)
response = self._hostops.get_host_uptime()
tdelta = datetime.timedelta(milliseconds=long(self.FAKE_TICK_COUNT))
expected = "%s up %s, 0 users, load average: 0, 0, 0" % (
str(mock_time()), str(tdelta))
self.assertEqual(expected, response)

View File

@@ -0,0 +1,141 @@
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import test
from nova.virt.hyperv import constants
from nova.virt.hyperv import hostutils
class FakeCPUSpec(object):
"""Fake CPU Spec for unit tests."""
Architecture = mock.sentinel.cpu_arch
Name = mock.sentinel.cpu_name
Manufacturer = mock.sentinel.cpu_man
NumberOfCores = mock.sentinel.cpu_cores
NumberOfLogicalProcessors = mock.sentinel.cpu_procs
class HostUtilsTestCase(test.NoDBTestCase):
"""Unit tests for the Hyper-V hostutils class."""
_FAKE_MEMORY_TOTAL = 1024L
_FAKE_MEMORY_FREE = 512L
_FAKE_DISK_SIZE = 1024L
_FAKE_DISK_FREE = 512L
_FAKE_VERSION_GOOD = '6.2.0'
_FAKE_VERSION_BAD = '6.1.9'
def setUp(self):
self._hostutils = hostutils.HostUtils()
self._hostutils._conn_cimv2 = mock.MagicMock()
super(HostUtilsTestCase, self).setUp()
@mock.patch('nova.virt.hyperv.hostutils.ctypes')
def test_get_host_tick_count64(self, mock_ctypes):
tick_count64 = "100"
mock_ctypes.windll.kernel32.GetTickCount64.return_value = tick_count64
response = self._hostutils.get_host_tick_count64()
self.assertEqual(tick_count64, response)
def test_get_cpus_info(self):
cpu = mock.MagicMock(spec=FakeCPUSpec)
self._hostutils._conn_cimv2.query.return_value = [cpu]
cpu_list = self._hostutils.get_cpus_info()
self.assertEqual([cpu._mock_children], cpu_list)
def test_get_memory_info(self):
memory = mock.MagicMock()
type(memory).TotalVisibleMemorySize = mock.PropertyMock(
return_value=self._FAKE_MEMORY_TOTAL)
type(memory).FreePhysicalMemory = mock.PropertyMock(
return_value=self._FAKE_MEMORY_FREE)
self._hostutils._conn_cimv2.query.return_value = [memory]
total_memory, free_memory = self._hostutils.get_memory_info()
self.assertEqual(self._FAKE_MEMORY_TOTAL, total_memory)
self.assertEqual(self._FAKE_MEMORY_FREE, free_memory)
def test_get_volume_info(self):
disk = mock.MagicMock()
type(disk).Size = mock.PropertyMock(return_value=self._FAKE_DISK_SIZE)
type(disk).FreeSpace = mock.PropertyMock(
return_value=self._FAKE_DISK_FREE)
self._hostutils._conn_cimv2.query.return_value = [disk]
(total_memory, free_memory) = self._hostutils.get_volume_info(
mock.sentinel.FAKE_DRIVE)
self.assertEqual(self._FAKE_DISK_SIZE, total_memory)
self.assertEqual(self._FAKE_DISK_FREE, free_memory)
def test_check_min_windows_version_true(self):
self._test_check_min_windows_version(self._FAKE_VERSION_GOOD, True)
def test_check_min_windows_version_false(self):
self._test_check_min_windows_version(self._FAKE_VERSION_BAD, False)
def _test_check_min_windows_version(self, version, expected):
os = mock.MagicMock()
os.Version = version
self._hostutils._conn_cimv2.Win32_OperatingSystem.return_value = [os]
self.assertEqual(expected,
self._hostutils.check_min_windows_version(6, 2))
def _test_host_power_action(self, action):
fake_win32 = mock.MagicMock()
fake_win32.Win32Shutdown = mock.MagicMock()
self._hostutils._conn_cimv2.Win32_OperatingSystem.return_value = [
fake_win32]
if action == constants.HOST_POWER_ACTION_SHUTDOWN:
self._hostutils.host_power_action(action)
fake_win32.Win32Shutdown.assert_called_with(
self._hostutils._HOST_FORCED_SHUTDOWN)
elif action == constants.HOST_POWER_ACTION_REBOOT:
self._hostutils.host_power_action(action)
fake_win32.Win32Shutdown.assert_called_with(
self._hostutils._HOST_FORCED_REBOOT)
else:
self.assertRaises(NotImplementedError,
self._hostutils.host_power_action, action)
def test_host_shutdown(self):
self._test_host_power_action(constants.HOST_POWER_ACTION_SHUTDOWN)
def test_host_reboot(self):
self._test_host_power_action(constants.HOST_POWER_ACTION_REBOOT)
def test_host_startup(self):
self._test_host_power_action(constants.HOST_POWER_ACTION_STARTUP)
def test_get_supported_vm_types_2012_r2(self):
with mock.patch.object(self._hostutils,
'check_min_windows_version') as mock_check_win:
mock_check_win.return_value = True
result = self._hostutils.get_supported_vm_types()
self.assertEqual([constants.IMAGE_PROP_VM_GEN_1,
constants.IMAGE_PROP_VM_GEN_2], result)
def test_get_supported_vm_types(self):
with mock.patch.object(self._hostutils,
'check_min_windows_version') as mock_check_win:
mock_check_win.return_value = False
result = self._hostutils.get_supported_vm_types()
self.assertEqual([constants.IMAGE_PROP_VM_GEN_1], result)

File diff suppressed because it is too large

View File

@@ -0,0 +1,137 @@
# Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from oslo_config import cfg
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_flavor
from nova.virt.hyperv import constants
from nova.virt.hyperv import imagecache
CONF = cfg.CONF
class ImageCacheTestCase(test.NoDBTestCase):
"""Unit tests for the Hyper-V ImageCache class."""
FAKE_BASE_DIR = 'fake/base/dir'
FAKE_FORMAT = 'fake_format'
FAKE_IMAGE_REF = 'fake_image_ref'
def setUp(self):
super(ImageCacheTestCase, self).setUp()
self.context = 'fake-context'
self.instance = fake_instance.fake_instance_obj(self.context)
# utilsfactory will check the host OS version via get_hostutils,
# in order to return the proper Utils Class, so it must be mocked.
patched_func = mock.patch.object(imagecache.utilsfactory,
"get_hostutils")
patched_get_pathutils = mock.patch.object(imagecache.utilsfactory,
"get_pathutils")
patched_func.start()
patched_get_pathutils.start()
self.addCleanup(patched_func.stop)
self.addCleanup(patched_get_pathutils.stop)
self.imagecache = imagecache.ImageCache()
self.imagecache._pathutils = mock.MagicMock()
self.imagecache._vhdutils = mock.MagicMock()
def _test_get_root_vhd_size_gb(self, old_flavor=True):
if old_flavor:
mock_flavor = objects.Flavor(**test_flavor.fake_flavor)
self.instance.old_flavor = mock_flavor
else:
self.instance.old_flavor = None
return self.imagecache._get_root_vhd_size_gb(self.instance)
def test_get_root_vhd_size_gb_old_flavor(self):
ret_val = self._test_get_root_vhd_size_gb()
self.assertEqual(test_flavor.fake_flavor['root_gb'], ret_val)
def test_get_root_vhd_size_gb(self):
ret_val = self._test_get_root_vhd_size_gb(old_flavor=False)
self.assertEqual(self.instance.root_gb, ret_val)
def _prepare_get_cached_image(self, path_exists, use_cow):
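        # The expected cache layout is <base_vhd_dir>/<image_ref>, with the
        # detected disk format appended as the extension once the image has
        # been fetched.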
self.instance.image_ref = self.FAKE_IMAGE_REF
self.imagecache._pathutils.get_base_vhd_dir.return_value = (
self.FAKE_BASE_DIR)
self.imagecache._pathutils.exists.return_value = path_exists
self.imagecache._vhdutils.get_vhd_format.return_value = (
constants.DISK_FORMAT_VHD)
CONF.set_override('use_cow_images', use_cow)
expected_path = os.path.join(self.FAKE_BASE_DIR,
self.FAKE_IMAGE_REF)
expected_vhd_path = "%s.%s" % (expected_path,
constants.DISK_FORMAT_VHD.lower())
return (expected_path, expected_vhd_path)
@mock.patch.object(imagecache.images, 'fetch')
def test_get_cached_image_with_fetch(self, mock_fetch):
(expected_path,
expected_vhd_path) = self._prepare_get_cached_image(False, False)
result = self.imagecache.get_cached_image(self.context, self.instance)
self.assertEqual(expected_vhd_path, result)
mock_fetch.assert_called_once_with(self.context, self.FAKE_IMAGE_REF,
expected_path,
self.instance['user_id'],
self.instance['project_id'])
self.imagecache._vhdutils.get_vhd_format.assert_called_once_with(
expected_path)
self.imagecache._pathutils.rename.assert_called_once_with(
expected_path, expected_vhd_path)
@mock.patch.object(imagecache.images, 'fetch')
def test_get_cached_image_with_fetch_exception(self, mock_fetch):
(expected_path,
expected_vhd_path) = self._prepare_get_cached_image(False, False)
# path doesn't exist until fetched.
self.imagecache._pathutils.exists.side_effect = [False, False, True]
mock_fetch.side_effect = exception.InvalidImageRef(
image_href=self.FAKE_IMAGE_REF)
self.assertRaises(exception.InvalidImageRef,
self.imagecache.get_cached_image,
self.context, self.instance)
self.imagecache._pathutils.remove.assert_called_once_with(
expected_path)
@mock.patch.object(imagecache.ImageCache, '_resize_and_cache_vhd')
def test_get_cached_image_use_cow(self, mock_resize):
(expected_path,
expected_vhd_path) = self._prepare_get_cached_image(True, True)
expected_resized_vhd_path = expected_vhd_path + 'x'
mock_resize.return_value = expected_resized_vhd_path
result = self.imagecache.get_cached_image(self.context, self.instance)
self.assertEqual(expected_resized_vhd_path, result)
mock_resize.assert_called_once_with(self.instance, expected_vhd_path)

View File

@@ -0,0 +1,61 @@
# Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
from nova import test
from nova.virt.hyperv import ioutils
class IOThreadTestCase(test.NoDBTestCase):
_FAKE_SRC = r'fake_source_file'
_FAKE_DEST = r'fake_dest_file'
_FAKE_MAX_BYTES = 1
def setUp(self):
self._iothread = ioutils.IOThread(
self._FAKE_SRC, self._FAKE_DEST, self._FAKE_MAX_BYTES)
super(IOThreadTestCase, self).setUp()
@mock.patch('__builtin__.open')
@mock.patch('os.rename')
@mock.patch('os.path.exists')
@mock.patch('os.remove')
def test_copy(self, fake_remove, fake_exists, fake_rename, fake_open):
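        # Exercises one pass of the copy loop: the source is read, the
        # destination written and, once the byte limit is hit, rotated to the
        # archive path and reopened, which accounts for the three open()
        # calls asserted below.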
fake_data = 'a'
fake_src = mock.Mock()
fake_dest = mock.Mock()
fake_src.read.return_value = fake_data
fake_dest.tell.return_value = 0
fake_exists.return_value = True
mock_context_manager = mock.MagicMock()
fake_open.return_value = mock_context_manager
mock_context_manager.__enter__.side_effect = [fake_src, fake_dest]
self._iothread._stopped.isSet = mock.Mock(side_effect=[False, True])
self._iothread._copy()
fake_dest.seek.assert_called_once_with(0, os.SEEK_END)
fake_dest.write.assert_called_once_with(fake_data)
fake_dest.close.assert_called_once_with()
fake_rename.assert_called_once_with(
self._iothread._dest, self._iothread._dest_archive)
fake_remove.assert_called_once_with(
self._iothread._dest_archive)
self.assertEqual(3, fake_open.call_count)

View File

@@ -0,0 +1,136 @@
# Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import livemigrationops
from nova.virt.hyperv import vmutils
CONF = cfg.CONF
class LiveMigrationOpsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V LiveMigrationOps class."""
def setUp(self):
super(LiveMigrationOpsTestCase, self).setUp()
self.context = 'fake_context'
self._livemigrops = livemigrationops.LiveMigrationOps()
self._livemigrops._livemigrutils = mock.MagicMock()
@mock.patch('nova.virt.hyperv.vmops.VMOps.copy_vm_console_logs')
@mock.patch('nova.virt.configdrive.required_by')
@mock.patch('nova.virt.hyperv.pathutils.PathUtils.copy_configdrive')
def _test_live_migration(self, mock_copy_configdrive, mock_required_by,
mock_copy_logs, side_effect, configdrive=False):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_post = mock.MagicMock()
mock_recover = mock.MagicMock()
fake_dest = mock.sentinel.DESTINATION
self._livemigrops._livemigrutils.live_migrate_vm.side_effect = [
side_effect]
if side_effect is vmutils.HyperVException:
self.assertRaises(vmutils.HyperVException,
self._livemigrops.live_migration,
self.context, mock_instance, fake_dest,
mock_post, mock_recover, False, None)
mock_recover.assert_called_once_with(self.context, mock_instance,
fake_dest, False)
else:
if configdrive:
mock_required_by.return_value = True
self.flags(config_drive_cdrom=True, group='hyperv')
self._livemigrops.live_migration(context=self.context,
instance_ref=mock_instance,
dest=fake_dest,
post_method=mock_post,
recover_method=mock_recover)
mock_copy_logs.assert_called_once_with(mock_instance.name,
fake_dest)
if configdrive:
mock_copy_configdrive.assert_called_once_with(
mock_instance.name, fake_dest)
mock_live_migr = self._livemigrops._livemigrutils.live_migrate_vm
mock_live_migr.assert_called_once_with(mock_instance.name,
fake_dest)
mock_post.assert_called_once_with(self.context, mock_instance,
fake_dest, False)
def test_live_migration(self):
self._test_live_migration(side_effect=None)
def test_live_migration_exception(self):
self._test_live_migration(side_effect=vmutils.HyperVException)
def test_live_migration_wrong_os_version(self):
self._livemigrops._livemigrutils = None
self.assertRaises(NotImplementedError,
self._livemigrops.live_migration, self.context,
instance_ref=mock.DEFAULT,
dest=mock.sentinel.DESTINATION,
post_method=mock.DEFAULT,
recover_method=mock.DEFAULT)
def test_live_migration_with_configdrive(self):
self._test_live_migration(side_effect=None, configdrive=True)
@mock.patch('nova.virt.hyperv.volumeops.VolumeOps'
'.ebs_root_in_block_devices')
@mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image')
@mock.patch('nova.virt.hyperv.volumeops.VolumeOps'
'.initialize_volumes_connection')
def test_pre_live_migration(self, mock_initialize_connection,
mock_get_cached_image,
mock_ebs_root_in_block_devices):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_instance.image_ref = "fake_image_ref"
mock_ebs_root_in_block_devices.return_value = None
CONF.set_override('use_cow_images', True)
self._livemigrops.pre_live_migration(
self.context, mock_instance,
block_device_info=mock.sentinel.BLOCK_INFO,
network_info=mock.sentinel.NET_INFO)
check_config = (
self._livemigrops._livemigrutils.check_live_migration_config)
check_config.assert_called_once_with()
mock_ebs_root_in_block_devices.assert_called_once_with(
mock.sentinel.BLOCK_INFO)
mock_get_cached_image.assert_called_once_with(self.context,
mock_instance)
mock_initialize_connection.assert_called_once_with(
mock.sentinel.BLOCK_INFO)
@mock.patch('nova.virt.hyperv.volumeops.VolumeOps.disconnect_volumes')
def test_post_live_migration(self, mock_disconnect_volumes):
self._livemigrops.post_live_migration(
self.context, mock.sentinel.instance,
mock.sentinel.block_device_info)
mock_disconnect_volumes.assert_called_once_with(
mock.sentinel.block_device_info)
@mock.patch('nova.virt.hyperv.vmops.VMOps.log_vm_serial_output')
def test_post_live_migration_at_destination(self, mock_log_vm):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._livemigrops.post_live_migration_at_destination(
self.context, mock_instance, network_info=mock.sentinel.NET_INFO,
block_migration=mock.sentinel.BLOCK_INFO)
mock_log_vm.assert_called_once_with(mock_instance.name,
mock_instance.uuid)

View File

@@ -0,0 +1,274 @@
# Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import test
from nova.virt.hyperv import livemigrationutils
class LiveMigrationUtilsTestCase(test.NoDBTestCase):
"""Unit tests for the Hyper-V LiveMigrationUtils class."""
_FAKE_RET_VAL = 0
_RESOURCE_TYPE_VHD = 31
_RESOURCE_TYPE_DISK = 17
_RESOURCE_SUB_TYPE_VHD = 'Microsoft:Hyper-V:Virtual Hard Disk'
_RESOURCE_SUB_TYPE_DISK = 'Microsoft:Hyper-V:Physical Disk Drive'
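    # Msvm resource allocation setting data type/subtype pairs, used by the
    # fakes below to tell virtual hard disks apart from passthrough physical
    # disks.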
def setUp(self):
self.liveutils = livemigrationutils.LiveMigrationUtils()
self.liveutils._vmutils = mock.MagicMock()
self.liveutils._volutils = mock.MagicMock()
self._conn = mock.MagicMock()
self.liveutils._get_conn_v2 = mock.MagicMock(return_value=self._conn)
super(LiveMigrationUtilsTestCase, self).setUp()
def test_check_live_migration_config(self):
mock_migr_svc = self._conn.Msvm_VirtualSystemMigrationService()[0]
vsmssd = mock.MagicMock()
vsmssd.EnableVirtualSystemMigration = True
mock_migr_svc.associators.return_value = [vsmssd]
mock_migr_svc.MigrationServiceListenerIPAdressList.return_value = [
mock.sentinel.FAKE_HOST]
self.liveutils.check_live_migration_config()
self.assertTrue(mock_migr_svc.associators.called)
@mock.patch.object(livemigrationutils.LiveMigrationUtils,
'_destroy_planned_vm')
def test_check_existing_planned_vm_found(self, mock_destroy_planned_vm):
mock_vm = mock.MagicMock()
mock_v2 = mock.MagicMock()
mock_v2.Msvm_PlannedComputerSystem.return_value = [mock_vm]
self.liveutils._check_existing_planned_vm(mock_v2, mock_vm)
mock_destroy_planned_vm.assert_called_once_with(mock_v2, mock_vm)
@mock.patch.object(livemigrationutils.LiveMigrationUtils,
'_destroy_planned_vm')
def test_check_existing_planned_vm_none(self, mock_destroy_planned_vm):
mock_v2 = mock.MagicMock()
mock_v2.Msvm_PlannedComputerSystem.return_value = []
self.liveutils._check_existing_planned_vm(mock_v2, mock.MagicMock())
self.assertFalse(mock_destroy_planned_vm.called)
def test_create_remote_planned_vm(self):
mock_vsmsd = self._conn.query()[0]
mock_vm = mock.MagicMock()
mock_v2 = mock.MagicMock()
mock_v2.Msvm_PlannedComputerSystem.return_value = [mock_vm]
migr_svc = self._conn.Msvm_VirtualSystemMigrationService()[0]
migr_svc.MigrateVirtualSystemToHost.return_value = (
self._FAKE_RET_VAL, mock.sentinel.FAKE_JOB_PATH)
resulted_vm = self.liveutils._create_remote_planned_vm(
self._conn, mock_v2, mock_vm, [mock.sentinel.FAKE_REMOTE_IP_ADDR],
mock.sentinel.FAKE_HOST)
self.assertEqual(mock_vm, resulted_vm)
migr_svc.MigrateVirtualSystemToHost.assert_called_once_with(
ComputerSystem=mock_vm.path_.return_value,
DestinationHost=mock.sentinel.FAKE_HOST,
MigrationSettingData=mock_vsmsd.GetText_.return_value)
def test_get_physical_disk_paths(self):
ide_path = {mock.sentinel.IDE_PATH: mock.sentinel.IDE_HOST_RESOURCE}
scsi_path = {mock.sentinel.SCSI_PATH: mock.sentinel.SCSI_HOST_RESOURCE}
ide_ctrl = self.liveutils._vmutils.get_vm_ide_controller.return_value
scsi_ctrl = self.liveutils._vmutils.get_vm_scsi_controller.return_value
mock_get_controller_paths = (
self.liveutils._vmutils.get_controller_volume_paths)
mock_get_controller_paths.side_effect = [ide_path, scsi_path]
result = self.liveutils._get_physical_disk_paths(mock.sentinel.VM_NAME)
expected = dict(ide_path)
expected.update(scsi_path)
self.assertDictContainsSubset(expected, result)
calls = [mock.call(ide_ctrl), mock.call(scsi_ctrl)]
mock_get_controller_paths.assert_has_calls(calls)
def test_get_physical_disk_paths_no_ide(self):
scsi_path = {mock.sentinel.SCSI_PATH: mock.sentinel.SCSI_HOST_RESOURCE}
scsi_ctrl = self.liveutils._vmutils.get_vm_scsi_controller.return_value
mock_get_controller_paths = (
self.liveutils._vmutils.get_controller_volume_paths)
self.liveutils._vmutils.get_vm_ide_controller.return_value = None
mock_get_controller_paths.return_value = scsi_path
result = self.liveutils._get_physical_disk_paths(mock.sentinel.VM_NAME)
self.assertEqual(scsi_path, result)
mock_get_controller_paths.assert_called_once_with(scsi_ctrl)
@mock.patch.object(livemigrationutils.volumeutilsv2, 'VolumeUtilsV2')
def test_get_remote_disk_data(self, mock_vol_utils_class):
mock_vol_utils_remote = mock_vol_utils_class.return_value
mock_vm_utils = mock.MagicMock()
disk_paths = {
mock.sentinel.FAKE_RASD_PATH: mock.sentinel.FAKE_DISK_PATH}
self.liveutils._volutils.get_target_from_disk_path.return_value = (
mock.sentinel.FAKE_IQN, mock.sentinel.FAKE_LUN)
mock_vol_utils_remote.get_device_number_for_target.return_value = (
mock.sentinel.FAKE_DEV_NUM)
mock_vm_utils.get_mounted_disk_by_drive_number.return_value = (
mock.sentinel.FAKE_DISK_PATH)
disk_paths = self.liveutils._get_remote_disk_data(
mock_vm_utils, disk_paths, mock.sentinel.FAKE_HOST)
self.liveutils._volutils.get_target_from_disk_path.assert_called_with(
mock.sentinel.FAKE_DISK_PATH)
mock_vol_utils_remote.get_device_number_for_target.assert_called_with(
mock.sentinel.FAKE_IQN, mock.sentinel.FAKE_LUN)
mock_vm_utils.get_mounted_disk_by_drive_number.assert_called_once_with(
mock.sentinel.FAKE_DEV_NUM)
self.assertEqual(
{mock.sentinel.FAKE_RASD_PATH: mock.sentinel.FAKE_DISK_PATH},
disk_paths)
def test_update_planned_vm_disk_resources(self):
mock_vm_utils = mock.MagicMock()
self._prepare_vm_mocks(self._RESOURCE_TYPE_DISK,
self._RESOURCE_SUB_TYPE_DISK)
mock_vm = self._conn.Msvm_ComputerSystem.return_value[0]
sasd = mock_vm.associators()[0].associators()[0]
mock_vsmsvc = self._conn.Msvm_VirtualSystemManagementService()[0]
self.liveutils._update_planned_vm_disk_resources(
mock_vm_utils, self._conn, mock_vm, mock.sentinel.FAKE_VM_NAME,
{sasd.path.return_value.RelPath: mock.sentinel.FAKE_RASD_PATH})
mock_vsmsvc.ModifyResourceSettings.assert_called_once_with(
ResourceSettings=[sasd.GetText_.return_value])
def test_get_vhd_setting_data(self):
self._prepare_vm_mocks(self._RESOURCE_TYPE_VHD,
self._RESOURCE_SUB_TYPE_VHD)
mock_vm = self._conn.Msvm_ComputerSystem.return_value[0]
mock_sasd = mock_vm.associators()[0].associators()[0]
vhd_sds = self.liveutils._get_vhd_setting_data(mock_vm)
self.assertEqual([mock_sasd.GetText_.return_value], vhd_sds)
def test_live_migrate_vm_helper(self):
mock_conn_local = mock.MagicMock()
mock_vm = mock.MagicMock()
mock_vsmsd = mock_conn_local.query()[0]
mock_vsmsvc = mock_conn_local.Msvm_VirtualSystemMigrationService()[0]
mock_vsmsvc.MigrateVirtualSystemToHost.return_value = (
self._FAKE_RET_VAL, mock.sentinel.FAKE_JOB_PATH)
self.liveutils._live_migrate_vm(
mock_conn_local, mock_vm, None,
[mock.sentinel.FAKE_REMOTE_IP_ADDR],
mock.sentinel.FAKE_RASD_PATH, mock.sentinel.FAKE_HOST)
mock_vsmsvc.MigrateVirtualSystemToHost.assert_called_once_with(
ComputerSystem=mock_vm.path_.return_value,
DestinationHost=mock.sentinel.FAKE_HOST,
MigrationSettingData=mock_vsmsd.GetText_.return_value,
NewResourceSettingData=mock.sentinel.FAKE_RASD_PATH)
@mock.patch.object(livemigrationutils, 'vmutilsv2')
def test_live_migrate_vm(self, mock_vm_utils):
mock_vm_utils_remote = mock_vm_utils.VMUtilsV2.return_value
mock_vm = self._get_vm()
mock_migr_svc = self._conn.Msvm_VirtualSystemMigrationService()[0]
mock_migr_svc.MigrationServiceListenerIPAddressList = [
mock.sentinel.FAKE_REMOTE_IP_ADDR]
# patches, call and assertions.
with mock.patch.multiple(
self.liveutils,
_destroy_planned_vm=mock.DEFAULT,
_get_physical_disk_paths=mock.DEFAULT,
_get_remote_disk_data=mock.DEFAULT,
_create_remote_planned_vm=mock.DEFAULT,
_update_planned_vm_disk_resources=mock.DEFAULT,
_get_vhd_setting_data=mock.DEFAULT,
_live_migrate_vm=mock.DEFAULT):
disk_paths = {
mock.sentinel.FAKE_IDE_PATH: mock.sentinel.FAKE_SASD_RESOURCE}
self.liveutils._get_physical_disk_paths.return_value = disk_paths
mock_disk_paths = [mock.sentinel.FAKE_DISK_PATH]
self.liveutils._get_remote_disk_data.return_value = (
mock_disk_paths)
self.liveutils._create_remote_planned_vm.return_value = mock_vm
self.liveutils.live_migrate_vm(mock.sentinel.FAKE_VM_NAME,
mock.sentinel.FAKE_HOST)
self.liveutils._get_remote_disk_data.assert_called_once_with(
mock_vm_utils_remote, disk_paths, mock.sentinel.FAKE_HOST)
self.liveutils._create_remote_planned_vm.assert_called_once_with(
self._conn, self._conn, mock_vm,
[mock.sentinel.FAKE_REMOTE_IP_ADDR], mock.sentinel.FAKE_HOST)
mocked_method = self.liveutils._update_planned_vm_disk_resources
mocked_method.assert_called_once_with(
mock_vm_utils_remote, self._conn, mock_vm,
mock.sentinel.FAKE_VM_NAME, mock_disk_paths)
self.liveutils._live_migrate_vm.assert_called_once_with(
self._conn, mock_vm, mock_vm,
[mock.sentinel.FAKE_REMOTE_IP_ADDR],
self.liveutils._get_vhd_setting_data.return_value,
mock.sentinel.FAKE_HOST)
def _prepare_vm_mocks(self, resource_type, resource_sub_type):
mock_vm_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vm = self._get_vm()
self._conn.Msvm_PlannedComputerSystem.return_value = [vm]
mock_vm_svc.DestroySystem.return_value = (mock.sentinel.FAKE_JOB_PATH,
self._FAKE_RET_VAL)
mock_vm_svc.ModifyResourceSettings.return_value = (
None, mock.sentinel.FAKE_JOB_PATH, self._FAKE_RET_VAL)
sasd = mock.MagicMock()
other_sasd = mock.MagicMock()
sasd.ResourceType = resource_type
sasd.ResourceSubType = resource_sub_type
sasd.HostResource = [mock.sentinel.FAKE_SASD_RESOURCE]
sasd.path.return_value.RelPath = mock.sentinel.FAKE_DISK_PATH
vm_settings = mock.MagicMock()
vm.associators.return_value = [vm_settings]
vm_settings.associators.return_value = [sasd, other_sasd]
def _get_vm(self):
mock_vm = mock.MagicMock()
self._conn.Msvm_ComputerSystem.return_value = [mock_vm]
mock_vm.path_.return_value = mock.sentinel.FAKE_VM_PATH
return mock_vm

View File

@@ -0,0 +1,74 @@
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import migrationops
from nova.virt.hyperv import vmutils
class MigrationOpsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V MigrationOps class."""
_FAKE_TIMEOUT = 10
_FAKE_RETRY_INTERVAL = 5
def setUp(self):
super(MigrationOpsTestCase, self).setUp()
self.context = 'fake-context'
self._migrationops = migrationops.MigrationOps()
self._migrationops._vmops = mock.MagicMock()
self._migrationops._vmutils = mock.MagicMock()
self._migrationops._pathutils = mock.Mock()
def test_check_and_attach_config_drive_unknown_path(self):
instance = fake_instance.fake_instance_obj(self.context,
expected_attrs=['system_metadata'])
instance.config_drive = 'True'
self._migrationops._pathutils.lookup_configdrive_path.return_value = (
None)
self.assertRaises(vmutils.HyperVException,
self._migrationops._check_and_attach_config_drive,
instance,
mock.sentinel.FAKE_VM_GEN)
@mock.patch.object(migrationops.MigrationOps, '_migrate_disk_files')
@mock.patch.object(migrationops.MigrationOps, '_check_target_flavor')
def test_migrate_disk_and_power_off(self, mock_check_flavor,
mock_migrate_disk_files):
instance = fake_instance.fake_instance_obj(self.context)
flavor = mock.MagicMock()
network_info = mock.MagicMock()
disk_files = [mock.MagicMock()]
volume_drives = [mock.MagicMock()]
mock_get_vm_st_path = self._migrationops._vmutils.get_vm_storage_paths
mock_get_vm_st_path.return_value = (disk_files, volume_drives)
self._migrationops.migrate_disk_and_power_off(
self.context, instance, mock.sentinel.FAKE_DEST, flavor,
network_info, None, self._FAKE_TIMEOUT, self._FAKE_RETRY_INTERVAL)
mock_check_flavor.assert_called_once_with(instance, flavor)
self._migrationops._vmops.power_off.assert_called_once_with(
instance, self._FAKE_TIMEOUT, self._FAKE_RETRY_INTERVAL)
mock_get_vm_st_path.assert_called_once_with(instance.name)
mock_migrate_disk_files.assert_called_once_with(
instance.name, disk_files, mock.sentinel.FAKE_DEST)
self._migrationops._vmops.destroy.assert_called_once_with(
instance, destroy_disks=False)

View File

@@ -0,0 +1,82 @@
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import test
from nova.virt.hyperv import networkutils
from nova.virt.hyperv import vmutils
class NetworkUtilsTestCase(test.NoDBTestCase):
"""Unit tests for the Hyper-V NetworkUtils class."""
_FAKE_PORT = {'Name': mock.sentinel.FAKE_PORT_NAME}
_FAKE_RET_VALUE = 0
_MSVM_VIRTUAL_SWITCH = 'Msvm_VirtualSwitch'
def setUp(self):
self._networkutils = networkutils.NetworkUtils()
self._networkutils._conn = mock.MagicMock()
super(NetworkUtilsTestCase, self).setUp()
def test_get_external_vswitch(self):
mock_vswitch = mock.MagicMock()
mock_vswitch.path_.return_value = mock.sentinel.FAKE_VSWITCH_PATH
getattr(self._networkutils._conn,
self._MSVM_VIRTUAL_SWITCH).return_value = [mock_vswitch]
switch_path = self._networkutils.get_external_vswitch(
mock.sentinel.FAKE_VSWITCH_NAME)
self.assertEqual(mock.sentinel.FAKE_VSWITCH_PATH, switch_path)
def test_get_external_vswitch_not_found(self):
self._networkutils._conn.Msvm_VirtualEthernetSwitch.return_value = []
self.assertRaises(vmutils.HyperVException,
self._networkutils.get_external_vswitch,
mock.sentinel.FAKE_VSWITCH_NAME)
def test_get_external_vswitch_no_name(self):
mock_vswitch = mock.MagicMock()
mock_vswitch.path_.return_value = mock.sentinel.FAKE_VSWITCH_PATH
mock_ext_port = self._networkutils._conn.Msvm_ExternalEthernetPort()[0]
self._prepare_external_port(mock_vswitch, mock_ext_port)
switch_path = self._networkutils.get_external_vswitch(None)
self.assertEqual(mock.sentinel.FAKE_VSWITCH_PATH, switch_path)
def _prepare_external_port(self, mock_vswitch, mock_ext_port):
mock_lep = mock_ext_port.associators()[0]
mock_lep.associators.return_value = [mock_vswitch]
def test_create_vswitch_port(self):
svc = self._networkutils._conn.Msvm_VirtualSwitchManagementService()[0]
svc.CreateSwitchPort.return_value = (
self._FAKE_PORT, self._FAKE_RET_VALUE)
port = self._networkutils.create_vswitch_port(
mock.sentinel.FAKE_VSWITCH_PATH, mock.sentinel.FAKE_PORT_NAME)
svc.CreateSwitchPort.assert_called_once_with(
Name=mock.ANY, FriendlyName=mock.sentinel.FAKE_PORT_NAME,
ScopeOfResidence="", VirtualSwitch=mock.sentinel.FAKE_VSWITCH_PATH)
self.assertEqual(self._FAKE_PORT, port)
def test_vswitch_port_needed(self):
self.assertTrue(self._networkutils.vswitch_port_needed())

View File

@@ -0,0 +1,45 @@
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.tests.unit.virt.hyperv import test_networkutils
from nova.virt.hyperv import networkutilsv2
class NetworkUtilsV2TestCase(test_networkutils.NetworkUtilsTestCase):
"""Unit tests for the Hyper-V NetworkUtilsV2 class."""
_MSVM_VIRTUAL_SWITCH = 'Msvm_VirtualEthernetSwitch'
def setUp(self):
super(NetworkUtilsV2TestCase, self).setUp()
self._networkutils = networkutilsv2.NetworkUtilsV2()
self._networkutils._conn = mock.MagicMock()
def _prepare_external_port(self, mock_vswitch, mock_ext_port):
mock_lep = mock_ext_port.associators()[0]
mock_lep1 = mock_lep.associators()[0]
mock_esw = mock_lep1.associators()[0]
mock_esw.associators.return_value = [mock_vswitch]
def test_create_vswitch_port(self):
self.assertRaises(
NotImplementedError,
self._networkutils.create_vswitch_port,
mock.sentinel.FAKE_VSWITCH_PATH,
mock.sentinel.FAKE_PORT_NAME)
def test_vswitch_port_needed(self):
self.assertFalse(self._networkutils.vswitch_port_needed())

View File

@@ -0,0 +1,153 @@
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import constants
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import vmutils
class PathUtilsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V PathUtils class."""
def setUp(self):
super(PathUtilsTestCase, self).setUp()
self.fake_instance_dir = os.path.join('C:', 'fake_instance_dir')
self.fake_instance_name = 'fake_instance_name'
self._pathutils = pathutils.PathUtils()
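# Helper: stub out get_instance_dir and exists so that only a config drive
# with the given extension appears to be present, then return the path
# reported by lookup_configdrive_path.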
def _mock_lookup_configdrive_path(self, ext):
self._pathutils.get_instance_dir = mock.MagicMock(
return_value=self.fake_instance_dir)
def mock_exists(*args, **kwargs):
path = args[0]
return path[(path.rfind('.') + 1):] == ext
self._pathutils.exists = mock_exists
configdrive_path = self._pathutils.lookup_configdrive_path(
self.fake_instance_name)
return configdrive_path
def test_lookup_configdrive_path(self):
for format_ext in constants.DISK_FORMAT_MAP:
configdrive_path = self._mock_lookup_configdrive_path(format_ext)
fake_path = os.path.join(self.fake_instance_dir,
'configdrive.' + format_ext)
self.assertEqual(configdrive_path, fake_path)
def test_lookup_configdrive_path_non_exist(self):
self._pathutils.get_instance_dir = mock.MagicMock(
return_value=self.fake_instance_dir)
self._pathutils.exists = mock.MagicMock(return_value=False)
configdrive_path = self._pathutils.lookup_configdrive_path(
self.fake_instance_name)
self.assertIsNone(configdrive_path)
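# Helper: simulate the presence or absence of an SMB mapping and the
# availability of the share, and check that unavailable mappings are
# force-unmounted.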
@mock.patch.object(pathutils.PathUtils, 'unmount_smb_share')
@mock.patch('os.path.exists')
def _test_check_smb_mapping(self, mock_exists, mock_unmount_smb_share,
existing_mappings=True, share_available=False):
mock_exists.return_value = share_available
fake_mappings = (
[mock.sentinel.smb_mapping] if existing_mappings else [])
self._pathutils._smb_conn.Msft_SmbMapping.return_value = (
fake_mappings)
ret_val = self._pathutils.check_smb_mapping(
mock.sentinel.share_path)
self.assertEqual(existing_mappings and share_available, ret_val)
if existing_mappings and not share_available:
mock_unmount_smb_share.assert_called_once_with(
mock.sentinel.share_path, force=True)
def test_check_mapping(self):
self._test_check_smb_mapping()
def test_remake_unavailable_mapping(self):
self._test_check_smb_mapping(existing_mappings=True,
share_available=False)
def test_available_mapping(self):
self._test_check_smb_mapping(existing_mappings=True,
share_available=True)
def test_mount_smb_share(self):
fake_create = self._pathutils._smb_conn.Msft_SmbMapping.Create
self._pathutils.mount_smb_share(mock.sentinel.share_path,
mock.sentinel.username,
mock.sentinel.password)
fake_create.assert_called_once_with(
RemotePath=mock.sentinel.share_path,
UserName=mock.sentinel.username,
Password=mock.sentinel.password)
def _test_unmount_smb_share(self, force=False):
fake_mapping = mock.Mock()
smb_mapping_class = self._pathutils._smb_conn.Msft_SmbMapping
smb_mapping_class.return_value = [fake_mapping]
self._pathutils.unmount_smb_share(mock.sentinel.share_path,
force)
smb_mapping_class.assert_called_once_with(
RemotePath=mock.sentinel.share_path)
fake_mapping.Remove.assert_called_once_with(Force=force)
def test_soft_unmount_smb_share(self):
self._test_unmount_smb_share()
def test_force_unmount_smb_share(self):
self._test_unmount_smb_share(force=True)
@mock.patch('os.path.join')
def test_get_instances_sub_dir(self, fake_path_join):
class WindowsError(Exception):
def __init__(self, winerror=None):
self.winerror = winerror
fake_dir_name = "fake_dir_name"
fake_windows_error = WindowsError
self._pathutils._check_create_dir = mock.MagicMock(
side_effect=WindowsError(pathutils.ERROR_INVALID_NAME))
with mock.patch('__builtin__.WindowsError',
fake_windows_error, create=True):
self.assertRaises(vmutils.HyperVException,
self._pathutils._get_instances_sub_dir,
fake_dir_name)
@mock.patch.object(pathutils.PathUtils, 'get_configdrive_path')
@mock.patch.object(pathutils.PathUtils, 'copyfile')
def test_copy_configdrive(self, mock_copyfile, mock_get_configdrive_path):
mock_get_configdrive_path.side_effect = [mock.sentinel.FAKE_LOCAL_PATH,
mock.sentinel.FAKE_REMOTE_PATH
]
self._pathutils.copy_configdrive(self.fake_instance_name,
mock.sentinel.DEST_HOST)
mock_get_configdrive_path.assert_has_calls(
[mock.call(self.fake_instance_name, constants.DVD_FORMAT),
mock.call(self.fake_instance_name, constants.DVD_FORMAT,
remote_server=mock.sentinel.DEST_HOST)])
mock_copyfile.assert_called_once_with(mock.sentinel.FAKE_LOCAL_PATH,
mock.sentinel.FAKE_REMOTE_PATH)

View File

@@ -0,0 +1,28 @@
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import test
from nova.virt.hyperv import rdpconsoleutils
class RDPConsoleUtilsTestCase(test.NoDBTestCase):
def setUp(self):
self._rdpconsoleutils = rdpconsoleutils.RDPConsoleUtils()
super(RDPConsoleUtilsTestCase, self).setUp()
def test_get_rdp_console_port(self):
listener_port = self._rdpconsoleutils.get_rdp_console_port()
self.assertEqual(self._rdpconsoleutils._DEFAULT_HYPERV_RDP_PORT,
listener_port)

View File

@@ -0,0 +1,37 @@
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import test
from nova.virt.hyperv import rdpconsoleutilsv2
class RDPConsoleUtilsV2TestCase(test.NoDBTestCase):
_FAKE_RDP_PORT = 1000
def setUp(self):
self._rdpconsoleutils = rdpconsoleutilsv2.RDPConsoleUtilsV2()
self._rdpconsoleutils._conn = mock.MagicMock()
super(RDPConsoleUtilsV2TestCase, self).setUp()
def test_get_rdp_console_port(self):
conn = self._rdpconsoleutils._conn
mock_rdp_setting_data = conn.Msvm_TerminalServiceSettingData()[0]
mock_rdp_setting_data.ListenerPort = self._FAKE_RDP_PORT
listener_port = self._rdpconsoleutils.get_rdp_console_port()
self.assertEqual(self._FAKE_RDP_PORT, listener_port)

View File

@@ -0,0 +1,121 @@
# Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from nova.compute import task_states
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import snapshotops
class SnapshotOpsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V SnapshotOps class."""
def setUp(self):
super(SnapshotOpsTestCase, self).setUp()
self.context = 'fake_context'
self._snapshotops = snapshotops.SnapshotOps()
self._snapshotops._pathutils = mock.MagicMock()
self._snapshotops._vmutils = mock.MagicMock()
self._snapshotops._vhdutils = mock.MagicMock()
@mock.patch('nova.image.glance.get_remote_image_service')
def test_save_glance_image(self, mock_get_remote_image_service):
image_metadata = {"is_public": False,
"disk_format": "vhd",
"container_format": "bare",
"properties": {}}
glance_image_service = mock.MagicMock()
mock_get_remote_image_service.return_value = (glance_image_service,
mock.sentinel.IMAGE_ID)
self._snapshotops._save_glance_image(context=self.context,
image_id=mock.sentinel.IMAGE_ID,
image_vhd_path=mock.sentinel.PATH)
mock_get_remote_image_service.assert_called_once_with(
self.context, mock.sentinel.IMAGE_ID)
self._snapshotops._pathutils.open.assert_called_with(
mock.sentinel.PATH, 'rb')
glance_image_service.update.assert_called_once_with(
self.context, mock.sentinel.IMAGE_ID, image_metadata,
self._snapshotops._pathutils.open().__enter__())
@mock.patch('nova.virt.hyperv.snapshotops.SnapshotOps._save_glance_image')
def _test_snapshot(self, mock_save_glance_image, base_disk_path):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_update = mock.MagicMock()
fake_src_path = os.path.join('fake', 'path')
self._snapshotops._pathutils.lookup_root_vhd_path.return_value = (
fake_src_path)
fake_exp_dir = os.path.join(os.path.join('fake', 'exp'), 'dir')
self._snapshotops._pathutils.get_export_dir.return_value = fake_exp_dir
self._snapshotops._vhdutils.get_vhd_parent_path.return_value = (
base_disk_path)
fake_snapshot_path = (
self._snapshotops._vmutils.take_vm_snapshot.return_value)
self._snapshotops.snapshot(context=self.context,
instance=mock_instance,
image_id=mock.sentinel.IMAGE_ID,
update_task_state=mock_update)
self._snapshotops._vmutils.take_vm_snapshot.assert_called_once_with(
mock_instance.name)
mock_lookup_path = self._snapshotops._pathutils.lookup_root_vhd_path
mock_lookup_path.assert_called_once_with(mock_instance.name)
mock_get_vhd_path = self._snapshotops._vhdutils.get_vhd_parent_path
mock_get_vhd_path.assert_called_once_with(fake_src_path)
self._snapshotops._pathutils.get_export_dir.assert_called_once_with(
mock_instance.name)
expected = [mock.call(fake_src_path,
os.path.join(fake_exp_dir,
os.path.basename(fake_src_path)))]
dest_vhd_path = os.path.join(fake_exp_dir,
os.path.basename(fake_src_path))
if base_disk_path:
basename = os.path.basename(base_disk_path)
base_dest_disk_path = os.path.join(fake_exp_dir, basename)
expected.append(mock.call(base_disk_path, base_dest_disk_path))
mock_reconnect = self._snapshotops._vhdutils.reconnect_parent_vhd
mock_reconnect.assert_called_once_with(dest_vhd_path,
base_dest_disk_path)
self._snapshotops._vhdutils.merge_vhd.assert_called_once_with(
dest_vhd_path, base_dest_disk_path)
mock_save_glance_image.assert_called_once_with(
self.context, mock.sentinel.IMAGE_ID, base_dest_disk_path)
else:
mock_save_glance_image.assert_called_once_with(
self.context, mock.sentinel.IMAGE_ID, dest_vhd_path)
self._snapshotops._pathutils.copyfile.assert_has_calls(expected)
expected_update = [
mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD),
mock.call(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)]
mock_update.assert_has_calls(expected_update)
self._snapshotops._vmutils.remove_vm_snapshot.assert_called_once_with(
fake_snapshot_path)
self._snapshotops._pathutils.rmtree.assert_called_once_with(
fake_exp_dir)
def test_snapshot(self):
base_disk_path = os.path.join('fake', 'disk')
self._test_snapshot(base_disk_path=base_disk_path)
def test_snapshot_no_base_disk(self):
self._test_snapshot(base_disk_path=None)

View File

@@ -0,0 +1,57 @@
# Copyright 2014 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for the Hyper-V utils factory.
"""
import mock
from oslo_config import cfg
from nova import test
from nova.virt.hyperv import hostutils
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import vmutilsv2
CONF = cfg.CONF
class TestHyperVUtilsFactory(test.NoDBTestCase):
def test_get_vmutils_force_v1_and_min_version(self):
self._test_returned_class(None, True, True)
def test_get_vmutils_v2(self):
self._test_returned_class(vmutilsv2.VMUtilsV2, False, True)
def test_get_vmutils_v2_r2(self):
self._test_returned_class(vmutils.VMUtils, False, False)
def test_get_vmutils_force_v1_and_not_min_version(self):
self._test_returned_class(vmutils.VMUtils, True, False)
def _test_returned_class(self, expected_class, force_v1, os_supports_v2):
CONF.set_override('force_hyperv_utils_v1', force_v1, 'hyperv')
with mock.patch.object(
hostutils.HostUtils,
'check_min_windows_version') as mock_check_min_windows_version:
mock_check_min_windows_version.return_value = os_supports_v2
if os_supports_v2 and force_v1:
self.assertRaises(vmutils.HyperVException,
utilsfactory.get_vmutils)
else:
actual_class = type(utilsfactory.get_vmutils())
self.assertEqual(actual_class, expected_class)

View File

@@ -0,0 +1,281 @@
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import units
from nova import test
from nova.virt.hyperv import constants
from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vmutils
class VHDUtilsBaseTestCase(test.NoDBTestCase):
"Base Class unit test classes of Hyper-V VHD Utils classes."
_FAKE_VHD_PATH = "C:\\fake_path.vhdx"
_FAKE_PARENT_PATH = "C:\\fake_parent_path.vhdx"
_FAKE_FORMAT = 3
_FAKE_TYPE = 3
_FAKE_MAX_INTERNAL_SIZE = units.Gi
_FAKE_DYNAMIC_BLK_SIZE = 2097152L
_FAKE_BAD_TYPE = 5
_FAKE_JOB_PATH = 'fake_job_path'
_FAKE_RET_VAL = 0
_FAKE_VHD_INFO_XML = (
"""<INSTANCE CLASSNAME="Msvm_VirtualHardDiskSettingData">
<PROPERTY NAME="BlockSize" TYPE="uint32">
<VALUE>33554432</VALUE>
</PROPERTY>
<PROPERTY NAME="Caption" TYPE="string">
<VALUE>Virtual Hard Disk Setting Data</VALUE>
</PROPERTY>
<PROPERTY NAME="Description" TYPE="string">
<VALUE>Setting Data for a Virtual Hard Disk.</VALUE>
</PROPERTY>
<PROPERTY NAME="ElementName" TYPE="string">
<VALUE>fake_path.vhdx</VALUE>
</PROPERTY>
<PROPERTY NAME="Format" TYPE="uint16">
<VALUE>%(format)s</VALUE>
</PROPERTY>
<PROPERTY NAME="InstanceID" TYPE="string">
<VALUE>52794B89-AC06-4349-AC57-486CAAD52F69</VALUE>
</PROPERTY>
<PROPERTY NAME="LogicalSectorSize" TYPE="uint32">
<VALUE>4096</VALUE>
</PROPERTY>
<PROPERTY NAME="MaxInternalSize" TYPE="uint64">
<VALUE>%(max_internal_size)s</VALUE>
</PROPERTY>
<PROPERTY NAME="ParentPath" TYPE="string">
<VALUE>%(parent_path)s</VALUE>
</PROPERTY>
<PROPERTY NAME="Path" TYPE="string">
<VALUE>%(path)s</VALUE>
</PROPERTY>
<PROPERTY NAME="PhysicalSectorSize" TYPE="uint32">
<VALUE>4096</VALUE>
</PROPERTY>
<PROPERTY NAME="Type" TYPE="uint16">
<VALUE>%(type)s</VALUE>
</PROPERTY>
</INSTANCE>""" % {'path': _FAKE_VHD_PATH,
'parent_path': _FAKE_PARENT_PATH,
'format': _FAKE_FORMAT,
'max_internal_size': _FAKE_MAX_INTERNAL_SIZE,
'type': _FAKE_TYPE})
class VHDUtilsTestCase(VHDUtilsBaseTestCase):
"""Unit tests for the Hyper-V VHDUtils class."""
def setUp(self):
super(VHDUtilsTestCase, self).setUp()
self._vhdutils = vhdutils.VHDUtils()
self._vhdutils._conn = mock.MagicMock()
self._vhdutils._vmutils = mock.MagicMock()
self._fake_vhd_info = {
'ParentPath': self._FAKE_PARENT_PATH,
'MaxInternalSize': self._FAKE_MAX_INTERNAL_SIZE,
'Type': self._FAKE_TYPE}
def test_validate_vhd(self):
mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
mock_img_svc.ValidateVirtualHardDisk.return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
self._vhdutils.validate_vhd(self._FAKE_VHD_PATH)
mock_img_svc.ValidateVirtualHardDisk.assert_called_once_with(
Path=self._FAKE_VHD_PATH)
def test_get_vhd_info(self):
self._mock_get_vhd_info()
vhd_info = self._vhdutils.get_vhd_info(self._FAKE_VHD_PATH)
self.assertEqual(self._fake_vhd_info, vhd_info)
def _mock_get_vhd_info(self):
mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
mock_img_svc.GetVirtualHardDiskInfo.return_value = (
self._FAKE_VHD_INFO_XML, self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
def test_create_dynamic_vhd(self):
self._vhdutils.get_vhd_info = mock.MagicMock(
return_value={'Format': self._FAKE_FORMAT})
mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
mock_img_svc.CreateDynamicVirtualHardDisk.return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
self._vhdutils.create_dynamic_vhd(self._FAKE_VHD_PATH,
self._FAKE_MAX_INTERNAL_SIZE,
constants.DISK_FORMAT_VHD)
mock_img_svc.CreateDynamicVirtualHardDisk.assert_called_once_with(
Path=self._FAKE_VHD_PATH,
MaxInternalSize=self._FAKE_MAX_INTERNAL_SIZE)
self._vhdutils._vmutils.check_ret_val.assert_called_once_with(
self._FAKE_RET_VAL, self._FAKE_JOB_PATH)
def test_reconnect_parent_vhd(self):
mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
mock_img_svc.ReconnectParentVirtualHardDisk.return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
self._vhdutils.reconnect_parent_vhd(self._FAKE_VHD_PATH,
self._FAKE_PARENT_PATH)
mock_img_svc.ReconnectParentVirtualHardDisk.assert_called_once_with(
ChildPath=self._FAKE_VHD_PATH,
ParentPath=self._FAKE_PARENT_PATH,
Force=True)
self._vhdutils._vmutils.check_ret_val.assert_called_once_with(
self._FAKE_RET_VAL, self._FAKE_JOB_PATH)
def test_merge_vhd(self):
mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
mock_img_svc.MergeVirtualHardDisk.return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
self._vhdutils.merge_vhd(self._FAKE_VHD_PATH, self._FAKE_VHD_PATH)
mock_img_svc.MergeVirtualHardDisk.assert_called_once_with(
SourcePath=self._FAKE_VHD_PATH,
DestinationPath=self._FAKE_VHD_PATH)
self._vhdutils._vmutils.check_ret_val.assert_called_once_with(
self._FAKE_RET_VAL, self._FAKE_JOB_PATH)
def test_resize_vhd(self):
mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
mock_img_svc.ExpandVirtualHardDisk.return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
self._vhdutils.get_internal_vhd_size_by_file_size = mock.MagicMock(
return_value=self._FAKE_MAX_INTERNAL_SIZE)
self._vhdutils.resize_vhd(self._FAKE_VHD_PATH,
self._FAKE_MAX_INTERNAL_SIZE)
mock_img_svc.ExpandVirtualHardDisk.assert_called_once_with(
Path=self._FAKE_VHD_PATH,
MaxInternalSize=self._FAKE_MAX_INTERNAL_SIZE)
self._vhdutils._vmutils.check_ret_val.assert_called_once_with(
self._FAKE_RET_VAL, self._FAKE_JOB_PATH)
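# Helper: patch get_vhd_info and the dynamic block size so that
# get_internal_vhd_size_by_file_size can be exercised for the given VHD type.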
def _mocked_get_internal_vhd_size(self, root_vhd_size, vhd_type):
mock_get_vhd_info = mock.MagicMock(return_value={'Type': vhd_type})
mock_get_blk_size = mock.MagicMock(
return_value=self._FAKE_DYNAMIC_BLK_SIZE)
with mock.patch.multiple(self._vhdutils,
get_vhd_info=mock_get_vhd_info,
_get_vhd_dynamic_blk_size=mock_get_blk_size):
return self._vhdutils.get_internal_vhd_size_by_file_size(
None, root_vhd_size)
def test_create_differencing_vhd(self):
mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
mock_img_svc.CreateDifferencingVirtualHardDisk.return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
self._vhdutils.create_differencing_vhd(self._FAKE_VHD_PATH,
self._FAKE_PARENT_PATH)
mock_img_svc.CreateDifferencingVirtualHardDisk.assert_called_once_with(
Path=self._FAKE_VHD_PATH,
ParentPath=self._FAKE_PARENT_PATH)
def test_get_internal_vhd_size_by_file_size_fixed(self):
root_vhd_size = 1 * 1024 ** 3
real_size = self._mocked_get_internal_vhd_size(
root_vhd_size, constants.VHD_TYPE_FIXED)
expected_vhd_size = 1 * 1024 ** 3 - 512
self.assertEqual(expected_vhd_size, real_size)
def test_get_internal_vhd_size_by_file_size_dynamic(self):
root_vhd_size = 20 * 1024 ** 3
real_size = self._mocked_get_internal_vhd_size(
root_vhd_size, constants.VHD_TYPE_DYNAMIC)
expected_vhd_size = 20 * 1024 ** 3 - 43008
self.assertEqual(expected_vhd_size, real_size)
def test_get_internal_vhd_size_by_file_size_differencing(self):
# For differencing images, the internal size of the parent vhd
# is returned
vhdutil = vhdutils.VHDUtils()
root_vhd_size = 20 * 1024 ** 3
vhdutil.get_vhd_info = mock.MagicMock()
vhdutil.get_vhd_parent_path = mock.MagicMock()
vhdutil.get_vhd_parent_path.return_value = self._FAKE_VHD_PATH
vhdutil.get_vhd_info.side_effect = [
{'Type': 4}, {'Type': constants.VHD_TYPE_DYNAMIC}]
vhdutil._get_vhd_dynamic_blk_size = mock.MagicMock()
vhdutil._get_vhd_dynamic_blk_size.return_value = 2097152
real_size = vhdutil.get_internal_vhd_size_by_file_size(None,
root_vhd_size)
expected_vhd_size = 20 * 1024 ** 3 - 43008
self.assertEqual(expected_vhd_size, real_size)
def test_get_vhd_format_vhdx(self):
with mock.patch('nova.virt.hyperv.vhdutils.open',
mock.mock_open(read_data=vhdutils.VHDX_SIGNATURE),
create=True):
format = self._vhdutils.get_vhd_format(self._FAKE_VHD_PATH)
self.assertEqual(constants.DISK_FORMAT_VHDX, format)
def test_get_vhd_format_vhd(self):
with mock.patch('nova.virt.hyperv.vhdutils.open',
mock.mock_open(read_data=vhdutils.VHD_SIGNATURE),
create=True) as mock_open:
f = mock_open.return_value
f.tell.return_value = 1024
format = self._vhdutils.get_vhd_format(self._FAKE_VHD_PATH)
self.assertEqual(constants.DISK_FORMAT_VHD, format)
def test_get_vhd_format_invalid_format(self):
with mock.patch('nova.virt.hyperv.vhdutils.open',
mock.mock_open(read_data='invalid'),
create=True) as mock_open:
f = mock_open.return_value
f.tell.return_value = 1024
self.assertRaises(vmutils.HyperVException,
self._vhdutils.get_vhd_format,
self._FAKE_VHD_PATH)
def test_get_vhd_format_zero_length_file(self):
with mock.patch('nova.virt.hyperv.vhdutils.open',
mock.mock_open(read_data=''),
create=True) as mock_open:
f = mock_open.return_value
f.tell.return_value = 0
self.assertRaises(vmutils.HyperVException,
self._vhdutils.get_vhd_format,
self._FAKE_VHD_PATH)
f.seek.assert_called_once_with(0, 2)
def test_get_supported_vhd_format(self):
fmt = self._vhdutils.get_best_supported_vhd_format()
self.assertEqual(constants.DISK_FORMAT_VHD, fmt)

View File

@@ -0,0 +1,244 @@
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.tests.unit.virt.hyperv import test_vhdutils
from nova.virt.hyperv import constants
from nova.virt.hyperv import vhdutilsv2
from nova.virt.hyperv import vmutils
class VHDUtilsV2TestCase(test_vhdutils.VHDUtilsBaseTestCase):
"""Unit tests for the Hyper-V VHDUtilsV2 class."""
_FAKE_BLOCK_SIZE = 33554432L
_FAKE_LOG_SIZE = 1048576
_FAKE_LOGICAL_SECTOR_SIZE = 4096
_FAKE_METADATA_SIZE = 1048576
_FAKE_PHYSICAL_SECTOR_SIZE = 4096L
def setUp(self):
super(VHDUtilsV2TestCase, self).setUp()
self._vhdutils = vhdutilsv2.VHDUtilsV2()
self._vhdutils._conn = mock.MagicMock()
self._vhdutils._vmutils = mock.MagicMock()
self._fake_file_handle = mock.MagicMock()
self._fake_vhd_info = {
'Path': self._FAKE_VHD_PATH,
'ParentPath': self._FAKE_PARENT_PATH,
'Format': self._FAKE_FORMAT,
'MaxInternalSize': self._FAKE_MAX_INTERNAL_SIZE,
'Type': self._FAKE_TYPE,
'BlockSize': self._FAKE_BLOCK_SIZE,
'LogicalSectorSize': self._FAKE_LOGICAL_SECTOR_SIZE,
'PhysicalSectorSize': self._FAKE_PHYSICAL_SECTOR_SIZE}
def _mock_get_vhd_info(self):
mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
mock_img_svc.GetVirtualHardDiskSettingData.return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL, self._FAKE_VHD_INFO_XML)
def test_get_vhd_info(self):
self._mock_get_vhd_info()
vhd_info = self._vhdutils.get_vhd_info(self._FAKE_VHD_PATH)
self.assertEqual(self._FAKE_VHD_PATH, vhd_info['Path'])
self.assertEqual(self._FAKE_PARENT_PATH, vhd_info['ParentPath'])
self.assertEqual(self._FAKE_FORMAT, vhd_info['Format'])
self.assertEqual(self._FAKE_MAX_INTERNAL_SIZE,
vhd_info['MaxInternalSize'])
self.assertEqual(self._FAKE_TYPE, vhd_info['Type'])
def test_get_vhd_info_no_parent(self):
fake_vhd_xml_no_parent = self._FAKE_VHD_INFO_XML.replace(
self._FAKE_PARENT_PATH, "")
mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
mock_img_svc.GetVirtualHardDiskSettingData.return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL, fake_vhd_xml_no_parent)
vhd_info = self._vhdutils.get_vhd_info(self._FAKE_VHD_PATH)
self.assertEqual(self._FAKE_VHD_PATH, vhd_info['Path'])
self.assertIsNone(vhd_info['ParentPath'])
self.assertEqual(self._FAKE_FORMAT, vhd_info['Format'])
self.assertEqual(self._FAKE_MAX_INTERNAL_SIZE,
vhd_info['MaxInternalSize'])
self.assertEqual(self._FAKE_TYPE, vhd_info['Type'])
def test_create_dynamic_vhd(self):
self._vhdutils.get_vhd_info = mock.MagicMock(
return_value={'Format': self._FAKE_FORMAT})
mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
mock_img_svc.CreateVirtualHardDisk.return_value = (self._FAKE_JOB_PATH,
self._FAKE_RET_VAL)
self._vhdutils.create_dynamic_vhd(self._FAKE_VHD_PATH,
self._FAKE_MAX_INTERNAL_SIZE,
constants.DISK_FORMAT_VHDX)
self.assertTrue(mock_img_svc.CreateVirtualHardDisk.called)
def test_create_differencing_vhd(self):
self._vhdutils.get_vhd_info = mock.MagicMock(
return_value={'ParentPath': self._FAKE_PARENT_PATH,
'Format': self._FAKE_FORMAT})
mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
mock_img_svc.CreateVirtualHardDisk.return_value = (self._FAKE_JOB_PATH,
self._FAKE_RET_VAL)
self._vhdutils.create_differencing_vhd(self._FAKE_VHD_PATH,
self._FAKE_PARENT_PATH)
self.assertTrue(mock_img_svc.CreateVirtualHardDisk.called)
def test_reconnect_parent_vhd(self):
mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
fake_new_parent_path = 'fake_new_parent_path'
self._vhdutils._get_vhd_info_xml = mock.MagicMock(
return_value=self._FAKE_VHD_INFO_XML)
mock_img_svc.SetVirtualHardDiskSettingData.return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
self._vhdutils.reconnect_parent_vhd(self._FAKE_VHD_PATH,
fake_new_parent_path)
expected_virt_disk_data = self._FAKE_VHD_INFO_XML.replace(
self._FAKE_PARENT_PATH, fake_new_parent_path)
mock_img_svc.SetVirtualHardDiskSettingData.assert_called_once_with(
VirtualDiskSettingData=expected_virt_disk_data)
def test_reconnect_parent_vhd_exception(self):
# Test that reconnect_parent_vhd raises an exception if the
# vhd info XML does not contain the ParentPath property.
fake_vhd_info_xml = self._FAKE_VHD_INFO_XML.replace('ParentPath',
'FakeParentPath')
self._vhdutils._get_vhd_info_xml = mock.Mock(
return_value=fake_vhd_info_xml)
self.assertRaises(vmutils.HyperVException,
self._vhdutils.reconnect_parent_vhd,
self._FAKE_VHD_PATH,
mock.sentinel.new_parent_path)
def test_resize_vhd(self):
mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
mock_img_svc.ResizeVirtualHardDisk.return_value = (self._FAKE_JOB_PATH,
self._FAKE_RET_VAL)
self._vhdutils.get_internal_vhd_size_by_file_size = mock.MagicMock(
return_value=self._FAKE_MAX_INTERNAL_SIZE)
self._vhdutils.resize_vhd(self._FAKE_VHD_PATH,
self._FAKE_MAX_INTERNAL_SIZE)
mock_img_svc.ResizeVirtualHardDisk.assert_called_once_with(
Path=self._FAKE_VHD_PATH,
MaxInternalSize=self._FAKE_MAX_INTERNAL_SIZE)
self.mock_get = self._vhdutils.get_internal_vhd_size_by_file_size
self.mock_get.assert_called_once_with(self._FAKE_VHD_PATH,
self._FAKE_MAX_INTERNAL_SIZE)
def _test_get_vhdx_internal_size(self, vhd_type):
self._vhdutils.get_vhd_info = mock.MagicMock()
self._vhdutils.get_vhd_parent_path = mock.Mock(
return_value=self._FAKE_PARENT_PATH)
if vhd_type == 4:
self._vhdutils.get_vhd_info.side_effect = [
{'Type': vhd_type}, self._fake_vhd_info]
else:
self._vhdutils.get_vhd_info.return_value = self._fake_vhd_info
@mock.patch('nova.virt.hyperv.vhdutils.VHDUtils.get_vhd_format')
def test_get_vhdx_internal_size(self, mock_get_vhd_format):
mock_get_vhd_format.return_value = constants.DISK_FORMAT_VHDX
self._mock_get_vhd_info()
self._vhdutils._get_vhdx_log_size = mock.MagicMock(
return_value=self._FAKE_LOG_SIZE)
self._vhdutils._get_vhdx_metadata_size_and_offset = mock.MagicMock(
return_value=(self._FAKE_METADATA_SIZE, 1024))
self._vhdutils._get_vhdx_block_size = mock.MagicMock(
return_value=self._FAKE_BLOCK_SIZE)
file_mock = mock.MagicMock()
with mock.patch('__builtin__.open', file_mock):
internal_size = (
self._vhdutils.get_internal_vhd_size_by_file_size(
self._FAKE_VHD_PATH, self._FAKE_MAX_INTERNAL_SIZE))
self.assertEqual(self._FAKE_MAX_INTERNAL_SIZE - self._FAKE_BLOCK_SIZE,
internal_size)
def test_get_vhdx_internal_size_dynamic(self):
self._test_get_vhdx_internal_size(3)
def test_get_vhdx_internal_size_differencing(self):
self._test_get_vhdx_internal_size(4)
def test_get_vhdx_current_header(self):
VHDX_HEADER_OFFSETS = [64 * 1024, 128 * 1024]
fake_sequence_numbers = ['\x01\x00\x00\x00\x00\x00\x00\x00',
'\x02\x00\x00\x00\x00\x00\x00\x00']
self._fake_file_handle.read = mock.MagicMock(
side_effect=fake_sequence_numbers)
offset = self._vhdutils._get_vhdx_current_header_offset(
self._fake_file_handle)
self.assertEqual(offset, VHDX_HEADER_OFFSETS[1])
def test_get_vhdx_metadata_size(self):
fake_metadata_offset = '\x01\x00\x00\x00\x00\x00\x00\x00'
fake_metadata_size = '\x01\x00\x00\x00'
self._fake_file_handle.read = mock.MagicMock(
side_effect=[fake_metadata_offset, fake_metadata_size])
metadata_size, metadata_offset = (
self._vhdutils._get_vhdx_metadata_size_and_offset(
self._fake_file_handle))
self.assertEqual(metadata_size, 1)
self.assertEqual(metadata_offset, 1)
def test_get_block_size(self):
self._vhdutils._get_vhdx_metadata_size_and_offset = mock.MagicMock(
return_value=(self._FAKE_METADATA_SIZE, 1024))
fake_block_size = '\x01\x00\x00\x00'
self._fake_file_handle.read = mock.MagicMock(
return_value=fake_block_size)
block_size = self._vhdutils._get_vhdx_block_size(
self._fake_file_handle)
self.assertEqual(block_size, 1)
def test_get_log_size(self):
fake_current_header_offset = 64 * 1024
self._vhdutils._get_vhdx_current_header_offset = mock.MagicMock(
return_value=fake_current_header_offset)
fake_log_size = '\x01\x00\x00\x00'
self._fake_file_handle.read = mock.MagicMock(
return_value=fake_log_size)
log_size = self._vhdutils._get_vhdx_log_size(self._fake_file_handle)
self.assertEqual(log_size, 1)
def test_get_supported_vhd_format(self):
fmt = self._vhdutils.get_best_supported_vhd_format()
self.assertEqual(constants.DISK_FORMAT_VHDX, fmt)

View File

@@ -0,0 +1,751 @@
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from eventlet import timeout as etimeout
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import units
from nova import exception
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmops
from nova.virt.hyperv import vmutils
CONF = cfg.CONF
class VMOpsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V VMOps class."""
_FAKE_TIMEOUT = 2
FAKE_SIZE = 10
FAKE_DIR = 'fake_dir'
FAKE_ROOT_PATH = 'C:\\path\\to\\fake.%s'
FAKE_CONFIG_DRIVE_ISO = 'configdrive.iso'
FAKE_CONFIG_DRIVE_VHD = 'configdrive.vhd'
ISO9660 = 'iso9660'
_FAKE_CONFIGDRIVE_PATH = 'C:/fake_instance_dir/configdrive.vhd'
def __init__(self, test_case_name):
super(VMOpsTestCase, self).__init__(test_case_name)
def setUp(self):
super(VMOpsTestCase, self).setUp()
self.context = 'fake-context'
self._vmops = vmops.VMOps()
self._vmops._vmutils = mock.MagicMock()
self._vmops._vhdutils = mock.MagicMock()
self._vmops._pathutils = mock.MagicMock()
self._vmops._hostutils = mock.MagicMock()
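# Helper: build a fake instance and wire the vhdutils/pathutils mocks used
# by the _create_root_vhd() tests.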
def _prepare_create_root_vhd_mocks(self, use_cow_images, vhd_format,
vhd_size):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_instance.root_gb = self.FAKE_SIZE
self.flags(use_cow_images=use_cow_images)
self._vmops._vhdutils.get_vhd_info.return_value = {'MaxInternalSize':
vhd_size * units.Gi}
self._vmops._vhdutils.get_vhd_format.return_value = vhd_format
root_vhd_internal_size = mock_instance.root_gb * units.Gi
get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size
get_size.return_value = root_vhd_internal_size
self._vmops._pathutils.exists.return_value = True
return mock_instance
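# Scenario: the cached image is larger than the requested root disk, so a
# VHDResizeException is expected and the copied root VHD is cleaned up.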
@mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image')
def _test_create_root_vhd_exception(self, mock_get_cached_image,
vhd_format):
mock_instance = self._prepare_create_root_vhd_mocks(
use_cow_images=False, vhd_format=vhd_format,
vhd_size=(self.FAKE_SIZE + 1))
fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format
mock_get_cached_image.return_value = fake_vhd_path
fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value
self.assertRaises(vmutils.VHDResizeException,
self._vmops._create_root_vhd, self.context,
mock_instance)
self.assertFalse(self._vmops._vhdutils.resize_vhd.called)
self._vmops._pathutils.exists.assert_called_once_with(
fake_root_path)
self._vmops._pathutils.remove.assert_called_once_with(
fake_root_path)
@mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image')
def _test_create_root_vhd_qcow(self, mock_get_cached_image, vhd_format):
mock_instance = self._prepare_create_root_vhd_mocks(
use_cow_images=True, vhd_format=vhd_format,
vhd_size=(self.FAKE_SIZE - 1))
fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format
mock_get_cached_image.return_value = fake_vhd_path
fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value
root_vhd_internal_size = mock_instance.root_gb * units.Gi
get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size
response = self._vmops._create_root_vhd(context=self.context,
instance=mock_instance)
self.assertEqual(fake_root_path, response)
self._vmops._pathutils.get_root_vhd_path.assert_called_with(
mock_instance.name, vhd_format)
differencing_vhd = self._vmops._vhdutils.create_differencing_vhd
differencing_vhd.assert_called_with(fake_root_path, fake_vhd_path)
self._vmops._vhdutils.get_vhd_info.assert_called_once_with(
fake_vhd_path)
if vhd_format is constants.DISK_FORMAT_VHD:
self.assertFalse(get_size.called)
self.assertFalse(self._vmops._vhdutils.resize_vhd.called)
else:
get_size.assert_called_once_with(fake_vhd_path,
root_vhd_internal_size)
self._vmops._vhdutils.resize_vhd.assert_called_once_with(
fake_root_path, root_vhd_internal_size, is_file_max_size=False)
@mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image')
def _test_create_root_vhd(self, mock_get_cached_image, vhd_format):
mock_instance = self._prepare_create_root_vhd_mocks(
use_cow_images=False, vhd_format=vhd_format,
vhd_size=(self.FAKE_SIZE - 1))
fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format
mock_get_cached_image.return_value = fake_vhd_path
fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value
root_vhd_internal_size = mock_instance.root_gb * units.Gi
get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size
response = self._vmops._create_root_vhd(context=self.context,
instance=mock_instance)
self.assertEqual(fake_root_path, response)
self._vmops._pathutils.get_root_vhd_path.assert_called_with(
mock_instance.name, vhd_format)
self._vmops._pathutils.copyfile.assert_called_once_with(
fake_vhd_path, fake_root_path)
get_size.assert_called_once_with(fake_vhd_path, root_vhd_internal_size)
self._vmops._vhdutils.resize_vhd.assert_called_once_with(
fake_root_path, root_vhd_internal_size, is_file_max_size=False)
def test_create_root_vhd(self):
self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHD)
def test_create_root_vhdx(self):
self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHDX)
def test_create_root_vhd_use_cow_images_true(self):
self._test_create_root_vhd_qcow(vhd_format=constants.DISK_FORMAT_VHD)
def test_create_root_vhdx_use_cow_images_true(self):
self._test_create_root_vhd_qcow(vhd_format=constants.DISK_FORMAT_VHDX)
def test_create_root_vhdx_size_less_than_internal(self):
self._test_create_root_vhd_exception(
vhd_format=constants.DISK_FORMAT_VHD)
def test_is_resize_needed_exception(self):
inst = mock.MagicMock()
self.assertRaises(
vmutils.VHDResizeException, self._vmops._is_resize_needed,
mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE - 1, inst)
def test_is_resize_needed_true(self):
inst = mock.MagicMock()
self.assertTrue(self._vmops._is_resize_needed(
mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE + 1, inst))
def test_is_resize_needed_false(self):
inst = mock.MagicMock()
self.assertFalse(self._vmops._is_resize_needed(
mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE, inst))
def test_create_ephemeral_vhd(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_instance.ephemeral_gb = self.FAKE_SIZE
best_supported = self._vmops._vhdutils.get_best_supported_vhd_format
best_supported.return_value = mock.sentinel.FAKE_FORMAT
self._vmops._pathutils.get_ephemeral_vhd_path.return_value = (
mock.sentinel.FAKE_PATH)
response = self._vmops.create_ephemeral_vhd(instance=mock_instance)
self._vmops._pathutils.get_ephemeral_vhd_path.assert_called_with(
mock_instance.name, mock.sentinel.FAKE_FORMAT)
self._vmops._vhdutils.create_dynamic_vhd.assert_called_with(
mock.sentinel.FAKE_PATH, mock_instance.ephemeral_gb * units.Gi,
mock.sentinel.FAKE_FORMAT)
self.assertEqual(mock.sentinel.FAKE_PATH, response)
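# Helper driving the spawn() scenarios: 'exists' simulates an already
# existing VM, 'boot_from_volume' skips root VHD creation,
# 'configdrive_required' toggles config drive handling, and 'fail' makes
# create_instance raise, in which case the instance is destroyed.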
@mock.patch('nova.virt.hyperv.vmops.VMOps.destroy')
@mock.patch('nova.virt.hyperv.vmops.VMOps.power_on')
@mock.patch('nova.virt.hyperv.vmops.VMOps.attach_config_drive')
@mock.patch('nova.virt.hyperv.vmops.VMOps._create_config_drive')
@mock.patch('nova.virt.configdrive.required_by')
@mock.patch('nova.virt.hyperv.vmops.VMOps.create_instance')
@mock.patch('nova.virt.hyperv.vmops.VMOps.get_image_vm_generation')
@mock.patch('nova.virt.hyperv.vmops.VMOps.create_ephemeral_vhd')
@mock.patch('nova.virt.hyperv.vmops.VMOps._create_root_vhd')
@mock.patch('nova.virt.hyperv.volumeops.VolumeOps.'
'ebs_root_in_block_devices')
@mock.patch('nova.virt.hyperv.vmops.VMOps._delete_disk_files')
def _test_spawn(self, mock_delete_disk_files,
mock_ebs_root_in_block_devices, mock_create_root_vhd,
mock_create_ephemeral_vhd, mock_get_image_vm_gen,
mock_create_instance, mock_configdrive_required,
mock_create_config_drive, mock_attach_config_drive,
mock_power_on, mock_destroy, exists, boot_from_volume,
configdrive_required, fail):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_image_meta = mock.MagicMock()
fake_root_path = mock_create_root_vhd.return_value
fake_root_path = None if boot_from_volume else fake_root_path
fake_ephemeral_path = mock_create_ephemeral_vhd.return_value
fake_vm_gen = mock_get_image_vm_gen.return_value
fake_config_drive_path = mock_create_config_drive.return_value
self._vmops._vmutils.vm_exists.return_value = exists
mock_ebs_root_in_block_devices.return_value = boot_from_volume
mock_create_root_vhd.return_value = fake_root_path
mock_configdrive_required.return_value = configdrive_required
mock_create_instance.side_effect = fail
if exists:
self.assertRaises(exception.InstanceExists, self._vmops.spawn,
self.context, mock_instance, mock_image_meta,
[mock.sentinel.FILE], mock.sentinel.PASSWORD,
mock.sentinel.INFO, mock.sentinel.DEV_INFO)
elif fail is vmutils.HyperVException:
self.assertRaises(vmutils.HyperVException, self._vmops.spawn,
self.context, mock_instance, mock_image_meta,
[mock.sentinel.FILE], mock.sentinel.PASSWORD,
mock.sentinel.INFO, mock.sentinel.DEV_INFO)
mock_destroy.assert_called_once_with(mock_instance)
else:
self._vmops.spawn(self.context, mock_instance, mock_image_meta,
[mock.sentinel.FILE], mock.sentinel.PASSWORD,
mock.sentinel.INFO, mock.sentinel.DEV_INFO)
self._vmops._vmutils.vm_exists.assert_called_once_with(
mock_instance.name)
mock_delete_disk_files.assert_called_once_with(
mock_instance.name)
mock_ebs_root_in_block_devices.assert_called_once_with(
mock.sentinel.DEV_INFO)
if not boot_from_volume:
mock_create_root_vhd.assert_called_once_with(self.context,
mock_instance)
mock_create_ephemeral_vhd.assert_called_once_with(mock_instance)
mock_get_image_vm_gen.assert_called_once_with(fake_root_path,
mock_image_meta)
mock_create_instance.assert_called_once_with(
mock_instance, mock.sentinel.INFO, mock.sentinel.DEV_INFO,
fake_root_path, fake_ephemeral_path, fake_vm_gen)
mock_configdrive_required.assert_called_once_with(mock_instance)
if configdrive_required:
mock_create_config_drive.assert_called_once_with(
mock_instance, [mock.sentinel.FILE],
mock.sentinel.PASSWORD,
mock.sentinel.INFO)
mock_attach_config_drive.assert_called_once_with(
mock_instance, fake_config_drive_path, fake_vm_gen)
mock_power_on.assert_called_once_with(mock_instance)
def test_spawn(self):
self._test_spawn(exists=False, boot_from_volume=False,
configdrive_required=True, fail=None)
def test_spawn_instance_exists(self):
self._test_spawn(exists=True, boot_from_volume=False,
configdrive_required=True, fail=None)
def test_spawn_create_instance_exception(self):
self._test_spawn(exists=False, boot_from_volume=False,
configdrive_required=True,
fail=vmutils.HyperVException)
def test_spawn_not_required(self):
self._test_spawn(exists=False, boot_from_volume=False,
configdrive_required=False, fail=None)
def test_spawn_root_in_block(self):
self._test_spawn(exists=False, boot_from_volume=True,
configdrive_required=False, fail=None)
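# Helper verifying create_instance(): VM creation, root/ephemeral drive
# attachment, volume attachment, NIC creation, vif plugging and optional
# metrics collection, with the controller type derived from the VM
# generation.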
@mock.patch('nova.virt.hyperv.volumeops.VolumeOps'
'.attach_volumes')
@mock.patch.object(vmops.VMOps, '_attach_drive')
def _test_create_instance(self, mock_attach_drive, mock_attach_volumes,
fake_root_path, fake_ephemeral_path,
enable_instance_metrics,
vm_gen=constants.VM_GEN_1):
mock_vif_driver = mock.MagicMock()
self._vmops._vif_driver = mock_vif_driver
self.flags(enable_instance_metrics_collection=enable_instance_metrics,
group='hyperv')
fake_network_info = {'id': mock.sentinel.ID,
'address': mock.sentinel.ADDRESS}
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.create_instance(instance=mock_instance,
network_info=[fake_network_info],
block_device_info=mock.sentinel.DEV_INFO,
root_vhd_path=fake_root_path,
eph_vhd_path=fake_ephemeral_path,
vm_gen=vm_gen)
self._vmops._vmutils.create_vm.assert_called_once_with(
mock_instance.name, mock_instance.memory_mb,
mock_instance.vcpus, CONF.hyperv.limit_cpu_features,
CONF.hyperv.dynamic_memory_ratio, vm_gen,
[mock_instance.uuid])
expected = []
ctrl_type = vmops.VM_GENERATIONS_CONTROLLER_TYPES[vm_gen]
ctrl_disk_addr = 0
if fake_root_path:
expected.append(mock.call(mock_instance.name, fake_root_path,
0, ctrl_disk_addr, ctrl_type,
constants.DISK))
ctrl_disk_addr += 1
if fake_ephemeral_path:
expected.append(mock.call(mock_instance.name,
fake_ephemeral_path, 0, ctrl_disk_addr,
ctrl_type, constants.DISK))
mock_attach_drive.assert_has_calls(expected)
self._vmops._vmutils.create_scsi_controller.assert_called_once_with(
mock_instance.name)
ebs_root = vm_gen is not constants.VM_GEN_2 and fake_root_path is None
mock_attach_volumes.assert_called_once_with(mock.sentinel.DEV_INFO,
mock_instance.name,
ebs_root)
self._vmops._vmutils.create_nic.assert_called_once_with(
mock_instance.name, mock.sentinel.ID, mock.sentinel.ADDRESS)
mock_vif_driver.plug.assert_called_once_with(mock_instance,
fake_network_info)
mock_enable = self._vmops._vmutils.enable_vm_metrics_collection
if enable_instance_metrics:
mock_enable.assert_called_once_with(mock_instance.name)
def test_create_instance(self):
fake_ephemeral_path = mock.sentinel.FAKE_EPHEMERAL_PATH
self._test_create_instance(fake_root_path=mock.sentinel.FAKE_ROOT_PATH,
fake_ephemeral_path=fake_ephemeral_path,
enable_instance_metrics=True)
def test_create_instance_no_root_path(self):
fake_ephemeral_path = mock.sentinel.FAKE_EPHEMERAL_PATH
self._test_create_instance(fake_root_path=None,
fake_ephemeral_path=fake_ephemeral_path,
enable_instance_metrics=True)
def test_create_instance_no_ephemeral_path(self):
self._test_create_instance(fake_root_path=mock.sentinel.FAKE_ROOT_PATH,
fake_ephemeral_path=None,
enable_instance_metrics=True)
def test_create_instance_no_path(self):
self._test_create_instance(fake_root_path=None,
fake_ephemeral_path=None,
enable_instance_metrics=False)
def test_create_instance_enable_instance_metrics_false(self):
fake_ephemeral_path = mock.sentinel.FAKE_EPHEMERAL_PATH
self._test_create_instance(fake_root_path=mock.sentinel.FAKE_ROOT_PATH,
fake_ephemeral_path=fake_ephemeral_path,
enable_instance_metrics=False)
def test_create_instance_gen2(self):
self._test_create_instance(fake_root_path=None,
fake_ephemeral_path=None,
enable_instance_metrics=False,
vm_gen=constants.VM_GEN_2)
def test_attach_drive_vm_to_scsi(self):
self._vmops._attach_drive(
mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH,
mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR,
constants.CTRL_TYPE_SCSI)
self._vmops._vmutils.attach_scsi_drive.assert_called_once_with(
mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH,
constants.DISK)
def test_attach_drive_vm_to_ide(self):
self._vmops._attach_drive(
mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH,
mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR,
constants.CTRL_TYPE_IDE)
self._vmops._vmutils.attach_ide_drive.assert_called_once_with(
mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH,
mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR,
constants.DISK)
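# Helper: assert that get_image_vm_generation() raises HyperVException for
# invalid or unsupported image VM generation settings.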
def _check_get_image_vm_gen_except(self, image_prop):
image_meta = {"properties": {constants.IMAGE_PROP_VM_GEN: image_prop}}
self._vmops._hostutils.get_supported_vm_types.return_value = [
constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2]
self.assertRaises(vmutils.HyperVException,
self._vmops.get_image_vm_generation,
mock.sentinel.FAKE_PATH,
image_meta)
def test_get_image_vm_generation_default(self):
image_meta = {"properties": {}}
self._vmops._hostutils.get_default_vm_generation.return_value = (
constants.IMAGE_PROP_VM_GEN_1)
self._vmops._hostutils.get_supported_vm_types.return_value = [
constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2]
response = self._vmops.get_image_vm_generation(mock.sentinel.FAKE_PATH,
image_meta)
self.assertEqual(constants.VM_GEN_1, response)
def test_get_image_vm_generation_gen2(self):
image_meta = {"properties": {
constants.IMAGE_PROP_VM_GEN: constants.IMAGE_PROP_VM_GEN_2}}
self._vmops._hostutils.get_supported_vm_types.return_value = [
constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2]
self._vmops._vhdutils.get_vhd_format.return_value = (
constants.DISK_FORMAT_VHDX)
response = self._vmops.get_image_vm_generation(mock.sentinel.FAKE_PATH,
image_meta)
self.assertEqual(constants.VM_GEN_2, response)
def test_get_image_vm_generation_bad_prop(self):
self._check_get_image_vm_gen_except(mock.sentinel.FAKE_IMAGE_PROP)
def test_get_image_vm_generation_not_vhdx(self):
self._vmops._vhdutils.get_vhd_format.return_value = (
constants.DISK_FORMAT_VHD)
self._check_get_image_vm_gen_except(constants.IMAGE_PROP_VM_GEN_2)
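# Helper covering _create_config_drive(): unsupported formats raise, errors
# raised while building the drive propagate, otherwise an ISO is built and
# optionally converted to a VHD when config_drive_cdrom is disabled.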
@mock.patch('nova.api.metadata.base.InstanceMetadata')
@mock.patch('nova.virt.configdrive.ConfigDriveBuilder')
@mock.patch('nova.utils.execute')
def _test_create_config_drive(self, mock_execute, mock_ConfigDriveBuilder,
mock_InstanceMetadata, config_drive_format,
config_drive_cdrom, side_effect):
mock_instance = fake_instance.fake_instance_obj(self.context)
self.flags(config_drive_format=config_drive_format)
self.flags(config_drive_cdrom=config_drive_cdrom, group='hyperv')
self.flags(config_drive_inject_password=True, group='hyperv')
self._vmops._pathutils.get_instance_dir.return_value = (
self.FAKE_DIR)
mock_ConfigDriveBuilder().__enter__().make_drive.side_effect = [
side_effect]
if config_drive_format != self.ISO9660:
self.assertRaises(vmutils.UnsupportedConfigDriveFormatException,
self._vmops._create_config_drive,
mock_instance, [mock.sentinel.FILE],
mock.sentinel.PASSWORD,
mock.sentinel.NET_INFO)
elif side_effect is processutils.ProcessExecutionError:
self.assertRaises(processutils.ProcessExecutionError,
self._vmops._create_config_drive,
mock_instance, [mock.sentinel.FILE],
mock.sentinel.PASSWORD,
mock.sentinel.NET_INFO)
else:
path = self._vmops._create_config_drive(mock_instance,
[mock.sentinel.FILE],
mock.sentinel.PASSWORD,
mock.sentinel.NET_INFO)
mock_InstanceMetadata.assert_called_once_with(
mock_instance, content=[mock.sentinel.FILE],
extra_md={'admin_pass': mock.sentinel.PASSWORD},
network_info=mock.sentinel.NET_INFO)
self._vmops._pathutils.get_instance_dir.assert_called_once_with(
mock_instance.name)
mock_ConfigDriveBuilder.assert_called_with(
instance_md=mock_InstanceMetadata())
mock_make_drive = mock_ConfigDriveBuilder().__enter__().make_drive
path_iso = os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_ISO)
path_vhd = os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_VHD)
mock_make_drive.assert_called_once_with(path_iso)
if not CONF.hyperv.config_drive_cdrom:
expected = path_vhd
mock_execute.assert_called_once_with(
CONF.hyperv.qemu_img_cmd,
'convert', '-f', 'raw', '-O', 'vpc',
path_iso, path_vhd, attempts=1)
self._vmops._pathutils.remove.assert_called_once_with(
os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_ISO))
else:
expected = path_iso
self.assertEqual(expected, path)
def test_create_config_drive_cdrom(self):
self._test_create_config_drive(config_drive_format=self.ISO9660,
config_drive_cdrom=True,
side_effect=None)
def test_create_config_drive_vhd(self):
self._test_create_config_drive(config_drive_format=self.ISO9660,
config_drive_cdrom=False,
side_effect=None)
def test_create_config_drive_other_drive_format(self):
self._test_create_config_drive(config_drive_format=mock.sentinel.OTHER,
config_drive_cdrom=False,
side_effect=None)
def test_create_config_drive_execution_error(self):
self._test_create_config_drive(
config_drive_format=self.ISO9660,
config_drive_cdrom=False,
side_effect=processutils.ProcessExecutionError)
def test_attach_config_drive_exception(self):
instance = fake_instance.fake_instance_obj(self.context)
self.assertRaises(exception.InvalidDiskFormat,
self._vmops.attach_config_drive,
instance, 'C:/fake_instance_dir/configdrive.xxx',
constants.VM_GEN_1)
@mock.patch.object(vmops.VMOps, '_attach_drive')
def test_attach_config_drive(self, mock_attach_drive):
instance = fake_instance.fake_instance_obj(self.context)
self._vmops.attach_config_drive(instance,
self._FAKE_CONFIGDRIVE_PATH,
constants.VM_GEN_1)
mock_attach_drive.assert_called_once_with(
instance.name, self._FAKE_CONFIGDRIVE_PATH,
1, 0, constants.CTRL_TYPE_IDE, constants.DISK)
@mock.patch.object(vmops.VMOps, '_attach_drive')
def test_attach_config_drive_gen2(self, mock_attach_drive):
instance = fake_instance.fake_instance_obj(self.context)
self._vmops.attach_config_drive(instance,
self._FAKE_CONFIGDRIVE_PATH,
constants.VM_GEN_2)
mock_attach_drive.assert_called_once_with(
instance.name, self._FAKE_CONFIGDRIVE_PATH,
1, 0, constants.CTRL_TYPE_SCSI, constants.DISK)
def test_delete_disk_files(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops._delete_disk_files(mock_instance.name)
self._vmops._pathutils.get_instance_dir.assert_called_once_with(
mock_instance.name, create_dir=False, remove_dir=True)
def test_reboot_hard(self):
self._test_reboot(vmops.REBOOT_TYPE_HARD,
constants.HYPERV_VM_STATE_REBOOT)
@mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
def test_reboot_soft(self, mock_soft_shutdown):
mock_soft_shutdown.return_value = True
self._test_reboot(vmops.REBOOT_TYPE_SOFT,
constants.HYPERV_VM_STATE_ENABLED)
@mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
def test_reboot_soft_failed(self, mock_soft_shutdown):
mock_soft_shutdown.return_value = False
self._test_reboot(vmops.REBOOT_TYPE_SOFT,
constants.HYPERV_VM_STATE_REBOOT)
@mock.patch("nova.virt.hyperv.vmops.VMOps.power_on")
@mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
def test_reboot_soft_exception(self, mock_soft_shutdown, mock_power_on):
mock_soft_shutdown.return_value = True
mock_power_on.side_effect = vmutils.HyperVException("Expected failure")
instance = fake_instance.fake_instance_obj(self.context)
self.assertRaises(vmutils.HyperVException, self._vmops.reboot,
instance, {}, vmops.REBOOT_TYPE_SOFT)
mock_soft_shutdown.assert_called_once_with(instance)
mock_power_on.assert_called_once_with(instance)
def _test_reboot(self, reboot_type, vm_state):
instance = fake_instance.fake_instance_obj(self.context)
with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state:
self._vmops.reboot(instance, {}, reboot_type)
mock_set_state.assert_called_once_with(instance, vm_state)
@mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
def test_soft_shutdown(self, mock_wait_for_power_off):
instance = fake_instance.fake_instance_obj(self.context)
mock_wait_for_power_off.return_value = True
result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
mock_shutdown_vm.assert_called_once_with(instance.name)
mock_wait_for_power_off.assert_called_once_with(
instance.name, self._FAKE_TIMEOUT)
self.assertTrue(result)
@mock.patch("time.sleep")
def test_soft_shutdown_failed(self, mock_sleep):
instance = fake_instance.fake_instance_obj(self.context)
mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
mock_shutdown_vm.side_effect = vmutils.HyperVException(
"Expected failure.")
result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
mock_shutdown_vm.assert_called_once_with(instance.name)
self.assertFalse(result)
@mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
def test_soft_shutdown_wait(self, mock_wait_for_power_off):
instance = fake_instance.fake_instance_obj(self.context)
mock_wait_for_power_off.side_effect = [False, True]
result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1)
calls = [mock.call(instance.name, 1),
mock.call(instance.name, self._FAKE_TIMEOUT - 1)]
mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
mock_shutdown_vm.assert_called_with(instance.name)
mock_wait_for_power_off.assert_has_calls(calls)
self.assertTrue(result)
@mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
def test_soft_shutdown_wait_timeout(self, mock_wait_for_power_off):
instance = fake_instance.fake_instance_obj(self.context)
mock_wait_for_power_off.return_value = False
result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1.5)
calls = [mock.call(instance.name, 1.5),
mock.call(instance.name, self._FAKE_TIMEOUT - 1.5)]
mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
mock_shutdown_vm.assert_called_with(instance.name)
mock_wait_for_power_off.assert_has_calls(calls)
self.assertFalse(result)
def _test_power_off(self, timeout):
instance = fake_instance.fake_instance_obj(self.context)
with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state:
self._vmops.power_off(instance, timeout)
mock_set_state.assert_called_once_with(
instance, constants.HYPERV_VM_STATE_DISABLED)
def test_power_off_hard(self):
self._test_power_off(timeout=0)
@mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
def test_power_off_exception(self, mock_soft_shutdown):
mock_soft_shutdown.return_value = False
self._test_power_off(timeout=1)
@mock.patch("nova.virt.hyperv.vmops.VMOps._set_vm_state")
@mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
def test_power_off_soft(self, mock_soft_shutdown, mock_set_state):
instance = fake_instance.fake_instance_obj(self.context)
mock_soft_shutdown.return_value = True
self._vmops.power_off(instance, 1, 0)
mock_soft_shutdown.assert_called_once_with(
instance, 1, vmops.SHUTDOWN_TIME_INCREMENT)
self.assertFalse(mock_set_state.called)
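# Taken together, the power-off tests describe a soft-then-hard strategy:
# a zero timeout disables the VM immediately, otherwise a soft shutdown is
# attempted first and the hard power-off only runs as a fallback. Roughly
# (a sketch assumed from the assertions, not the shipped method):
#
#     def power_off(self, instance, timeout=0, retry_interval=0):
#         retry_interval = retry_interval or SHUTDOWN_TIME_INCREMENT
#         if timeout and self._soft_shutdown(instance, timeout,
#                                            retry_interval):
#             return
#         self._set_vm_state(instance, constants.HYPERV_VM_STATE_DISABLED)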
def test_get_vm_state(self):
summary_info = {'EnabledState': constants.HYPERV_VM_STATE_DISABLED}
with mock.patch.object(self._vmops._vmutils,
'get_vm_summary_info') as mock_get_summary_info:
mock_get_summary_info.return_value = summary_info
response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME)
self.assertEqual(response, constants.HYPERV_VM_STATE_DISABLED)
@mock.patch.object(vmops.VMOps, '_get_vm_state')
def test_wait_for_power_off_true(self, mock_get_state):
mock_get_state.return_value = constants.HYPERV_VM_STATE_DISABLED
result = self._vmops._wait_for_power_off(
mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
mock_get_state.assert_called_with(mock.sentinel.FAKE_VM_NAME)
self.assertTrue(result)
@mock.patch.object(vmops.etimeout, "with_timeout")
def test_wait_for_power_off_false(self, mock_with_timeout):
mock_with_timeout.side_effect = etimeout.Timeout()
result = self._vmops._wait_for_power_off(
mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
self.assertFalse(result)
def test_copy_vm_console_logs(self):
fake_local_paths = (mock.sentinel.FAKE_PATH,
mock.sentinel.FAKE_PATH_ARCHIVED)
fake_remote_paths = (mock.sentinel.FAKE_REMOTE_PATH,
mock.sentinel.FAKE_REMOTE_PATH_ARCHIVED)
self._vmops._pathutils.get_vm_console_log_paths.side_effect = [
fake_local_paths, fake_remote_paths]
self._vmops._pathutils.exists.side_effect = [True, False]
self._vmops.copy_vm_console_logs(mock.sentinel.FAKE_VM_NAME,
mock.sentinel.FAKE_DEST)
calls = [mock.call(mock.sentinel.FAKE_VM_NAME),
mock.call(mock.sentinel.FAKE_VM_NAME,
remote_server=mock.sentinel.FAKE_DEST)]
self._vmops._pathutils.get_vm_console_log_paths.assert_has_calls(calls)
calls = [mock.call(mock.sentinel.FAKE_PATH),
mock.call(mock.sentinel.FAKE_PATH_ARCHIVED)]
self._vmops._pathutils.exists.assert_has_calls(calls)
self._vmops._pathutils.copy.assert_called_once_with(
mock.sentinel.FAKE_PATH, mock.sentinel.FAKE_REMOTE_PATH)
@mock.patch("__builtin__.open")
@mock.patch("os.path.exists")
def test_get_console_output_exception(self, fake_path_exists, fake_open):
fake_vm = mock.MagicMock()
fake_open.side_effect = vmutils.HyperVException
fake_path_exists.return_value = True
self._vmops._pathutils.get_vm_console_log_paths.return_value = (
mock.sentinel.fake_console_log_path,
mock.sentinel.fake_console_log_archived)
with mock.patch('nova.virt.hyperv.vmops.open', fake_open, create=True):
self.assertRaises(vmutils.HyperVException,
self._vmops.get_console_output,
fake_vm)
def test_list_instance_uuids(self):
fake_uuid = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'
with mock.patch.object(self._vmops._vmutils,
'list_instance_notes') as mock_list_notes:
mock_list_notes.return_value = [('fake_name', [fake_uuid])]
response = self._vmops.list_instance_uuids()
mock_list_notes.assert_called_once_with()
self.assertEqual(response, [fake_uuid])

View File

@@ -0,0 +1,780 @@
# Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import exception
from nova import test
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
class VMUtilsTestCase(test.NoDBTestCase):
"""Unit tests for the Hyper-V VMUtils class."""
_FAKE_VM_NAME = 'fake_vm'
_FAKE_MEMORY_MB = 2
_FAKE_VCPUS_NUM = 4
_FAKE_JOB_PATH = 'fake_job_path'
_FAKE_RET_VAL = 0
_FAKE_RET_VAL_BAD = -1
_FAKE_PATH = "fake_path"
_FAKE_CTRL_PATH = 'fake_ctrl_path'
_FAKE_CTRL_ADDR = 0
_FAKE_DRIVE_ADDR = 0
_FAKE_MOUNTED_DISK_PATH = 'fake_mounted_disk_path'
_FAKE_VM_PATH = "fake_vm_path"
_FAKE_VHD_PATH = "fake_vhd_path"
_FAKE_DVD_PATH = "fake_dvd_path"
_FAKE_VOLUME_DRIVE_PATH = "fake_volume_drive_path"
_FAKE_VM_UUID = "04e79212-39bc-4065-933c-50f6d48a57f6"
_FAKE_INSTANCE = {"name": _FAKE_VM_NAME,
"uuid": _FAKE_VM_UUID}
_FAKE_SNAPSHOT_PATH = "fake_snapshot_path"
_FAKE_RES_DATA = "fake_res_data"
_FAKE_HOST_RESOURCE = "fake_host_resource"
_FAKE_CLASS = "FakeClass"
_FAKE_RES_PATH = "fake_res_path"
_FAKE_RES_NAME = 'fake_res_name'
_FAKE_ADDRESS = "fake_address"
_FAKE_JOB_STATUS_DONE = 7
_FAKE_JOB_STATUS_BAD = -1
_FAKE_JOB_DESCRIPTION = "fake_job_description"
_FAKE_ERROR = "fake_error"
_FAKE_ELAPSED_TIME = 0
_CONCRETE_JOB = "Msvm_ConcreteJob"
_FAKE_DYNAMIC_MEMORY_RATIO = 1.0
_FAKE_SUMMARY_INFO = {'NumberOfProcessors': 4,
'EnabledState': 2,
'MemoryUsage': 2,
'UpTime': 1}
_DEFINE_SYSTEM = 'DefineVirtualSystem'
_DESTROY_SYSTEM = 'DestroyVirtualSystem'
_DESTROY_SNAPSHOT = 'RemoveVirtualSystemSnapshot'
_ADD_RESOURCE = 'AddVirtualSystemResources'
_REMOVE_RESOURCE = 'RemoveVirtualSystemResources'
_SETTING_TYPE = 'SettingType'
_VM_GEN = constants.VM_GEN_1
_VIRTUAL_SYSTEM_TYPE_REALIZED = 3
def setUp(self):
self._vmutils = vmutils.VMUtils()
self._vmutils._conn = mock.MagicMock()
super(VMUtilsTestCase, self).setUp()
def test_enable_vm_metrics_collection(self):
self.assertRaises(NotImplementedError,
self._vmutils.enable_vm_metrics_collection,
self._FAKE_VM_NAME)
def test_get_vm_summary_info(self):
self._lookup_vm()
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
mock_summary = mock.MagicMock()
mock_svc.GetSummaryInformation.return_value = (self._FAKE_RET_VAL,
[mock_summary])
for (key, val) in self._FAKE_SUMMARY_INFO.items():
setattr(mock_summary, key, val)
summary = self._vmutils.get_vm_summary_info(self._FAKE_VM_NAME)
self.assertEqual(self._FAKE_SUMMARY_INFO, summary)
def _lookup_vm(self):
mock_vm = mock.MagicMock()
self._vmutils._lookup_vm_check = mock.MagicMock(
return_value=mock_vm)
mock_vm.path_.return_value = self._FAKE_VM_PATH
return mock_vm
def test_lookup_vm_ok(self):
mock_vm = mock.MagicMock()
self._vmutils._conn.Msvm_ComputerSystem.return_value = [mock_vm]
vm = self._vmutils._lookup_vm_check(self._FAKE_VM_NAME)
self.assertEqual(mock_vm, vm)
def test_lookup_vm_multiple(self):
mockvm = mock.MagicMock()
self._vmutils._conn.Msvm_ComputerSystem.return_value = [mockvm, mockvm]
self.assertRaises(vmutils.HyperVException,
self._vmutils._lookup_vm_check,
self._FAKE_VM_NAME)
def test_lookup_vm_none(self):
self._vmutils._conn.Msvm_ComputerSystem.return_value = []
self.assertRaises(exception.NotFound,
self._vmutils._lookup_vm_check,
self._FAKE_VM_NAME)
def test_set_vm_memory_static(self):
self._test_set_vm_memory_dynamic(1.0)
def test_set_vm_memory_dynamic(self):
self._test_set_vm_memory_dynamic(2.0)
def _test_set_vm_memory_dynamic(self, dynamic_memory_ratio):
mock_vm = self._lookup_vm()
mock_s = self._vmutils._conn.Msvm_VirtualSystemSettingData()[0]
mock_s.SystemType = 3
mock_vmsetting = mock.MagicMock()
mock_vmsetting.associators.return_value = [mock_s]
self._vmutils._modify_virt_resource = mock.MagicMock()
self._vmutils._set_vm_memory(mock_vm, mock_vmsetting,
self._FAKE_MEMORY_MB,
dynamic_memory_ratio)
self._vmutils._modify_virt_resource.assert_called_with(
mock_s, self._FAKE_VM_PATH)
if dynamic_memory_ratio > 1:
self.assertTrue(mock_s.DynamicMemoryEnabled)
else:
self.assertFalse(mock_s.DynamicMemoryEnabled)
def test_soft_shutdown_vm(self):
mock_vm = self._lookup_vm()
mock_shutdown = mock.MagicMock()
mock_shutdown.InitiateShutdown.return_value = (self._FAKE_RET_VAL, )
mock_vm.associators.return_value = [mock_shutdown]
with mock.patch.object(self._vmutils, 'check_ret_val') as mock_check:
self._vmutils.soft_shutdown_vm(self._FAKE_VM_NAME)
mock_shutdown.InitiateShutdown.assert_called_once_with(
Force=False, Reason=mock.ANY)
mock_check.assert_called_once_with(self._FAKE_RET_VAL, None)
def test_soft_shutdown_vm_no_component(self):
mock_vm = self._lookup_vm()
mock_vm.associators.return_value = []
with mock.patch.object(self._vmutils, 'check_ret_val') as mock_check:
self._vmutils.soft_shutdown_vm(self._FAKE_VM_NAME)
self.assertFalse(mock_check.called)
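# Both soft_shutdown_vm tests assume the VM's shutdown integration
# component is obtained through an associator query and asked to
# InitiateShutdown; when no component is exposed the request is silently
# skipped. A sketch of that contract (the _SHUTDOWN_COMPONENT class name
# below is a placeholder, not necessarily the real attribute):
#
#     def soft_shutdown_vm(self, vm_name):
#         vm = self._lookup_vm_check(vm_name)
#         shutdown_component = vm.associators(
#             wmi_result_class=self._SHUTDOWN_COMPONENT)
#         if not shutdown_component:
#             # The VM is already off or has no integration services.
#             return
#         reason = 'Soft shutdown requested by OpenStack Nova.'
#         (ret_val, ) = shutdown_component[0].InitiateShutdown(
#             Force=False, Reason=reason)
#         self.check_ret_val(ret_val, None)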
@mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_disks')
def test_get_vm_storage_paths(self, mock_get_vm_disks):
self._lookup_vm()
mock_rasds = self._create_mock_disks()
mock_get_vm_disks.return_value = ([mock_rasds[0]], [mock_rasds[1]])
storage = self._vmutils.get_vm_storage_paths(self._FAKE_VM_NAME)
(disk_files, volume_drives) = storage
self.assertEqual([self._FAKE_VHD_PATH], disk_files)
self.assertEqual([self._FAKE_VOLUME_DRIVE_PATH], volume_drives)
def test_get_vm_disks(self):
mock_vm = self._lookup_vm()
mock_vmsettings = [mock.MagicMock()]
mock_vm.associators.return_value = mock_vmsettings
mock_rasds = self._create_mock_disks()
mock_vmsettings[0].associators.return_value = mock_rasds
(disks, volumes) = self._vmutils._get_vm_disks(mock_vm)
mock_vm.associators.assert_called_with(
wmi_result_class=self._vmutils._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
mock_vmsettings[0].associators.assert_called_with(
wmi_result_class=self._vmutils._RESOURCE_ALLOC_SETTING_DATA_CLASS)
self.assertEqual([mock_rasds[0]], disks)
self.assertEqual([mock_rasds[1]], volumes)
def _create_mock_disks(self):
mock_rasd1 = mock.MagicMock()
mock_rasd1.ResourceSubType = self._vmutils._HARD_DISK_RES_SUB_TYPE
mock_rasd1.HostResource = [self._FAKE_VHD_PATH]
mock_rasd1.Connection = [self._FAKE_VHD_PATH]
mock_rasd1.Parent = self._FAKE_CTRL_PATH
mock_rasd1.Address = self._FAKE_ADDRESS
mock_rasd2 = mock.MagicMock()
mock_rasd2.ResourceSubType = self._vmutils._PHYS_DISK_RES_SUB_TYPE
mock_rasd2.HostResource = [self._FAKE_VOLUME_DRIVE_PATH]
return [mock_rasd1, mock_rasd2]
@mock.patch.object(vmutils.VMUtils, '_set_vm_vcpus')
@mock.patch.object(vmutils.VMUtils, '_set_vm_memory')
@mock.patch.object(vmutils.VMUtils, '_get_wmi_obj')
def test_create_vm(self, mock_get_wmi_obj, mock_set_mem, mock_set_vcpus):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
getattr(mock_svc, self._DEFINE_SYSTEM).return_value = (
None, self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
mock_vm = mock_get_wmi_obj.return_value
self._vmutils._conn.Msvm_ComputerSystem.return_value = [mock_vm]
mock_s = mock.MagicMock()
setattr(mock_s,
self._SETTING_TYPE,
self._VIRTUAL_SYSTEM_TYPE_REALIZED)
mock_vm.associators.return_value = [mock_s]
self._vmutils.create_vm(self._FAKE_VM_NAME, self._FAKE_MEMORY_MB,
self._FAKE_VCPUS_NUM, False,
self._FAKE_DYNAMIC_MEMORY_RATIO,
self._VM_GEN)
self.assertTrue(getattr(mock_svc, self._DEFINE_SYSTEM).called)
mock_set_mem.assert_called_with(mock_vm, mock_s, self._FAKE_MEMORY_MB,
self._FAKE_DYNAMIC_MEMORY_RATIO)
mock_set_vcpus.assert_called_with(mock_vm, mock_s,
self._FAKE_VCPUS_NUM,
False)
def test_get_vm_scsi_controller(self):
self._prepare_get_vm_controller(self._vmutils._SCSI_CTRL_RES_SUB_TYPE)
path = self._vmutils.get_vm_scsi_controller(self._FAKE_VM_NAME)
self.assertEqual(self._FAKE_RES_PATH, path)
@mock.patch("nova.virt.hyperv.vmutils.VMUtils.get_attached_disks")
def test_get_free_controller_slot(self, mock_get_attached_disks):
mock_disk = mock.MagicMock()
mock_disk.AddressOnParent = 3
mock_get_attached_disks.return_value = [mock_disk]
response = self._vmutils.get_free_controller_slot(
self._FAKE_CTRL_PATH)
mock_get_attached_disks.assert_called_once_with(
self._FAKE_CTRL_PATH)
self.assertEqual(response, 0)
def test_get_free_controller_slot_exception(self):
fake_drive = mock.MagicMock()
type(fake_drive).AddressOnParent = mock.PropertyMock(
side_effect=xrange(constants.SCSI_CONTROLLER_SLOTS_NUMBER))
with mock.patch.object(self._vmutils,
'get_attached_disks') as fake_get_attached_disks:
fake_get_attached_disks.return_value = (
[fake_drive] * constants.SCSI_CONTROLLER_SLOTS_NUMBER)
self.assertRaises(vmutils.HyperVException,
self._vmutils.get_free_controller_slot,
mock.sentinel.scsi_controller_path)
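# These two tests treat get_free_controller_slot as "return the first
# unused AddressOnParent, raise once every SCSI slot is taken". A minimal
# sketch consistent with the assertions (hypothetical, for illustration):
#
#     def get_free_controller_slot(self, scsi_controller_path):
#         attached_disks = self.get_attached_disks(scsi_controller_path)
#         used_slots = [int(disk.AddressOnParent)
#                       for disk in attached_disks]
#         for slot in range(constants.SCSI_CONTROLLER_SLOTS_NUMBER):
#             if slot not in used_slots:
#                 return slot
#         raise HyperVException("No free slots left on the SCSI controller")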
def test_get_vm_ide_controller(self):
self._prepare_get_vm_controller(self._vmutils._IDE_CTRL_RES_SUB_TYPE)
path = self._vmutils.get_vm_ide_controller(self._FAKE_VM_NAME,
self._FAKE_ADDRESS)
self.assertEqual(self._FAKE_RES_PATH, path)
def test_get_vm_ide_controller_none(self):
self._prepare_get_vm_controller(self._vmutils._IDE_CTRL_RES_SUB_TYPE)
path = self._vmutils.get_vm_ide_controller(
mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_NOT_FOUND_ADDR)
self.assertNotEqual(self._FAKE_RES_PATH, path)
def _prepare_get_vm_controller(self, resource_sub_type):
mock_vm = self._lookup_vm()
mock_vm_settings = mock.MagicMock()
mock_rasds = mock.MagicMock()
mock_rasds.path_.return_value = self._FAKE_RES_PATH
mock_rasds.ResourceSubType = resource_sub_type
mock_rasds.Address = self._FAKE_ADDRESS
mock_vm_settings.associators.return_value = [mock_rasds]
mock_vm.associators.return_value = [mock_vm_settings]
def _prepare_resources(self, mock_path, mock_subtype, mock_vm_settings):
mock_rasds = mock_vm_settings.associators.return_value[0]
mock_rasds.path_.return_value = mock_path
mock_rasds.ResourceSubType = mock_subtype
return mock_rasds
@mock.patch("nova.virt.hyperv.vmutils.VMUtils.get_free_controller_slot")
@mock.patch("nova.virt.hyperv.vmutils.VMUtils._get_vm_scsi_controller")
def test_attach_scsi_drive(self, mock_get_vm_scsi_controller,
mock_get_free_controller_slot):
mock_vm = self._lookup_vm()
mock_get_vm_scsi_controller.return_value = self._FAKE_CTRL_PATH
mock_get_free_controller_slot.return_value = self._FAKE_DRIVE_ADDR
with mock.patch.object(self._vmutils,
'attach_drive') as mock_attach_drive:
self._vmutils.attach_scsi_drive(mock_vm, self._FAKE_PATH,
constants.DISK)
mock_get_vm_scsi_controller.assert_called_once_with(mock_vm)
mock_get_free_controller_slot.assert_called_once_with(
self._FAKE_CTRL_PATH)
mock_attach_drive.assert_called_once_with(
mock_vm, self._FAKE_PATH, self._FAKE_CTRL_PATH,
self._FAKE_DRIVE_ADDR, constants.DISK)
@mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data')
@mock.patch.object(vmutils.VMUtils, '_get_vm_ide_controller')
def test_attach_ide_drive(self, mock_get_ide_ctrl, mock_get_new_rsd):
mock_vm = self._lookup_vm()
mock_rsd = mock_get_new_rsd.return_value
with mock.patch.object(self._vmutils,
'_add_virt_resource') as mock_add_virt_res:
self._vmutils.attach_ide_drive(self._FAKE_VM_NAME,
self._FAKE_CTRL_PATH,
self._FAKE_CTRL_ADDR,
self._FAKE_DRIVE_ADDR)
mock_add_virt_res.assert_called_with(mock_rsd,
mock_vm.path_.return_value)
mock_get_ide_ctrl.assert_called_with(mock_vm, self._FAKE_CTRL_ADDR)
self.assertTrue(mock_get_new_rsd.called)
@mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data')
def test_create_scsi_controller(self, mock_get_new_rsd):
mock_vm = self._lookup_vm()
with mock.patch.object(self._vmutils,
'_add_virt_resource') as mock_add_virt_res:
self._vmutils.create_scsi_controller(self._FAKE_VM_NAME)
mock_add_virt_res.assert_called_with(mock_get_new_rsd.return_value,
mock_vm.path_.return_value)
@mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data')
def test_attach_volume_to_controller(self, mock_get_new_rsd):
mock_vm = self._lookup_vm()
with mock.patch.object(self._vmutils,
'_add_virt_resource') as mock_add_virt_res:
self._vmutils.attach_volume_to_controller(
self._FAKE_VM_NAME, self._FAKE_CTRL_PATH, self._FAKE_CTRL_ADDR,
self._FAKE_MOUNTED_DISK_PATH)
mock_add_virt_res.assert_called_with(mock_get_new_rsd.return_value,
mock_vm.path_.return_value)
@mock.patch.object(vmutils.VMUtils, '_modify_virt_resource')
@mock.patch.object(vmutils.VMUtils, '_get_nic_data_by_name')
def test_set_nic_connection(self, mock_get_nic_conn, mock_modify_virt_res):
self._lookup_vm()
mock_nic = mock_get_nic_conn.return_value
self._vmutils.set_nic_connection(self._FAKE_VM_NAME, None, None)
mock_modify_virt_res.assert_called_with(mock_nic, self._FAKE_VM_PATH)
@mock.patch.object(vmutils.VMUtils, '_get_new_setting_data')
def test_create_nic(self, mock_get_new_virt_res):
self._lookup_vm()
mock_nic = mock_get_new_virt_res.return_value
with mock.patch.object(self._vmutils,
'_add_virt_resource') as mock_add_virt_res:
self._vmutils.create_nic(
self._FAKE_VM_NAME, self._FAKE_RES_NAME, self._FAKE_ADDRESS)
mock_add_virt_res.assert_called_with(mock_nic, self._FAKE_VM_PATH)
def test_set_vm_state(self):
mock_vm = self._lookup_vm()
mock_vm.RequestStateChange.return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
self._vmutils.set_vm_state(self._FAKE_VM_NAME,
constants.HYPERV_VM_STATE_ENABLED)
mock_vm.RequestStateChange.assert_called_with(
constants.HYPERV_VM_STATE_ENABLED)
def test_destroy_vm(self):
self._lookup_vm()
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
getattr(mock_svc, self._DESTROY_SYSTEM).return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
self._vmutils.destroy_vm(self._FAKE_VM_NAME)
getattr(mock_svc, self._DESTROY_SYSTEM).assert_called_with(
self._FAKE_VM_PATH)
@mock.patch.object(vmutils.VMUtils, '_wait_for_job')
def test_check_ret_val_ok(self, mock_wait_for_job):
self._vmutils.check_ret_val(constants.WMI_JOB_STATUS_STARTED,
self._FAKE_JOB_PATH)
mock_wait_for_job.assert_called_once_with(self._FAKE_JOB_PATH)
def test_check_ret_val_exception(self):
self.assertRaises(vmutils.HyperVException,
self._vmutils.check_ret_val,
self._FAKE_RET_VAL_BAD,
self._FAKE_JOB_PATH)
def test_wait_for_job_done(self):
mockjob = self._prepare_wait_for_job(constants.WMI_JOB_STATE_COMPLETED)
job = self._vmutils._wait_for_job(self._FAKE_JOB_PATH)
self.assertEqual(mockjob, job)
def test_wait_for_job_exception_concrete_job(self):
mock_job = self._prepare_wait_for_job()
mock_job.path.return_value.Class = self._CONCRETE_JOB
self.assertRaises(vmutils.HyperVException,
self._vmutils._wait_for_job,
self._FAKE_JOB_PATH)
def test_wait_for_job_exception_with_error(self):
mock_job = self._prepare_wait_for_job()
mock_job.GetError.return_value = (self._FAKE_ERROR, self._FAKE_RET_VAL)
self.assertRaises(vmutils.HyperVException,
self._vmutils._wait_for_job,
self._FAKE_JOB_PATH)
def test_wait_for_job_exception_no_error(self):
mock_job = self._prepare_wait_for_job()
mock_job.GetError.return_value = (None, None)
self.assertRaises(vmutils.HyperVException,
self._vmutils._wait_for_job,
self._FAKE_JOB_PATH)
def _prepare_wait_for_job(self, state=_FAKE_JOB_STATUS_BAD):
mock_job = mock.MagicMock()
mock_job.JobState = state
mock_job.Description = self._FAKE_JOB_DESCRIPTION
mock_job.ElapsedTime = self._FAKE_ELAPSED_TIME
self._vmutils._get_wmi_obj = mock.MagicMock(return_value=mock_job)
return mock_job
def test_add_virt_resource(self):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
getattr(mock_svc, self._ADD_RESOURCE).return_value = (
self._FAKE_JOB_PATH, mock.MagicMock(), self._FAKE_RET_VAL)
mock_res_setting_data = mock.MagicMock()
mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
self._vmutils._add_virt_resource(mock_res_setting_data,
self._FAKE_VM_PATH)
self._assert_add_resources(mock_svc)
def test_modify_virt_resource(self):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
mock_svc.ModifyVirtualSystemResources.return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
mock_res_setting_data = mock.MagicMock()
mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
self._vmutils._modify_virt_resource(mock_res_setting_data,
self._FAKE_VM_PATH)
mock_svc.ModifyVirtualSystemResources.assert_called_with(
ResourceSettingData=[self._FAKE_RES_DATA],
ComputerSystem=self._FAKE_VM_PATH)
def test_remove_virt_resource(self):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
getattr(mock_svc, self._REMOVE_RESOURCE).return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
mock_res_setting_data = mock.MagicMock()
mock_res_setting_data.path_.return_value = self._FAKE_RES_PATH
self._vmutils._remove_virt_resource(mock_res_setting_data,
self._FAKE_VM_PATH)
self._assert_remove_resources(mock_svc)
def test_set_disk_host_resource(self):
self._lookup_vm()
mock_rasds = self._create_mock_disks()
self._vmutils._get_vm_disks = mock.MagicMock(
return_value=([mock_rasds[0]], [mock_rasds[1]]))
self._vmutils._modify_virt_resource = mock.MagicMock()
self._vmutils._get_disk_resource_address = mock.MagicMock(
return_value=self._FAKE_ADDRESS)
self._vmutils.set_disk_host_resource(
self._FAKE_VM_NAME,
self._FAKE_CTRL_PATH,
self._FAKE_ADDRESS,
mock.sentinel.fake_new_mounted_disk_path)
self._vmutils._get_disk_resource_address.assert_called_with(
mock_rasds[0])
self._vmutils._modify_virt_resource.assert_called_with(
mock_rasds[0], self._FAKE_VM_PATH)
self.assertEqual(
mock.sentinel.fake_new_mounted_disk_path,
mock_rasds[0].HostResource[0])
@mock.patch.object(vmutils, 'wmi', create=True)
@mock.patch.object(vmutils.VMUtils, 'check_ret_val')
def test_take_vm_snapshot(self, mock_check_ret_val, mock_wmi):
self._lookup_vm()
mock_svc = self._get_snapshot_service()
mock_svc.CreateVirtualSystemSnapshot.return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL, mock.MagicMock())
self._vmutils.take_vm_snapshot(self._FAKE_VM_NAME)
mock_svc.CreateVirtualSystemSnapshot.assert_called_with(
self._FAKE_VM_PATH)
mock_check_ret_val.assert_called_once_with(self._FAKE_RET_VAL,
self._FAKE_JOB_PATH)
def test_remove_vm_snapshot(self):
mock_svc = self._get_snapshot_service()
getattr(mock_svc, self._DESTROY_SNAPSHOT).return_value = (
self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
self._vmutils.remove_vm_snapshot(self._FAKE_SNAPSHOT_PATH)
getattr(mock_svc, self._DESTROY_SNAPSHOT).assert_called_with(
self._FAKE_SNAPSHOT_PATH)
def test_detach_vm_disk(self):
self._lookup_vm()
mock_disk = self._prepare_mock_disk()
with mock.patch.object(self._vmutils,
'_remove_virt_resource') as mock_rm_virt_res:
self._vmutils.detach_vm_disk(self._FAKE_VM_NAME,
self._FAKE_HOST_RESOURCE)
mock_rm_virt_res.assert_called_with(mock_disk, self._FAKE_VM_PATH)
def test_get_mounted_disk_resource_from_path(self):
mock_disk_1 = mock.MagicMock()
mock_disk_2 = mock.MagicMock()
mock_disk_2.HostResource = [self._FAKE_MOUNTED_DISK_PATH]
self._vmutils._conn.query.return_value = [mock_disk_1, mock_disk_2]
physical_disk = self._vmutils._get_mounted_disk_resource_from_path(
self._FAKE_MOUNTED_DISK_PATH, True)
self.assertEqual(mock_disk_2, physical_disk)
def test_get_controller_volume_paths(self):
self._prepare_mock_disk()
mock_disks = {self._FAKE_RES_PATH: self._FAKE_HOST_RESOURCE}
disks = self._vmutils.get_controller_volume_paths(self._FAKE_RES_PATH)
self.assertEqual(mock_disks, disks)
def _prepare_mock_disk(self):
mock_disk = mock.MagicMock()
mock_disk.HostResource = [self._FAKE_HOST_RESOURCE]
mock_disk.path.return_value.RelPath = self._FAKE_RES_PATH
mock_disk.ResourceSubType = self._vmutils._HARD_DISK_RES_SUB_TYPE
self._vmutils._conn.query.return_value = [mock_disk]
return mock_disk
def _get_snapshot_service(self):
return self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
def _assert_add_resources(self, mock_svc):
getattr(mock_svc, self._ADD_RESOURCE).assert_called_with(
[self._FAKE_RES_DATA], self._FAKE_VM_PATH)
def _assert_remove_resources(self, mock_svc):
getattr(mock_svc, self._REMOVE_RESOURCE).assert_called_with(
[self._FAKE_RES_PATH], self._FAKE_VM_PATH)
def test_get_active_instances(self):
fake_vm = mock.MagicMock()
type(fake_vm).ElementName = mock.PropertyMock(
side_effect=['active_vm', 'inactive_vm'])
type(fake_vm).EnabledState = mock.PropertyMock(
side_effect=[constants.HYPERV_VM_STATE_ENABLED,
constants.HYPERV_VM_STATE_DISABLED])
self._vmutils.list_instances = mock.MagicMock(
return_value=[mock.sentinel.fake_vm_name] * 2)
self._vmutils._lookup_vm = mock.MagicMock(side_effect=[fake_vm] * 2)
active_instances = self._vmutils.get_active_instances()
self.assertEqual(['active_vm'], active_instances)
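# test_get_active_instances only cares that VMs whose EnabledState is not
# ENABLED are filtered out of the result, e.g. (sketch assumed from the
# assertions):
#
#     def get_active_instances(self):
#         vm_names = self.list_instances()
#         vms = [self._lookup_vm(vm_name) for vm_name in vm_names]
#         return [vm.ElementName for vm in vms
#                 if (vm is not None and
#                     vm.EnabledState == constants.HYPERV_VM_STATE_ENABLED)]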
def _test_get_vm_serial_port_connection(self, new_connection=None):
old_serial_connection = 'old_serial_connection'
mock_vm = self._lookup_vm()
mock_vmsettings = [mock.MagicMock()]
mock_vm.associators.return_value = mock_vmsettings
fake_serial_port = mock.MagicMock()
fake_serial_port.ResourceSubType = (
self._vmutils._SERIAL_PORT_RES_SUB_TYPE)
fake_serial_port.Connection = [old_serial_connection]
mock_rasds = [fake_serial_port]
mock_vmsettings[0].associators.return_value = mock_rasds
self._vmutils._modify_virt_resource = mock.MagicMock()
fake_modify = self._vmutils._modify_virt_resource
ret_val = self._vmutils.get_vm_serial_port_connection(
self._FAKE_VM_NAME, update_connection=new_connection)
if new_connection:
self.assertEqual(new_connection, ret_val)
fake_modify.assert_called_once_with(fake_serial_port,
mock_vm.path_())
else:
self.assertEqual(old_serial_connection, ret_val)
def test_set_vm_serial_port_connection(self):
self._test_get_vm_serial_port_connection('new_serial_connection')
def test_get_vm_serial_port_connection(self):
self._test_get_vm_serial_port_connection()
def test_list_instance_notes(self):
vs = mock.MagicMock()
attrs = {'ElementName': 'fake_name',
'Notes': '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'}
vs.configure_mock(**attrs)
self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
response = self._vmutils.list_instance_notes()
self.assertEqual([(attrs['ElementName'], [attrs['Notes']])], response)
self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
['ElementName', 'Notes'],
SettingType=self._vmutils._VIRTUAL_SYSTEM_CURRENT_SETTINGS)
@mock.patch('nova.virt.hyperv.vmutils.VMUtils.check_ret_val')
def test_modify_virtual_system(self, mock_check_ret_val):
mock_vs_man_svc = mock.MagicMock()
mock_vmsetting = mock.MagicMock()
fake_path = 'fake path'
fake_job_path = 'fake job path'
fake_ret_val = 'fake return value'
mock_vs_man_svc.ModifyVirtualSystem.return_value = (0, fake_job_path,
fake_ret_val)
self._vmutils._modify_virtual_system(vs_man_svc=mock_vs_man_svc,
vm_path=fake_path,
vmsetting=mock_vmsetting)
mock_vs_man_svc.ModifyVirtualSystem.assert_called_once_with(
ComputerSystem=fake_path,
SystemSettingData=mock_vmsetting.GetText_(1))
mock_check_ret_val.assert_called_once_with(fake_ret_val, fake_job_path)
@mock.patch('nova.virt.hyperv.vmutils.VMUtils.check_ret_val')
@mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_wmi_obj')
@mock.patch('nova.virt.hyperv.vmutils.VMUtils._modify_virtual_system')
@mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_setting_data')
def test_create_vm_obj(self, mock_get_vm_setting_data,
mock_modify_virtual_system,
mock_get_wmi_obj, mock_check_ret_val):
mock_vs_man_svc = mock.MagicMock()
mock_vs_gs_data = mock.MagicMock()
fake_vm_path = 'fake vm path'
fake_job_path = 'fake job path'
fake_ret_val = 'fake return value'
_conn = self._vmutils._conn.Msvm_VirtualSystemGlobalSettingData
_conn.new.return_value = mock_vs_gs_data
mock_vs_man_svc.DefineVirtualSystem.return_value = (fake_vm_path,
fake_job_path,
fake_ret_val)
response = self._vmutils._create_vm_obj(vs_man_svc=mock_vs_man_svc,
vm_name='fake vm',
vm_gen='fake vm gen',
notes='fake notes',
dynamic_memory_ratio=1.0)
_conn.new.assert_called_once_with()
self.assertEqual(mock_vs_gs_data.ElementName, 'fake vm')
mock_vs_man_svc.DefineVirtualSystem.assert_called_once_with(
[], None, mock_vs_gs_data.GetText_(1))
mock_check_ret_val.assert_called_once_with(fake_ret_val, fake_job_path)
mock_get_wmi_obj.assert_called_with(fake_vm_path)
mock_get_vm_setting_data.assert_called_once_with(mock_get_wmi_obj())
mock_modify_virtual_system.assert_called_once_with(
mock_vs_man_svc, fake_vm_path, mock_get_vm_setting_data())
self.assertEqual(mock_get_vm_setting_data().Notes,
'\n'.join('fake notes'))
self.assertEqual(response, mock_get_wmi_obj())
def test_list_instances(self):
vs = mock.MagicMock()
attrs = {'ElementName': 'fake_name'}
vs.configure_mock(**attrs)
self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
response = self._vmutils.list_instances()
self.assertEqual([(attrs['ElementName'])], response)
self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
['ElementName'],
SettingType=self._vmutils._VIRTUAL_SYSTEM_CURRENT_SETTINGS)
@mock.patch.object(vmutils.VMUtils, "_clone_wmi_obj")
def _test_check_clone_wmi_obj(self, mock_clone_wmi_obj, clone_objects):
mock_obj = mock.MagicMock()
self._vmutils._clone_wmi_objs = clone_objects
response = self._vmutils._check_clone_wmi_obj(class_name="fakeClass",
obj=mock_obj)
if not clone_objects:
self.assertEqual(mock_obj, response)
else:
mock_clone_wmi_obj.assert_called_once_with("fakeClass", mock_obj)
self.assertEqual(mock_clone_wmi_obj.return_value, response)
def test_check_clone_wmi_obj_true(self):
self._test_check_clone_wmi_obj(clone_objects=True)
def test_check_clone_wmi_obj_false(self):
self._test_check_clone_wmi_obj(clone_objects=False)
def test_clone_wmi_obj(self):
mock_obj = mock.MagicMock()
mock_value = mock.MagicMock()
mock_value.Value = mock.sentinel.fake_value
mock_obj._properties = [mock.sentinel.property]
mock_obj.Properties_.Item.return_value = mock_value
response = self._vmutils._clone_wmi_obj(
class_name="FakeClass", obj=mock_obj)
compare = self._vmutils._conn.FakeClass.new()
self.assertEqual(mock.sentinel.fake_value,
compare.Properties_.Item().Value)
self.assertEqual(compare, response)
def test_get_attached_disks(self):
mock_scsi_ctrl_path = mock.MagicMock()
expected_query = ("SELECT * FROM %(class_name)s "
"WHERE (ResourceSubType='%(res_sub_type)s' OR "
"ResourceSubType='%(res_sub_type_virt)s')"
" AND Parent='%(parent)s'" %
{"class_name":
self._vmutils._RESOURCE_ALLOC_SETTING_DATA_CLASS,
"res_sub_type":
self._vmutils._PHYS_DISK_RES_SUB_TYPE,
"res_sub_type_virt":
self._vmutils._DISK_DRIVE_RES_SUB_TYPE,
"parent":
mock_scsi_ctrl_path.replace("'", "''")})
expected_disks = self._vmutils._conn.query.return_value
ret_disks = self._vmutils.get_attached_disks(mock_scsi_ctrl_path)
self._vmutils._conn.query.assert_called_once_with(expected_query)
self.assertEqual(expected_disks, ret_disks)

View File

@@ -0,0 +1,237 @@
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.tests.unit.virt.hyperv import test_vmutils
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutilsv2
class VMUtilsV2TestCase(test_vmutils.VMUtilsTestCase):
"""Unit tests for the Hyper-V VMUtilsV2 class."""
_DEFINE_SYSTEM = 'DefineSystem'
_DESTROY_SYSTEM = 'DestroySystem'
_DESTROY_SNAPSHOT = 'DestroySnapshot'
_ADD_RESOURCE = 'AddResourceSettings'
_REMOVE_RESOURCE = 'RemoveResourceSettings'
_SETTING_TYPE = 'VirtualSystemType'
_VM_GEN = constants.VM_GEN_2
_VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'
def setUp(self):
super(VMUtilsV2TestCase, self).setUp()
self._vmutils = vmutilsv2.VMUtilsV2()
self._vmutils._conn = mock.MagicMock()
def test_create_vm(self):
super(VMUtilsV2TestCase, self).test_create_vm()
mock_vssd = self._vmutils._conn.Msvm_VirtualSystemSettingData.new()
self.assertEqual(self._vmutils._VIRTUAL_SYSTEM_SUBTYPE_GEN2,
mock_vssd.VirtualSystemSubType)
self.assertFalse(mock_vssd.SecureBootEnabled)
def test_modify_virt_resource(self):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
mock_svc.ModifyResourceSettings.return_value = (self._FAKE_JOB_PATH,
mock.MagicMock(),
self._FAKE_RET_VAL)
mock_res_setting_data = mock.MagicMock()
mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
self._vmutils._modify_virt_resource(mock_res_setting_data,
self._FAKE_VM_PATH)
mock_svc.ModifyResourceSettings.assert_called_with(
ResourceSettings=[self._FAKE_RES_DATA])
@mock.patch.object(vmutilsv2, 'wmi', create=True)
@mock.patch.object(vmutilsv2.VMUtilsV2, 'check_ret_val')
def test_take_vm_snapshot(self, mock_check_ret_val, mock_wmi):
self._lookup_vm()
mock_svc = self._get_snapshot_service()
mock_svc.CreateSnapshot.return_value = (self._FAKE_JOB_PATH,
mock.MagicMock(),
self._FAKE_RET_VAL)
self._vmutils.take_vm_snapshot(self._FAKE_VM_NAME)
mock_svc.CreateSnapshot.assert_called_with(
AffectedSystem=self._FAKE_VM_PATH,
SnapshotType=self._vmutils._SNAPSHOT_FULL)
mock_check_ret_val.assert_called_once_with(self._FAKE_RET_VAL,
self._FAKE_JOB_PATH)
@mock.patch.object(vmutilsv2.VMUtilsV2, '_add_virt_resource')
@mock.patch.object(vmutilsv2.VMUtilsV2, '_get_new_setting_data')
@mock.patch.object(vmutilsv2.VMUtilsV2, '_get_nic_data_by_name')
def test_set_nic_connection(self, mock_get_nic_data, mock_get_new_sd,
mock_add_virt_res):
self._lookup_vm()
fake_eth_port = mock_get_new_sd.return_value
self._vmutils.set_nic_connection(self._FAKE_VM_NAME, None, None)
mock_add_virt_res.assert_called_with(fake_eth_port, self._FAKE_VM_PATH)
@mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_disks')
def test_enable_vm_metrics_collection(self, mock_get_vm_disks):
self._lookup_vm()
mock_svc = self._vmutils._conn.Msvm_MetricService()[0]
metric_def = mock.MagicMock()
mock_disk = mock.MagicMock()
mock_disk.path_.return_value = self._FAKE_RES_PATH
mock_get_vm_disks.return_value = ([mock_disk], [mock_disk])
fake_metric_def_paths = ['fake_0', 'fake_0', None]
fake_metric_resource_paths = [self._FAKE_VM_PATH,
self._FAKE_VM_PATH,
self._FAKE_RES_PATH]
metric_def.path_.side_effect = fake_metric_def_paths
self._vmutils._conn.CIM_BaseMetricDefinition.return_value = [
metric_def]
self._vmutils.enable_vm_metrics_collection(self._FAKE_VM_NAME)
calls = [mock.call(Name=def_name)
for def_name in [self._vmutils._METRIC_AGGR_CPU_AVG,
self._vmutils._METRIC_AGGR_MEMORY_AVG]]
self._vmutils._conn.CIM_BaseMetricDefinition.assert_has_calls(calls)
calls = []
for i in range(len(fake_metric_def_paths)):
calls.append(mock.call(
Subject=fake_metric_resource_paths[i],
Definition=fake_metric_def_paths[i],
MetricCollectionEnabled=self._vmutils._METRIC_ENABLED))
mock_svc.ControlMetrics.assert_has_calls(calls, any_order=True)
def _get_snapshot_service(self):
return self._vmutils._conn.Msvm_VirtualSystemSnapshotService()[0]
def _assert_add_resources(self, mock_svc):
getattr(mock_svc, self._ADD_RESOURCE).assert_called_with(
self._FAKE_VM_PATH, [self._FAKE_RES_DATA])
def _assert_remove_resources(self, mock_svc):
getattr(mock_svc, self._REMOVE_RESOURCE).assert_called_with(
[self._FAKE_RES_PATH])
def test_list_instance_notes(self):
vs = mock.MagicMock()
attrs = {'ElementName': 'fake_name',
'Notes': ['4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3']}
vs.configure_mock(**attrs)
self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
response = self._vmutils.list_instance_notes()
self.assertEqual([(attrs['ElementName'], attrs['Notes'])], response)
self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
['ElementName', 'Notes'],
VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED)
@mock.patch('nova.virt.hyperv.vmutilsv2.VMUtilsV2.check_ret_val')
@mock.patch('nova.virt.hyperv.vmutilsv2.VMUtilsV2._get_wmi_obj')
def _test_create_vm_obj(self, mock_get_wmi_obj, mock_check_ret_val,
vm_path, dynamic_memory_ratio=1.0):
mock_vs_man_svc = mock.MagicMock()
mock_vs_data = mock.MagicMock()
mock_job = mock.MagicMock()
fake_job_path = 'fake job path'
fake_ret_val = 'fake return value'
_conn = self._vmutils._conn.Msvm_VirtualSystemSettingData
mock_check_ret_val.return_value = mock_job
_conn.new.return_value = mock_vs_data
mock_vs_man_svc.DefineSystem.return_value = (fake_job_path,
vm_path,
fake_ret_val)
mock_job.associators.return_value = ['fake vm path']
response = self._vmutils._create_vm_obj(
vs_man_svc=mock_vs_man_svc,
vm_name='fake vm',
vm_gen='fake vm gen',
notes='fake notes',
dynamic_memory_ratio=dynamic_memory_ratio)
if not vm_path:
mock_job.associators.assert_called_once_with(
self._vmutils._AFFECTED_JOB_ELEMENT_CLASS)
_conn.new.assert_called_once_with()
self.assertEqual(mock_vs_data.ElementName, 'fake vm')
mock_vs_man_svc.DefineSystem.assert_called_once_with(
ResourceSettings=[], ReferenceConfiguration=None,
SystemSettings=mock_vs_data.GetText_(1))
mock_check_ret_val.assert_called_once_with(fake_ret_val, fake_job_path)
if dynamic_memory_ratio > 1:
self.assertFalse(mock_vs_data.VirtualNumaEnabled)
mock_get_wmi_obj.assert_called_with('fake vm path')
self.assertEqual(mock_vs_data.Notes, 'fake notes')
self.assertEqual(response, mock_get_wmi_obj())
def test_create_vm_obj(self):
self._test_create_vm_obj(vm_path='fake vm path')
def test_create_vm_obj_no_vm_path(self):
self._test_create_vm_obj(vm_path=None)
def test_create_vm_obj_dynamic_memory(self):
self._test_create_vm_obj(vm_path=None, dynamic_memory_ratio=1.1)
def test_list_instances(self):
vs = mock.MagicMock()
attrs = {'ElementName': 'fake_name'}
vs.configure_mock(**attrs)
self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
response = self._vmutils.list_instances()
self.assertEqual([(attrs['ElementName'])], response)
self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
['ElementName'],
VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED)
def test_get_attached_disks(self):
mock_scsi_ctrl_path = mock.MagicMock()
expected_query = ("SELECT * FROM %(class_name)s "
"WHERE (ResourceSubType='%(res_sub_type)s' OR "
"ResourceSubType='%(res_sub_type_virt)s' OR "
"ResourceSubType='%(res_sub_type_dvd)s') AND "
"Parent = '%(parent)s'" %
{"class_name":
self._vmutils._RESOURCE_ALLOC_SETTING_DATA_CLASS,
"res_sub_type":
self._vmutils._PHYS_DISK_RES_SUB_TYPE,
"res_sub_type_virt":
self._vmutils._DISK_DRIVE_RES_SUB_TYPE,
"res_sub_type_dvd":
self._vmutils._DVD_DRIVE_RES_SUB_TYPE,
"parent": mock_scsi_ctrl_path.replace("'", "''")})
expected_disks = self._vmutils._conn.query.return_value
ret_disks = self._vmutils.get_attached_disks(mock_scsi_ctrl_path)
self._vmutils._conn.query.assert_called_once_with(expected_query)
self.assertEqual(expected_disks, ret_disks)

View File

@@ -0,0 +1,307 @@
# Copyright 2014 Cloudbase Solutions Srl
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import os
import mock
from nova import exception
from nova.tests.unit.virt.hyperv import db_fakes
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeops
FAKE_TARGET_PORTAL = 'fakeportal:3260'
FAKE_VOLUME_ID = 'fake_volume_id'
class VolumeOpsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for VolumeOps class."""
def setUp(self):
super(VolumeOpsTestCase, self).setUp()
self._volumeops = volumeops.VolumeOps()
def test_get_volume_driver_exception(self):
fake_conn_info = {'driver_volume_type': 'fake_driver'}
self.assertRaises(exception.VolumeDriverNotFound,
self._volumeops._get_volume_driver,
connection_info=fake_conn_info)
def test_fix_instance_volume_disk_paths(self):
block_device_info = db_fakes.get_fake_block_device_info(
FAKE_TARGET_PORTAL, FAKE_VOLUME_ID)
fake_vol_conn_info = (
block_device_info['block_device_mapping'][0]['connection_info'])
with contextlib.nested(
mock.patch.object(self._volumeops,
'_get_volume_driver'),
mock.patch.object(self._volumeops,
'ebs_root_in_block_devices')
) as (mock_get_volume_driver,
mock_ebs_in_block_devices):
fake_vol_driver = mock_get_volume_driver.return_value
mock_ebs_in_block_devices.return_value = False
self._volumeops.fix_instance_volume_disk_paths(
mock.sentinel.instance_name,
block_device_info)
func = fake_vol_driver.fix_instance_volume_disk_path
func.assert_called_once_with(
mock.sentinel.instance_name,
fake_vol_conn_info, 0)
@mock.patch.object(volumeops.VolumeOps, '_get_volume_driver')
def test_disconnect_volumes(self, mock_get_volume_driver):
block_device_info = db_fakes.get_fake_block_device_info(
FAKE_TARGET_PORTAL, FAKE_VOLUME_ID)
block_device_mapping = (
block_device_info['block_device_mapping'])
fake_volume_driver = mock_get_volume_driver.return_value
self._volumeops.disconnect_volumes(block_device_info)
fake_volume_driver.disconnect_volumes.assert_called_once_with(
block_device_mapping)
class ISCSIVolumeDriverTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for Hyper-V ISCSIVolumeDriver class."""
def setUp(self):
super(ISCSIVolumeDriverTestCase, self).setUp()
self._volume_driver = volumeops.ISCSIVolumeDriver()
def test_get_mounted_disk_from_lun(self):
with contextlib.nested(
mock.patch.object(self._volume_driver._volutils,
'get_device_number_for_target'),
mock.patch.object(self._volume_driver._vmutils,
'get_mounted_disk_by_drive_number')
) as (mock_get_device_number_for_target,
mock_get_mounted_disk_by_drive_number):
mock_get_device_number_for_target.return_value = 0
mock_get_mounted_disk_by_drive_number.return_value = (
mock.sentinel.disk_path)
disk = self._volume_driver._get_mounted_disk_from_lun(
mock.sentinel.target_iqn,
mock.sentinel.target_lun)
self.assertEqual(disk, mock.sentinel.disk_path)
def test_fix_instance_volume_disk_path(self):
block_device_info = db_fakes.get_fake_block_device_info(
FAKE_TARGET_PORTAL, FAKE_VOLUME_ID)
fake_vol_conn_info = (
block_device_info['block_device_mapping'][0]['connection_info'])
with contextlib.nested(
mock.patch.object(self._volume_driver,
'_get_mounted_disk_from_lun'),
mock.patch.object(self._volume_driver._vmutils,
'get_vm_scsi_controller'),
mock.patch.object(self._volume_driver._vmutils,
'set_disk_host_resource')
) as (mock_get_mounted_disk_from_lun,
mock_get_vm_scsi_controller,
mock_set_disk_host_resource):
mock_get_mounted_disk_from_lun.return_value = (
mock.sentinel.mounted_path)
mock_get_vm_scsi_controller.return_value = (
mock.sentinel.controller_path)
self._volume_driver.fix_instance_volume_disk_path(
mock.sentinel.instance_name,
fake_vol_conn_info,
mock.sentinel.disk_address)
mock_get_mounted_disk_from_lun.assert_called_with(
'iqn.2010-10.org.openstack:volume-' + FAKE_VOLUME_ID,
1, True)
mock_get_vm_scsi_controller.assert_called_with(
mock.sentinel.instance_name)
mock_set_disk_host_resource.assert_called_once_with(
mock.sentinel.instance_name, mock.sentinel.controller_path,
mock.sentinel.disk_address, mock.sentinel.mounted_path)
@mock.patch('time.sleep')
def test_get_mounted_disk_from_lun_failure(self, fake_sleep):
self.flags(mounted_disk_query_retry_count=1, group='hyperv')
with mock.patch.object(self._volume_driver._volutils,
'get_device_number_for_target') as m_device_num:
m_device_num.side_effect = [None, -1]
self.assertRaises(exception.NotFound,
self._volume_driver._get_mounted_disk_from_lun,
mock.sentinel.target_iqn,
mock.sentinel.target_lun)
@mock.patch.object(volumeops.ISCSIVolumeDriver, 'logout_storage_target')
def test_disconnect_volumes(self, mock_logout_storage_target):
block_device_info = db_fakes.get_fake_block_device_info(
FAKE_TARGET_PORTAL, FAKE_VOLUME_ID)
block_device_mapping = block_device_info['block_device_mapping']
fake_target_iqn = 'iqn.2010-10.org.openstack:volume-' + FAKE_VOLUME_ID
self._volume_driver.disconnect_volumes(block_device_mapping)
mock_logout_storage_target.assert_called_once_with(fake_target_iqn, 1)
class SMBFSVolumeDriverTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V SMBFSVolumeDriver class."""
_FAKE_SHARE = '//1.2.3.4/fake_share'
_FAKE_SHARE_NORMALIZED = _FAKE_SHARE.replace('/', '\\')
_FAKE_DISK_NAME = 'fake_volume_name.vhdx'
_FAKE_USERNAME = 'fake_username'
_FAKE_PASSWORD = 'fake_password'
_FAKE_SMB_OPTIONS = '-o username=%s,password=%s' % (_FAKE_USERNAME,
_FAKE_PASSWORD)
_FAKE_CONNECTION_INFO = {'data': {'export': _FAKE_SHARE,
'name': _FAKE_DISK_NAME,
'options': _FAKE_SMB_OPTIONS}}
def setUp(self):
super(SMBFSVolumeDriverTestCase, self).setUp()
self._volume_driver = volumeops.SMBFSVolumeDriver()
@mock.patch.object(volumeops.SMBFSVolumeDriver, '_parse_credentials')
@mock.patch.object(volumeops.SMBFSVolumeDriver, 'ensure_share_mounted')
@mock.patch.object(volumeops.SMBFSVolumeDriver, '_get_disk_path')
@mock.patch.object(vmutils.VMUtils, 'get_vm_scsi_controller')
@mock.patch.object(vmutils.VMUtils, 'get_free_controller_slot')
@mock.patch.object(vmutils.VMUtils, 'attach_drive')
def _test_attach_volume(self, mock_attach_drive,
mock_get_free_controller_slot,
mock_get_vm_scsi_controller,
mock_get_disk_path,
mock_ensure_share_mounted,
mock_parse_credentials,
image_exists=True):
mock_parse_credentials.return_value = (
mock.sentinel.username, self._FAKE_PASSWORD)
mock_get_vm_scsi_controller.return_value = (
mock.sentinel.controller_path)
mock_get_free_controller_slot.return_value = (
mock.sentinel.controller_slot)
mock_get_disk_path.return_value = (
mock.sentinel.disk_path)
if image_exists:
self._volume_driver.attach_volume(
self._FAKE_CONNECTION_INFO,
mock.sentinel.instance_name)
mock_ensure_share_mounted.assert_called_with(
self._FAKE_CONNECTION_INFO)
mock_get_disk_path.assert_called_with(
self._FAKE_CONNECTION_INFO)
mock_get_vm_scsi_controller.assert_called_with(
mock.sentinel.instance_name)
mock_get_free_controller_slot.assert_called_with(
mock.sentinel.controller_path)
mock_attach_drive.assert_called_with(
mock.sentinel.instance_name, mock.sentinel.disk_path,
mock.sentinel.controller_path,
mock.sentinel.controller_slot)
else:
mock_attach_drive.side_effect = (
vmutils.HyperVException())
self.assertRaises(vmutils.HyperVException,
self._volume_driver.attach_volume,
self._FAKE_CONNECTION_INFO,
mock.sentinel.instance_name)
def test_attach_volume(self):
self._test_attach_volume()
def test_attach_non_existing_image(self):
self._test_attach_volume(image_exists=False)
@mock.patch.object(volumeops.SMBFSVolumeDriver, '_get_disk_path')
@mock.patch.object(vmutils.VMUtils, 'detach_vm_disk')
@mock.patch.object(pathutils.PathUtils, 'unmount_smb_share')
def test_detach_volume(self, mock_unmount_smb_share, mock_detach_vm_disk,
mock_get_disk_path):
mock_get_disk_path.return_value = (
mock.sentinel.disk_path)
self._volume_driver.detach_volume(self._FAKE_CONNECTION_INFO,
mock.sentinel.instance_name)
mock_detach_vm_disk.assert_called_once_with(
mock.sentinel.instance_name, mock.sentinel.disk_path,
is_physical=False)
def test_parse_credentials(self):
username, password = self._volume_driver._parse_credentials(
self._FAKE_SMB_OPTIONS)
self.assertEqual(self._FAKE_USERNAME, username)
self.assertEqual(self._FAKE_PASSWORD, password)
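# test_parse_credentials expects the username and password to be pulled
# straight out of the "-o username=...,password=..." SMB options string.
# One way this could be done (a sketch, not necessarily the driver's own
# parsing):
#
#     import re
#
#     def _parse_credentials(self, smb_options):
#         match = re.match(r'.*username=([^,]+),password=(.+)',
#                          smb_options or '')
#         if not match:
#             return None, None
#         return match.group(1), match.group(2)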
def test_get_disk_path(self):
expected = os.path.join(self._FAKE_SHARE_NORMALIZED,
self._FAKE_DISK_NAME)
disk_path = self._volume_driver._get_disk_path(
self._FAKE_CONNECTION_INFO)
self.assertEqual(expected, disk_path)
@mock.patch.object(volumeops.SMBFSVolumeDriver, '_parse_credentials')
@mock.patch.object(pathutils.PathUtils, 'check_smb_mapping')
@mock.patch.object(pathutils.PathUtils, 'mount_smb_share')
def _test_ensure_mounted(self, mock_mount_smb_share,
mock_check_smb_mapping, mock_parse_credentials,
is_mounted=False):
mock_check_smb_mapping.return_value = is_mounted
mock_parse_credentials.return_value = (
self._FAKE_USERNAME, self._FAKE_PASSWORD)
self._volume_driver.ensure_share_mounted(
self._FAKE_CONNECTION_INFO)
if is_mounted:
self.assertFalse(
mock_mount_smb_share.called)
else:
mock_mount_smb_share.assert_called_once_with(
self._FAKE_SHARE_NORMALIZED,
username=self._FAKE_USERNAME,
password=self._FAKE_PASSWORD)
def test_ensure_mounted_new_share(self):
self._test_ensure_mounted()
def test_ensure_already_mounted(self):
self._test_ensure_mounted(is_mounted=True)
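# The ensure-mounted tests only require that an already-mapped share is
# left untouched and that a new one is mounted with the parsed
# credentials. In rough terms (a sketch; _get_share_name is a placeholder
# for whatever normalizes the export path to backslashes):
#
#     def ensure_share_mounted(self, connection_info):
#         share_name = self._get_share_name(connection_info)
#         if not self._pathutils.check_smb_mapping(share_name):
#             username, password = self._parse_credentials(
#                 connection_info['data'].get('options'))
#             self._pathutils.mount_smb_share(share_name,
#                                             username=username,
#                                             password=password)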
@mock.patch.object(pathutils.PathUtils, 'unmount_smb_share')
def test_disconnect_volumes(self, mock_unmount_smb_share):
block_device_mapping = [
{'connection_info': self._FAKE_CONNECTION_INFO}]
self._volume_driver.disconnect_volumes(block_device_mapping)
mock_unmount_smb_share.assert_called_once_with(
self._FAKE_SHARE_NORMALIZED)

View File

@@ -0,0 +1,164 @@
# Copyright 2014 Cloudbase Solutions Srl
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from nova.tests.unit.virt.hyperv import test_basevolumeutils
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeutils
CONF = cfg.CONF
CONF.import_opt('volume_attach_retry_count', 'nova.virt.hyperv.volumeops',
'hyperv')
class VolumeUtilsTestCase(test_basevolumeutils.BaseVolumeUtilsTestCase):
"""Unit tests for the Hyper-V VolumeUtils class."""
_FAKE_PORTAL_ADDR = '10.1.1.1'
_FAKE_PORTAL_PORT = '3260'
_FAKE_LUN = 0
_FAKE_TARGET = 'iqn.2010-10.org.openstack:fake_target'
_FAKE_STDOUT_VALUE = 'The operation completed successfully'
def setUp(self):
super(VolumeUtilsTestCase, self).setUp()
self._volutils = volumeutils.VolumeUtils()
self._volutils._conn_wmi = mock.MagicMock()
self._volutils._conn_cimv2 = mock.MagicMock()
self.flags(volume_attach_retry_count=4, group='hyperv')
self.flags(volume_attach_retry_interval=0, group='hyperv')
def _test_login_target_portal(self, portal_connected):
fake_portal = '%s:%s' % (self._FAKE_PORTAL_ADDR,
self._FAKE_PORTAL_PORT)
self._volutils.execute = mock.MagicMock()
if portal_connected:
exec_output = 'Address and Socket: %s %s' % (
self._FAKE_PORTAL_ADDR, self._FAKE_PORTAL_PORT)
else:
exec_output = ''
self._volutils.execute.return_value = exec_output
self._volutils._login_target_portal(fake_portal)
call_list = self._volutils.execute.call_args_list
all_call_args = [arg for call in call_list for arg in call[0]]
if portal_connected:
self.assertIn('RefreshTargetPortal', all_call_args)
else:
self.assertIn('AddTargetPortal', all_call_args)
def test_login_connected_portal(self):
self._test_login_target_portal(True)
def test_login_new_portal(self):
self._test_login_target_portal(False)
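# The portal-login tests check only which iscsicli verb ends up on the
# executed command line: RefreshTargetPortal for a portal that is already
# listed, AddTargetPortal otherwise. A sketch built on those assertions
# (the address parsing is simplified here):
#
#     def _login_target_portal(self, target_portal):
#         target_address, target_port = target_portal.split(':')
#         portals = self.execute('iscsicli.exe', 'ListTargetPortals')
#         if portals and target_address in portals:
#             self.execute('iscsicli.exe', 'RefreshTargetPortal',
#                          target_address, target_port)
#         else:
#             self.execute('iscsicli.exe', 'AddTargetPortal',
#                          target_address, target_port)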
def _test_login_target(self, target_connected=False, raise_exception=False,
use_chap=False):
fake_portal = '%s:%s' % (self._FAKE_PORTAL_ADDR,
self._FAKE_PORTAL_PORT)
self._volutils.execute = mock.MagicMock()
self._volutils._login_target_portal = mock.MagicMock()
if use_chap:
username, password = (mock.sentinel.username,
mock.sentinel.password)
else:
username, password = None, None
if target_connected:
self._volutils.execute.return_value = self._FAKE_TARGET
elif raise_exception:
self._volutils.execute.return_value = ''
else:
self._volutils.execute.side_effect = (
['', '', '', self._FAKE_TARGET, ''])
if raise_exception:
self.assertRaises(vmutils.HyperVException,
self._volutils.login_storage_target,
self._FAKE_LUN, self._FAKE_TARGET,
fake_portal, username, password)
else:
self._volutils.login_storage_target(self._FAKE_LUN,
self._FAKE_TARGET,
fake_portal,
username, password)
if target_connected:
call_list = self._volutils.execute.call_args_list
all_call_args = [arg for call in call_list for arg in call[0]]
self.assertNotIn('qlogintarget', all_call_args)
else:
self._volutils.execute.assert_any_call(
'iscsicli.exe', 'qlogintarget',
self._FAKE_TARGET, username, password)
def test_login_connected_target(self):
self._test_login_target(target_connected=True)
def test_login_disconnected_target(self):
self._test_login_target()
def test_login_target_exception(self):
self._test_login_target(raise_exception=True)
def test_login_target_using_chap(self):
self._test_login_target(use_chap=True)
def _test_execute_wrapper(self, raise_exception):
fake_cmd = ('iscsicli.exe', 'ListTargetPortals')
if raise_exception:
output = 'fake error'
else:
output = 'The operation completed successfully'
with mock.patch('nova.utils.execute') as fake_execute:
fake_execute.return_value = (output, None)
if raise_exception:
self.assertRaises(vmutils.HyperVException,
self._volutils.execute,
*fake_cmd)
else:
ret_val = self._volutils.execute(*fake_cmd)
self.assertEqual(output, ret_val)
def test_execute_raise_exception(self):
self._test_execute_wrapper(True)
def test_execute_no_exception(self):
self._test_execute_wrapper(False)
@mock.patch.object(volumeutils, 'utils')
def test_logout_storage_target(self, mock_utils):
mock_utils.execute.return_value = (self._FAKE_STDOUT_VALUE,
mock.sentinel.FAKE_STDERR_VALUE)
session = mock.MagicMock()
session.SessionId = mock.sentinel.FAKE_SESSION_ID
self._volutils._conn_wmi.query.return_value = [session]
self._volutils.logout_storage_target(mock.sentinel.FAKE_IQN)
mock_utils.execute.assert_called_once_with(
'iscsicli.exe', 'logouttarget', mock.sentinel.FAKE_SESSION_ID)

View File

@@ -0,0 +1,164 @@
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from nova import test
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeutilsv2
CONF = cfg.CONF
CONF.import_opt('volume_attach_retry_count', 'nova.virt.hyperv.volumeops',
'hyperv')
class VolumeUtilsV2TestCase(test.NoDBTestCase):
"""Unit tests for the Hyper-V VolumeUtilsV2 class."""
_FAKE_PORTAL_ADDR = '10.1.1.1'
_FAKE_PORTAL_PORT = '3260'
_FAKE_LUN = 0
_FAKE_TARGET = 'iqn.2010-10.org.openstack:fake_target'
def setUp(self):
super(VolumeUtilsV2TestCase, self).setUp()
self._volutilsv2 = volumeutilsv2.VolumeUtilsV2()
self._volutilsv2._conn_storage = mock.MagicMock()
self._volutilsv2._conn_wmi = mock.MagicMock()
self.flags(volume_attach_retry_count=4, group='hyperv')
self.flags(volume_attach_retry_interval=0, group='hyperv')
def _test_login_target_portal(self, portal_connected):
fake_portal = '%s:%s' % (self._FAKE_PORTAL_ADDR,
self._FAKE_PORTAL_PORT)
fake_portal_object = mock.MagicMock()
_query = self._volutilsv2._conn_storage.query
self._volutilsv2._conn_storage.MSFT_iSCSITargetPortal = (
fake_portal_object)
if portal_connected:
_query.return_value = [fake_portal_object]
else:
_query.return_value = None
self._volutilsv2._login_target_portal(fake_portal)
if portal_connected:
fake_portal_object.Update.assert_called_once_with()
else:
fake_portal_object.New.assert_called_once_with(
TargetPortalAddress=self._FAKE_PORTAL_ADDR,
TargetPortalPortNumber=self._FAKE_PORTAL_PORT)
def test_login_connected_portal(self):
self._test_login_target_portal(True)
def test_login_new_portal(self):
self._test_login_target_portal(False)
def _test_login_target(self, target_connected=False, raise_exception=False,
use_chap=False):
fake_portal = '%s:%s' % (self._FAKE_PORTAL_ADDR,
self._FAKE_PORTAL_PORT)
fake_target_object = mock.MagicMock()
if target_connected:
fake_target_object.IsConnected = True
elif not raise_exception:
type(fake_target_object).IsConnected = mock.PropertyMock(
side_effect=[False, True])
else:
fake_target_object.IsConnected = False
_query = self._volutilsv2._conn_storage.query
_query.return_value = [fake_target_object]
self._volutilsv2._conn_storage.MSFT_iSCSITarget = (
fake_target_object)
if use_chap:
username, password = (mock.sentinel.username,
mock.sentinel.password)
auth = {
'AuthenticationType': self._volutilsv2._CHAP_AUTH_TYPE,
'ChapUsername': username,
'ChapSecret': password,
}
else:
username, password = None, None
auth = {}
if raise_exception:
self.assertRaises(vmutils.HyperVException,
self._volutilsv2.login_storage_target,
self._FAKE_LUN, self._FAKE_TARGET, fake_portal)
else:
self._volutilsv2.login_storage_target(self._FAKE_LUN,
self._FAKE_TARGET,
fake_portal,
username, password)
if target_connected:
fake_target_object.Update.assert_called_with()
else:
fake_target_object.Connect.assert_called_once_with(
IsPersistent=True, NodeAddress=self._FAKE_TARGET, **auth)
def test_login_connected_target(self):
self._test_login_target(target_connected=True)
def test_login_disconnected_target(self):
self._test_login_target()
def test_login_target_exception(self):
self._test_login_target(raise_exception=True)
def test_login_target_using_chap(self):
self._test_login_target(use_chap=True)
def test_logout_storage_target(self):
mock_msft_target = self._volutilsv2._conn_storage.MSFT_iSCSITarget
mock_msft_session = self._volutilsv2._conn_storage.MSFT_iSCSISession
mock_target = mock.MagicMock()
mock_target.IsConnected = True
mock_msft_target.return_value = [mock_target]
mock_session = mock.MagicMock()
mock_session.IsPersistent = True
mock_msft_session.return_value = [mock_session]
self._volutilsv2.logout_storage_target(self._FAKE_TARGET)
mock_msft_target.assert_called_once_with(NodeAddress=self._FAKE_TARGET)
mock_msft_session.assert_called_once_with(
TargetNodeAddress=self._FAKE_TARGET)
mock_session.Unregister.assert_called_once_with()
mock_target.Disconnect.assert_called_once_with()
@mock.patch.object(volumeutilsv2.VolumeUtilsV2, 'logout_storage_target')
def test_execute_log_out(self, mock_logout_target):
sess_class = self._volutilsv2._conn_wmi.MSiSCSIInitiator_SessionClass
mock_session = mock.MagicMock()
sess_class.return_value = [mock_session]
self._volutilsv2.execute_log_out(mock.sentinel.FAKE_SESSION_ID)
sess_class.assert_called_once_with(
SessionId=mock.sentinel.FAKE_SESSION_ID)
mock_logout_target.assert_called_once_with(mock_session.TargetName)