diff --git a/nova/conf/__init__.py b/nova/conf/__init__.py
index c9843ed67f1c..b8b4d4906a51 100644
--- a/nova/conf/__init__.py
+++ b/nova/conf/__init__.py
@@ -65,7 +65,6 @@ from nova.conf import vmware
 from nova.conf import vnc
 from nova.conf import workarounds
 from nova.conf import wsgi
-from nova.conf import xenserver
 from nova.conf import zvm
 
 CONF = cfg.CONF
@@ -116,5 +115,4 @@ vmware.register_opts(CONF)
 vnc.register_opts(CONF)
 workarounds.register_opts(CONF)
 wsgi.register_opts(CONF)
-xenserver.register_opts(CONF)
 zvm.register_opts(CONF)
diff --git a/nova/conf/xenserver.py b/nova/conf/xenserver.py
deleted file mode 100644
index 56e7e93b9cef..000000000000
--- a/nova/conf/xenserver.py
+++ /dev/null
@@ -1,549 +0,0 @@
-# Copyright 2016 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import socket
-
-from oslo_config import cfg
-from oslo_utils import units
-
-xenserver_group = cfg.OptGroup('xenserver',
-                               title='Xenserver Options',
-                               help="""
-.. warning:: The xenapi driver is deprecated and may be removed in a future
-    release. The driver is not tested by the OpenStack project nor
-    does it have clear maintainer(s) and thus its quality can not be
-    ensured. If you are using the driver in production please let us
-    know in freenode IRC and/or the openstack-discuss mailing list.
-
-XenServer options are used when the compute_driver is set to use
-XenServer (compute_driver=xenapi.XenAPIDriver).
-
-Must specify connection_url, connection_password and ovs_integration_bridge to
-use compute_driver=xenapi.XenAPIDriver.
-""")
-
-xenapi_agent_opts = [
-    cfg.IntOpt('agent_timeout',
-               default=30,
-               min=0,
-               help="""
-Number of seconds to wait for agent's reply to a request.
-
-Nova configures/performs certain administrative actions on a server with the
-help of an agent that's installed on the server. The communication between
-Nova and the agent is achieved via sharing messages, called records, over
-xenstore, a shared storage across all the domains on a Xenserver host.
-Operations performed by the agent on behalf of nova are: 'version',
-'key_init', 'password', 'resetnetwork', 'inject_file', and 'agentupdate'.
-
-To perform one of the above operations, the xapi 'agent' plugin writes the
-command and its associated parameters to a certain location known to the domain
-and awaits response. On being notified of the message, the agent performs
-appropriate actions on the server and writes the result back to xenstore. This
-result is then read by the xapi 'agent' plugin to determine the success/failure
-of the operation.
-
-This config option determines how long the xapi 'agent' plugin shall wait to
-read the response off of xenstore for a given request/command. If the agent on
-the instance fails to write the result in this time period, the operation is
-considered to have timed out.
-
-Related options:
-
-* ``agent_version_timeout``
-* ``agent_resetnetwork_timeout``
-
-"""),
-    cfg.IntOpt('agent_version_timeout',
-               default=300,
-               min=0,
-               help="""
-Number of seconds to wait for agent's reply to version request.
-
-This indicates the amount of time xapi 'agent' plugin waits for the agent to
-respond to the 'version' request specifically. The generic timeout for agent
-communication ``agent_timeout`` is ignored in this case.
-
-During the build process the 'version' request is used to determine if the
-agent is available/operational to perform other requests such as
-'resetnetwork', 'password', 'key_init' and 'inject_file'. If the 'version' call
-fails, the other configuration is skipped. So, this configuration option can
-also be interpreted as time in which agent is expected to be fully operational.
-"""),
-    cfg.IntOpt('agent_resetnetwork_timeout',
-               default=60,
-               min=0,
-               help="""
-Number of seconds to wait for agent's reply to resetnetwork
-request.
-
-This indicates the amount of time xapi 'agent' plugin waits for the agent to
-respond to the 'resetnetwork' request specifically. The generic timeout for
-agent communication ``agent_timeout`` is ignored in this case.
-"""),
-    cfg.StrOpt('agent_path',
-               default='usr/sbin/xe-update-networking',
-               help="""
-Path to locate guest agent on the server.
-
-Specifies the path in which the XenAPI guest agent should be located. If the
-agent is present, network configuration is not injected into the image.
-
-Related options:
-
-For this option to have an effect:
-* ``flat_injected`` should be set to ``True``
-* ``compute_driver`` should be set to ``xenapi.XenAPIDriver``
-
-"""),
-    cfg.BoolOpt('disable_agent',
-                default=False,
-                help="""
-Disables the use of XenAPI agent.
-
-This configuration option suggests whether the use of agent should be enabled
-or not regardless of what image properties are present. Image properties have
-an effect only when this is set to ``True``. Read description of config option
-``use_agent_default`` for more information.
-
-Related options:
-
-* ``use_agent_default``
-
-"""),
-    cfg.BoolOpt('use_agent_default',
-                default=False,
-                help="""
-Whether or not to use the agent by default when its usage is enabled but not
-indicated by the image.
-
-The use of XenAPI agent can be disabled altogether using the configuration
-option ``disable_agent``. However, if it is not disabled, the use of an agent
-can still be controlled by the image in use through one of its properties,
-``xenapi_use_agent``. If this property is either not present or specified
-incorrectly on the image, the use of agent is determined by this configuration
-option.
-
-Note that if this configuration is set to ``True`` when the agent is not
-present, the boot times will increase significantly.
-
-Related options:
-
-* ``disable_agent``
-
-"""),
-]
-
-
-xenapi_session_opts = [
-    cfg.IntOpt('login_timeout',
-               default=10,
-               min=0,
-               help='Timeout in seconds for XenAPI login.'),
-    cfg.IntOpt('connection_concurrent',
-               default=5,
-               min=1,
-               help="""
-Maximum number of concurrent XenAPI connections.
-
-In nova, multiple XenAPI requests can happen at a time.
-Configuring this option will parallelize access to the XenAPI
-session, which allows you to make concurrent XenAPI connections.
-"""),
-]
-
-
-xenapi_vm_utils_opts = [
-    cfg.StrOpt('cache_images',
-               default='all',
-               choices=[
-                   ('all', 'Will cache all images'),
-                   ('some', 'Will only cache images that have the '
-                            'image_property ``cache_in_nova=True``'),
-                   ('none', 'Turns off caching entirely')],
-               help="""
-Cache glance images locally.
-
-The value for this option must be chosen from the choices listed
-here. Configuring a value other than these will default to 'all'.
-
-Note: There is nothing that deletes these images.
-"""),
-    cfg.IntOpt('image_compression_level',
-               min=1,
-               max=9,
-               help="""
-Compression level for images.
-
-By setting this option we can configure the gzip compression level.
-This option sets GZIP environment variable before spawning tar -cz
-to force the compression level. It defaults to none, which means the
-GZIP environment variable is not set and the default (usually -6)
-is used.
-
-Possible values:
-
-* Range is 1-9, e.g., 9 for gzip -9, 9 being most
-  compressed but most CPU intensive on dom0.
-* Any values out of this range will default to None.
-"""),
-    cfg.StrOpt('default_os_type',
-               default='linux',
-               help='Default OS type used when uploading an image to glance'),
-    cfg.IntOpt('block_device_creation_timeout',
-               default=10,
-               min=1,
-               help='Time in secs to wait for a block device to be created'),
-    cfg.IntOpt('max_kernel_ramdisk_size',
-               default=16 * units.Mi,
-               help="""
-Maximum size in bytes of kernel or ramdisk images.
-
-Specifying the maximum size of kernel or ramdisk will avoid copying
-large files to dom0 and fill up /boot/guest.
-"""),
-    cfg.StrOpt('sr_matching_filter',
-               default='default-sr:true',
-               help="""
-Filter for finding the SR to be used to install guest instances on.
-
-Possible values:
-
-* To use the Local Storage in default XenServer/XCP installations
-  set this flag to other-config:i18n-key=local-storage.
-* To select an SR with a different matching criteria, you could
-  set it to other-config:my_favorite_sr=true.
-* To fall back on the Default SR, as displayed by XenCenter,
-  set this flag to: default-sr:true.
-"""),
-    cfg.BoolOpt('sparse_copy',
-                default=True,
-                help="""
-Whether to use sparse_copy for copying data on a resize down.
-(False will use standard dd). This speeds up resizes down
-considerably since large runs of zeros won't have to be rsynced.
-"""),
-    cfg.IntOpt('num_vbd_unplug_retries',
-               default=10,
-               min=0,
-               help="""
-Maximum number of retries to unplug VBD.
-If set to 0, should try once, no retries.
-"""),
-    cfg.StrOpt('ipxe_network_name',
-               help="""
-Name of network to use for booting iPXE ISOs.
-
-An iPXE ISO is a specially crafted ISO which supports iPXE booting.
-This feature gives a means to roll your own image.
-
-By default this option is not set. Enable this option to
-boot an iPXE ISO.
-
-Related Options:
-
-* `ipxe_boot_menu_url`
-* `ipxe_mkisofs_cmd`
-"""),
-    cfg.StrOpt('ipxe_boot_menu_url',
-               help="""
-URL to the iPXE boot menu.
-
-An iPXE ISO is a specially crafted ISO which supports iPXE booting.
-This feature gives a means to roll your own image.
-
-By default this option is not set. Enable this option to
-boot an iPXE ISO.
-
-Related Options:
-
-* `ipxe_network_name`
-* `ipxe_mkisofs_cmd`
-"""),
-    cfg.StrOpt('ipxe_mkisofs_cmd',
-               default='mkisofs',
-               help="""
-Name and optionally path of the tool used for ISO image creation.
-
-An iPXE ISO is a specially crafted ISO which supports iPXE booting.
-This feature gives a means to roll your own image.
-
-Note: By default `mkisofs` is not present in the Dom0, so the
-package can either be manually added to Dom0 or include the
-`mkisofs` binary in the image itself.
-
-Related Options:
-
-* `ipxe_network_name`
-* `ipxe_boot_menu_url`
-"""),
-]
-
-
-xenapi_opts = [
-    cfg.StrOpt('connection_url',
-               help="""
-URL for connection to XenServer/Xen Cloud Platform. A special value
-of unix://local can be used to connect to the local unix socket.
-
-Possible values:
-
-* Any string that represents a URL. The connection_url is
-  generally the management network IP address of the XenServer.
-* This option must be set if you chose the XenServer driver.
-"""),
-    cfg.StrOpt('connection_username',
-               default='root',
-               help='Username for connection to XenServer/Xen Cloud Platform'),
-    cfg.StrOpt('connection_password',
-               secret=True,
-               help='Password for connection to XenServer/Xen Cloud Platform'),
-    cfg.FloatOpt('vhd_coalesce_poll_interval',
-                 default=5.0,
-                 min=0,
-                 help="""
-The interval used for polling of coalescing vhds.
-
-This is the interval after which the task of coalesce VHD is
-performed, until it reaches the max attempts that is set by
-vhd_coalesce_max_attempts.
-
-Related options:
-
-* `vhd_coalesce_max_attempts`
-"""),
-    cfg.BoolOpt('check_host',
-                default=True,
-                help="""
-Ensure compute service is running on host XenAPI connects to.
-This option must be set to false if the 'independent_compute'
-option is set to true.
-
-Possible values:
-
-* Setting this option to true will make sure that compute service
-  is running on the same host that is specified by connection_url.
-* Setting this option to false, doesn't perform the check.
-
-Related options:
-
-* `independent_compute`
-"""),
-    cfg.IntOpt('vhd_coalesce_max_attempts',
-               default=20,
-               min=0,
-               help="""
-Max number of times to poll for VHD to coalesce.
-
-This option determines the maximum number of attempts that can be
-made for coalescing the VHD before giving up.
-
-Related options:
-
-* `vhd_coalesce_poll_interval`
-"""),
-    cfg.StrOpt('sr_base_path',
-               default='/var/run/sr-mount',
-               help='Base path to the storage repository on the XenServer host.'),
-    cfg.HostAddressOpt('target_host',
-                       help="""
-The iSCSI Target Host.
-
-This option represents the hostname or ip of the iSCSI Target.
-If the target host is not present in the connection information from
-the volume provider then the value from this option is taken.
-
-Possible values:
-
-* Any string that represents hostname/ip of Target.
-"""),
-    cfg.PortOpt('target_port',
-                default=3260,
-                help="""
-The iSCSI Target Port.
-
-This option represents the port of the iSCSI Target. If the
-target port is not present in the connection information from the
-volume provider then the value from this option is taken.
-"""),
-    cfg.BoolOpt('independent_compute',
-                default=False,
-                help="""
-Used to prevent attempts to attach VBDs locally, so Nova can
-be run in a VM on a different host.
-
-Related options:
-
-* ``CONF.flat_injected`` (Must be False)
-* ``CONF.xenserver.check_host`` (Must be False)
-* ``CONF.default_ephemeral_format`` (Must be unset or 'ext3')
-* Joining host aggregates (will error if attempted)
-* Swap disks for Windows VMs (will error if attempted)
-* Nova-based auto_configure_disk (will error if attempted)
-""")
-]
-
-xenapi_vmops_opts = [
-    cfg.IntOpt('running_timeout',
-               default=60,
-               min=0,
-               help="""
-Wait time for instances to go to running state.
-
-Provide an integer value representing time in seconds to set the
-wait time for an instance to go to running state.
-
-When a request to create an instance is received by nova-api and
-communicated to nova-compute, the creation of the instance occurs
-through interaction with Xen via XenAPI in the compute node. Once
-the node on which the instance(s) are to be launched is decided by
-nova-schedule and the launch is triggered, a certain amount of wait
-time is involved until the instance(s) can become available and
-'running'. This wait time is defined by running_timeout. If the
-instances do not go to running state within this specified wait
-time, the launch expires and the instance(s) are set to 'error'
-state.
-"""),
-    # TODO(dharinic): Make this a stevedore plugin
-    cfg.StrOpt('image_upload_handler',
-               default='',
-               deprecated_for_removal=True,
-               deprecated_since='18.0.0',
-               deprecated_reason="""
-Instead of setting the class path here, we will use short names
-to represent image handlers. The download and upload handlers
-must also be matching. So another new option "image_handler"
-will be used to set the short name for a specific image handler
-for both image download and upload.
-""",
-               help="""
-Dom0 plugin driver used to handle image uploads.
-
-Provide a string value representing a plugin driver required to
-handle the image uploading to GlanceStore.
-
-Images, and snapshots from XenServer need to be uploaded to the data
-store for use. image_upload_handler takes in a value for the Dom0
-plugin driver. This driver is then called to upload images to the
-GlanceStore.
-"""),
-    cfg.StrOpt('image_handler',
-               default='direct_vhd',
-               choices=[
-                   ('direct_vhd', 'This plugin directly processes the VHD files in '
-                    'XenServer SR(Storage Repository). So this plugin only works '
-                    'when the host\'s SR type is file system based e.g. ext, nfs.'),
-                   ('vdi_local_dev', 'This plugin implements an image handler which '
-                    'attaches the instance\'s VDI as a local disk to the VM where '
-                    'the OpenStack Compute service runs. It uploads the raw disk '
-                    'to glance when creating image; when booting an instance from a '
-                    'glance image, it downloads the image and streams it into the '
-                    'disk which is attached to the compute VM.'),
-                   ('vdi_remote_stream', 'This plugin implements an image handler '
-                    'which works as a proxy between glance and XenServer. The VHD '
-                    'streams to XenServer via a remote import API supplied by XAPI '
-                    'for image download; and for image upload, the VHD streams from '
-                    'XenServer via a remote export API supplied by XAPI. This '
-                    'plugin works for all SR types supported by XenServer.'),
-               ],
-               help="""
-The plugin used to handle image uploads and downloads.
-
-Provide a short name representing an image driver required to
-handle the image between compute host and glance.
-"""),
-]
-
-xenapi_volume_utils_opts = [
-    cfg.IntOpt('introduce_vdi_retry_wait',
-               default=20,
-               min=0,
-               help="""
-Number of seconds to wait for SR to settle if the VDI
-does not exist when first introduced.
-
-Some SRs, particularly iSCSI connections are slow to see the VDIs
-right after they got introduced. Setting this option to a
-time interval will make the SR to wait for that time period
-before raising VDI not found exception.
-""")
-]
-
-xenapi_ovs_integration_bridge_opts = [
-    cfg.StrOpt('ovs_integration_bridge',
-               help="""
-The name of the integration Bridge that is used with xenapi
-when connecting with Open vSwitch.
-
-Note: The value of this config option is dependent on the
-environment, therefore this configuration value must be set
-accordingly if you are using XenAPI.
-
-Possible values:
-
-* Any string that represents a bridge name.
-"""),
-]
-
-xenapi_pool_opts = [
-    # TODO(macsz): This should be deprecated. Until providing solid reason,
-    # leaving it as-it-is.
-    cfg.BoolOpt('use_join_force',
-                default=True,
-                help="""
-When adding new host to a pool, this will append a --force flag to the
-command, forcing hosts to join a pool, even if they have different CPUs.
-
-Since XenServer version 5.6 it is possible to create a pool of hosts that have
-different CPU capabilities. To accommodate CPU differences, XenServer limited
-features it uses to determine CPU compatibility to only the ones that are
-exposed by CPU and support for CPU masking was added.
-Despite this effort to level differences between CPUs, it is still possible
-that adding new host will fail, thus option to force join was introduced.
-"""),
-]
-
-xenapi_console_opts = [
-    cfg.StrOpt('console_public_hostname',
-               default=socket.gethostname(),
-               sample_default='<current_hostname>',
-               deprecated_group='DEFAULT',
-               help="""
-Publicly visible name for this console host.
-
-Possible values:
-
-* Current hostname (default) or any string representing hostname.
-"""),
-]
-
-ALL_XENSERVER_OPTS = (xenapi_agent_opts +
-                      xenapi_session_opts +
-                      xenapi_vm_utils_opts +
-                      xenapi_opts +
-                      xenapi_vmops_opts +
-                      xenapi_volume_utils_opts +
-                      xenapi_ovs_integration_bridge_opts +
-                      xenapi_pool_opts +
-                      xenapi_console_opts)
-
-
-def register_opts(conf):
-    conf.register_group(xenserver_group)
-    conf.register_opts(ALL_XENSERVER_OPTS, group=xenserver_group)
-
-
-def list_opts():
-    return {xenserver_group: ALL_XENSERVER_OPTS}
diff --git a/nova/exception.py b/nova/exception.py
index 48a7e01d8890..f58705d8c1de 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -713,19 +713,11 @@ class ImageDeleteConflict(NovaException):
     msg_fmt = _("Conflict deleting image. Reason: %(reason)s.")
 
 
-class ImageHandlerUnsupported(NovaException):
-    msg_fmt = _("Error: unsupported image handler %(image_handler)s.")
-
-
 class PreserveEphemeralNotSupported(Invalid):
     msg_fmt = _("The current driver does not support "
                 "preserving ephemeral partitions.")
 
 
-class StorageRepositoryNotFound(NotFound):
-    msg_fmt = _("Cannot find SR to read/write VDI.")
-
-
 class InstanceMappingNotFound(NotFound):
     msg_fmt = _("Instance %(uuid)s has no mapping to a cell.")
 
@@ -1193,10 +1185,6 @@ class BootFromVolumeRequiredForZeroDiskFlavor(Forbidden):
                 "zero disk.")
 
 
-class InsufficientFreeMemory(NovaException):
-    msg_fmt = _("Insufficient free memory on compute node to start %(uuid)s.")
-
-
 class NoValidHost(NovaException):
     msg_fmt = _("No valid host was found. %(reason)s")
 
@@ -1255,6 +1243,7 @@ class PortLimitExceeded(QuotaError):
     msg_fmt = _("Maximum number of ports exceeded")
 
 
+# TODO(stephenfin): Remove this XenAPI relic
 class AggregateError(NovaException):
     msg_fmt = _("Aggregate %(aggregate_id)s: action '%(action)s' "
                 "caused an error: %(reason)s.")
@@ -1458,19 +1447,6 @@ class ObjectActionError(NovaException):
     msg_fmt = _('Object action %(action)s failed because: %(reason)s')
 
 
-class AgentError(NovaException):
-    msg_fmt = _('Error during following call to agent: %(method)s')
-
-
-class AgentTimeout(AgentError):
-    msg_fmt = _('Unable to contact guest agent. '
-                'The following call timed out: %(method)s')
-
-
-class AgentNotImplemented(AgentError):
-    msg_fmt = _('Agent does not support the call: %(method)s')
-
-
 class InstanceGroupNotFound(NotFound):
     msg_fmt = _("Instance group %(group_uuid)s could not be found.")
 
diff --git a/nova/privsep/xenapi.py b/nova/privsep/xenapi.py
deleted file mode 100644
index d1e470dff525..000000000000
--- a/nova/privsep/xenapi.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2018 Michael Still and Aptira
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-xenapi specific routines.
-"""
-
-from oslo_concurrency import processutils
-
-import nova.privsep
-
-
-@nova.privsep.sys_admin_pctxt.entrypoint
-def xenstore_read(path):
-    return processutils.execute('xenstore-read', path)
-
-
-@nova.privsep.sys_admin_pctxt.entrypoint
-def block_copy(src_path, dst_path, block_size, num_blocks):
-    processutils.execute('dd',
-                         'if=%s' % src_path,
-                         'of=%s' % dst_path,
-                         'bs=%d' % block_size,
-                         'count=%d' % num_blocks,
-                         'iflag=direct,sync',
-                         'oflag=direct,sync')
diff --git a/nova/tests/unit/compute/test_compute_xen.py b/nova/tests/unit/compute/test_compute_xen.py
deleted file mode 100644
index 6fbab5e6eb81..000000000000
--- a/nova/tests/unit/compute/test_compute_xen.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Tests for expectations of behaviour from the Xen driver."""
-
-import mock
-
-from nova.compute import manager
-from nova.compute import power_state
-from nova import context
-from nova import objects
-from nova.objects import instance as instance_obj
-from nova.tests.unit.compute import eventlet_utils
-from nova.tests.unit import fake_instance
-from nova.tests.unit.virt.xenapi import stubs
-from nova.virt.xenapi import vm_utils
-
-
-class ComputeXenTestCase(stubs.XenAPITestBaseNoDB):
-    def setUp(self):
-        super(ComputeXenTestCase, self).setUp()
-        self.flags(compute_driver='xenapi.XenAPIDriver')
-        self.flags(connection_url='http://localhost',
-                   connection_password='test_pass',
-                   group='xenserver')
-
-        stubs.stubout_session(self, stubs.FakeSessionForVMTests)
-        self.compute = manager.ComputeManager()
-        # execute power syncing synchronously for testing:
-        self.compute._sync_power_pool = eventlet_utils.SyncPool()
-
-    def test_sync_power_states_instance_not_found(self):
-        db_instance = fake_instance.fake_db_instance()
-        ctxt = context.get_admin_context()
-        instance_list = instance_obj._make_instance_list(ctxt,
-            objects.InstanceList(), [db_instance], None)
-        instance = instance_list[0]
-
-        @mock.patch.object(vm_utils, 'lookup')
-        @mock.patch.object(objects.InstanceList, 'get_by_host')
-        @mock.patch.object(self.compute.driver, 'get_num_instances')
-        @mock.patch.object(self.compute, '_sync_instance_power_state')
-        def do_test(mock_compute_sync_powerstate,
-                    mock_compute_get_num_instances,
-                    mock_instance_list_get_by_host,
-                    mock_vm_utils_lookup):
-            mock_instance_list_get_by_host.return_value = instance_list
-            mock_compute_get_num_instances.return_value = 1
-            mock_vm_utils_lookup.return_value = None
-
-            self.compute._sync_power_states(ctxt)
-
-            mock_instance_list_get_by_host.assert_called_once_with(
-                ctxt, self.compute.host, expected_attrs=[], use_slave=True)
-            mock_compute_get_num_instances.assert_called_once_with()
-            mock_compute_sync_powerstate.assert_called_once_with(
-                ctxt, instance, power_state.NOSTATE, use_slave=True)
-            mock_vm_utils_lookup.assert_called_once_with(
-                self.compute.driver._session, instance['name'],
-                False)
-
-        do_test()
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index 1c28860fb7ed..ccb6699ead49 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -19727,17 +19727,15 @@ class LibvirtConnTestCase(test.NoDBTestCase,
     def test_unplug_vifs_ignores_errors(self):
         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
         with mock.patch.object(drvr, 'vif_driver') as vif_driver:
-            vif_driver.unplug.side_effect = exception.AgentError(
-                method='unplug')
+            vif_driver.unplug.side_effect = exception.InternalError('foo')
             drvr._unplug_vifs('inst', [1], ignore_errors=True)
             vif_driver.unplug.assert_called_once_with('inst', 1)
 
     def test_unplug_vifs_reports_errors(self):
         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
         with mock.patch.object(drvr, 'vif_driver') as vif_driver:
-            vif_driver.unplug.side_effect = exception.AgentError(
-                method='unplug')
-            self.assertRaises(exception.AgentError,
+            vif_driver.unplug.side_effect = exception.InternalError('foo')
+            self.assertRaises(exception.InternalError,
                               drvr.unplug_vifs, 'inst', [1])
             vif_driver.unplug.assert_called_once_with('inst', 1)
diff --git a/nova/tests/unit/virt/xenapi/__init__.py b/nova/tests/unit/virt/xenapi/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/nova/tests/unit/virt/xenapi/image/__init__.py b/nova/tests/unit/virt/xenapi/image/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/nova/tests/unit/virt/xenapi/image/test_glance.py b/nova/tests/unit/virt/xenapi/image/test_glance.py
deleted file mode 100644
index be49622e7332..000000000000
--- a/nova/tests/unit/virt/xenapi/image/test_glance.py
+++ /dev/null
@@ -1,334 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import random
-import time
-
-import mock
-from os_xenapi.client import exception as xenapi_exception
-from os_xenapi.client import host_glance
-from os_xenapi.client import XenAPI
-
-from nova.compute import utils as compute_utils
-from nova import context
-from nova import exception
-from nova.image import glance as common_glance
-from nova.tests.unit.virt.xenapi import stubs
-from nova import utils
-from nova.virt.xenapi import driver as xenapi_conn
-from nova.virt.xenapi import fake
-from nova.virt.xenapi.image import glance
-from nova.virt.xenapi import vm_utils
-
-
-class TestGlanceStore(stubs.XenAPITestBaseNoDB):
-    def setUp(self):
-        super(TestGlanceStore, self).setUp()
-        self.store = glance.GlanceStore()
-
-        self.flags(api_servers=['http://localhost:9292'], group='glance')
-        self.flags(connection_url='http://localhost',
-                   connection_password='test_pass',
-                   group='xenserver')
-
-        self.context = context.RequestContext(
-            'user', 'project', auth_token='foobar')
-
-        fake.reset()
-        stubs.stubout_session(self, fake.SessionBase)
-        driver = xenapi_conn.XenAPIDriver(False)
-        self.session = driver._session
-
-        self.stub_out('nova.virt.xenapi.vm_utils.get_sr_path',
-                      lambda *a, **kw: '/fake/sr/path')
-
-        self.instance = {'uuid': 'blah',
-                         'system_metadata': [],
-                         'auto_disk_config': True,
-                         'os_type': 'default',
-                         'xenapi_use_agent': 'true'}
-
-    def _get_params(self):
-        return {'image_id': 'fake_image_uuid',
-                'endpoint': 'http://localhost:9292',
-                'sr_path': '/fake/sr/path',
-                'api_version': 2,
-                'extra_headers': {'X-Auth-Token': 'foobar',
-                                  'X-Roles': '',
-                                  'X-Tenant-Id': 'project',
-                                  'X-User-Id': 'user',
-                                  'X-Identity-Status': 'Confirmed'}}
-
-    def _get_download_params(self):
-        params = self._get_params()
-        params['uuid_stack'] = ['uuid1']
-        return params
-
-    @mock.patch.object(vm_utils, '_make_uuid_stack', return_value=['uuid1'])
-    def test_download_image(self, mock_make_uuid_stack):
-        params = self._get_download_params()
-        with mock.patch.object(self.session, 'call_plugin_serialized'
-                               ) as mock_call_plugin:
-            self.store.download_image(self.context, self.session,
-                                      self.instance, 'fake_image_uuid')
-
-            mock_call_plugin.assert_called_once_with('glance.py',
-                                                     'download_vhd2',
-                                                     **params)
-            mock_make_uuid_stack.assert_called_once_with()
-
-    @mock.patch.object(vm_utils, '_make_uuid_stack', return_value=['uuid1'])
-    @mock.patch.object(random, 'shuffle')
-    @mock.patch.object(time, 'sleep')
-    @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
-    def test_download_image_retry(self, mock_fault, mock_sleep,
-                                  mock_shuffle, mock_make_uuid_stack):
-        params = self._get_download_params()
-        self.flags(num_retries=2, group='glance')
-
-        params.pop("endpoint")
-        calls = [mock.call('glance.py', 'download_vhd2',
-                           endpoint='http://10.0.1.1:9292',
-                           **params),
-                 mock.call('glance.py', 'download_vhd2',
-                           endpoint='http://10.0.0.1:9293',
-                           **params)]
-
-        glance_api_servers = ['http://10.0.1.1:9292',
-                              'http://10.0.0.1:9293']
-        self.flags(api_servers=glance_api_servers, group='glance')
-
-        with (mock.patch.object(self.session, 'call_plugin_serialized')
-              ) as mock_call_plugin_serialized:
-            error_details = ["", "", "RetryableError", ""]
-            error = self.session.XenAPI.Failure(details=error_details)
-            mock_call_plugin_serialized.side_effect = [error, "success"]
-
-            self.store.download_image(self.context, self.session,
-                                      self.instance, 'fake_image_uuid')
-
-            mock_call_plugin_serialized.assert_has_calls(calls)
-
-            self.assertEqual(1, mock_fault.call_count)
-
-    def _get_upload_params(self, auto_disk_config=True,
-                           expected_os_type='default'):
-        params = {}
-        params['vdi_uuids'] = ['fake_vdi_uuid']
-        params['properties'] = {'auto_disk_config': auto_disk_config,
-                                'os_type': expected_os_type}
-        return params
-
-    @mock.patch.object(utils, 'get_auto_disk_config_from_instance')
-    @mock.patch.object(common_glance, 'generate_identity_headers')
-    @mock.patch.object(vm_utils, 'get_sr_path')
-    @mock.patch.object(host_glance, 'upload_vhd')
-    def test_upload_image(self, mock_upload, mock_sr_path, mock_extra_header,
-                          mock_disk_config):
-        params = self._get_upload_params()
-        mock_upload.return_value = 'fake_upload'
-        mock_sr_path.return_value = 'fake_sr_path'
-        mock_extra_header.return_value = 'fake_extra_header'
-        mock_disk_config.return_value = 'true'
-        self.store.upload_image(self.context, self.session, self.instance,
-                                'fake_image_uuid', ['fake_vdi_uuid'])
-
-        mock_sr_path.assert_called_once_with(self.session)
-        mock_extra_header.assert_called_once_with(self.context)
-        mock_upload.assert_called_once_with(
-            self.session, 3, mock.ANY, mock.ANY, 'fake_image_uuid',
-            'fake_sr_path', 'fake_extra_header', **params)
-
-    @mock.patch.object(utils, 'get_auto_disk_config_from_instance')
-    @mock.patch.object(common_glance, 'generate_identity_headers')
-    @mock.patch.object(vm_utils, 'get_sr_path')
-    @mock.patch.object(host_glance, 'upload_vhd')
-    def test_upload_image_None_os_type(self, mock_upload, mock_sr_path,
-                                       mock_extra_header, mock_disk_config):
-        self.instance['os_type'] = None
-        mock_sr_path.return_value = 'fake_sr_path'
-        mock_extra_header.return_value = 'fake_extra_header'
-        mock_upload.return_value = 'fake_upload'
-        mock_disk_config.return_value = 'true'
-        params = self._get_upload_params(True, 'linux')
-        self.store.upload_image(self.context, self.session, self.instance,
-                                'fake_image_uuid', ['fake_vdi_uuid'])
-
-        mock_sr_path.assert_called_once_with(self.session)
-        mock_extra_header.assert_called_once_with(self.context)
-        mock_upload.assert_called_once_with(
-            self.session, 3, mock.ANY, mock.ANY, 'fake_image_uuid',
-            'fake_sr_path', 'fake_extra_header', **params)
-        mock_disk_config.assert_called_once_with(self.instance)
-
-    @mock.patch.object(utils, 'get_auto_disk_config_from_instance')
-    @mock.patch.object(common_glance, 'generate_identity_headers')
-    @mock.patch.object(vm_utils, 'get_sr_path')
-    @mock.patch.object(host_glance, 'upload_vhd')
-    def test_upload_image_no_os_type(self, mock_upload, mock_sr_path,
-                                     mock_extra_header, mock_disk_config):
-        mock_sr_path.return_value = 'fake_sr_path'
-        mock_extra_header.return_value = 'fake_extra_header'
-        mock_upload.return_value = 'fake_upload'
-        del self.instance['os_type']
-        params = self._get_upload_params(True, 'linux')
-        self.store.upload_image(self.context, self.session, self.instance,
-                                'fake_image_uuid', ['fake_vdi_uuid'])
-
-        mock_sr_path.assert_called_once_with(self.session)
-        mock_extra_header.assert_called_once_with(self.context)
-        mock_upload.assert_called_once_with(
-            self.session, 3, mock.ANY, mock.ANY, 'fake_image_uuid',
-            'fake_sr_path', 'fake_extra_header', **params)
-        mock_disk_config.assert_called_once_with(self.instance)
-
-    @mock.patch.object(common_glance, 'generate_identity_headers')
-    @mock.patch.object(vm_utils, 'get_sr_path')
-    @mock.patch.object(host_glance, 'upload_vhd')
-    def test_upload_image_auto_config_disk_disabled(
-            self, mock_upload, mock_sr_path, mock_extra_header):
-        mock_sr_path.return_value = 'fake_sr_path'
-        mock_extra_header.return_value = 'fake_extra_header'
-        mock_upload.return_value = 'fake_upload'
-        sys_meta = [{"key": "image_auto_disk_config", "value": "Disabled"}]
-        self.instance["system_metadata"] = sys_meta
-        params = self._get_upload_params("disabled")
-        self.store.upload_image(self.context, self.session, self.instance,
-                                'fake_image_uuid', ['fake_vdi_uuid'])
-
-        mock_sr_path.assert_called_once_with(self.session)
-        mock_extra_header.assert_called_once_with(self.context)
-        mock_upload.assert_called_once_with(
-            self.session, 3, mock.ANY, mock.ANY, 'fake_image_uuid',
-            'fake_sr_path', 'fake_extra_header', **params)
-
-    @mock.patch.object(common_glance, 'generate_identity_headers')
-    @mock.patch.object(vm_utils, 'get_sr_path')
-    @mock.patch.object(host_glance, 'upload_vhd')
-    def test_upload_image_raises_exception(self, mock_upload, mock_sr_path,
-                                           mock_extra_header):
-
-        mock_sr_path.return_value = 'fake_sr_path'
-        mock_extra_header.return_value = 'fake_extra_header'
-        mock_upload.side_effect = RuntimeError
-        params = self._get_upload_params()
-        self.assertRaises(RuntimeError, self.store.upload_image,
-                          self.context, self.session, self.instance,
-                          'fake_image_uuid', ['fake_vdi_uuid'])
-
-        mock_sr_path.assert_called_once_with(self.session)
-        mock_extra_header.assert_called_once_with(self.context)
-        mock_upload.assert_called_once_with(
-            self.session, 3, mock.ANY, mock.ANY, 'fake_image_uuid',
-            'fake_sr_path', 'fake_extra_header', **params)
-
-    @mock.patch.object(time, 'sleep')
-    @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
-    def test_upload_image_retries_then_raises_exception(self,
-                                                        mock_add_inst,
-                                                        mock_time_sleep):
-        self.flags(num_retries=2, group='glance')
-        params = self._get_params()
-        params.update(self._get_upload_params())
-
-        error_details = ["", "", "RetryableError", ""]
-        error = XenAPI.Failure(details=error_details)
-
-        with mock.patch.object(self.session, 'call_plugin_serialized',
-                               side_effect=error) as mock_call_plugin:
-            self.assertRaises(exception.CouldNotUploadImage,
-                              self.store.upload_image,
-                              self.context, self.session, self.instance,
-                              'fake_image_uuid', ['fake_vdi_uuid'])
-
-            time_sleep_args = [mock.call(0.5), mock.call(1)]
-            call_plugin_args = [
-                mock.call('glance.py', 'upload_vhd2', **params),
-                mock.call('glance.py', 'upload_vhd2', **params),
-                mock.call('glance.py', 'upload_vhd2', **params)]
-            add_inst_args = [
-                mock.call(self.context, self.instance, error,
-                          (XenAPI.Failure, error, mock.ANY)),
-                mock.call(self.context, self.instance, error,
-                          (XenAPI.Failure, error, mock.ANY)),
-                mock.call(self.context, self.instance, error,
-                          (XenAPI.Failure, error, mock.ANY))]
-            mock_time_sleep.assert_has_calls(time_sleep_args)
-            mock_call_plugin.assert_has_calls(call_plugin_args)
-            mock_add_inst.assert_has_calls(add_inst_args)
-
-    @mock.patch.object(time, 'sleep')
-    @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
-    def test_upload_image_retries_on_signal_exception(self,
-                                                      mock_add_inst,
-                                                      mock_time_sleep):
-        self.flags(num_retries=2, group='glance')
-        params = self._get_params()
-        params.update(self._get_upload_params())
-
-        error_details = ["", "task signaled", "", ""]
-        error = XenAPI.Failure(details=error_details)
-
-        # Note(johngarbutt) XenServer 6.1 and later has this error
-        error_details_v61 = ["", "signal: SIGTERM", "", ""]
-        error_v61 = self.session.XenAPI.Failure(details=error_details_v61)
-
-        with mock.patch.object(self.session, 'call_plugin_serialized',
-                               side_effect=[error, error_v61, None]
-                               ) as mock_call_plugin:
-            self.store.upload_image(self.context, self.session, self.instance,
-                                    'fake_image_uuid', ['fake_vdi_uuid'])
-
-            time_sleep_args = [mock.call(0.5), mock.call(1)]
-            call_plugin_args = [
-                mock.call('glance.py', 'upload_vhd2', **params),
-                mock.call('glance.py', 'upload_vhd2', **params),
-                mock.call('glance.py', 'upload_vhd2', **params)]
-            add_inst_args = [
-                mock.call(self.context, self.instance, error,
-                          (XenAPI.Failure, error, mock.ANY)),
-                mock.call(self.context, self.instance, error_v61,
-                          (XenAPI.Failure, error_v61, mock.ANY))]
-            mock_time_sleep.assert_has_calls(time_sleep_args)
-            mock_call_plugin.assert_has_calls(call_plugin_args)
-            mock_add_inst.assert_has_calls(add_inst_args)
-
-    @mock.patch.object(utils, 'get_auto_disk_config_from_instance')
-    @mock.patch.object(common_glance, 'generate_identity_headers')
-    @mock.patch.object(vm_utils, 'get_sr_path')
-    @mock.patch.object(host_glance, 'upload_vhd')
-    def test_upload_image_raises_exception_image_not_found(self,
-                                                           mock_upload,
-                                                           mock_sr_path,
-                                                           mock_extra_header,
-                                                           mock_disk_config):
-        params = self._get_upload_params()
-        mock_upload.return_value = 'fake_upload'
-        mock_sr_path.return_value = 'fake_sr_path'
-        mock_extra_header.return_value = 'fake_extra_header'
-        mock_disk_config.return_value = 'true'
-        image_id = 'fake_image_id'
-        mock_upload.side_effect = xenapi_exception.PluginImageNotFound(
-            image_id=image_id
-        )
-        self.assertRaises(exception.ImageNotFound, self.store.upload_image,
-                          self.context, self.session, self.instance,
-                          'fake_image_uuid', ['fake_vdi_uuid'])
-
-        mock_sr_path.assert_called_once_with(self.session)
-        mock_extra_header.assert_called_once_with(self.context)
-        mock_upload.assert_called_once_with(
-            self.session, 3, mock.ANY, mock.ANY, 'fake_image_uuid',
-            'fake_sr_path', 'fake_extra_header', **params)
diff --git a/nova/tests/unit/virt/xenapi/image/test_utils.py b/nova/tests/unit/virt/xenapi/image/test_utils.py
deleted file mode 100644
index 27839e868bc9..000000000000
--- a/nova/tests/unit/virt/xenapi/image/test_utils.py
+++ /dev/null
@@ -1,244 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import tarfile
-
-import mock
-
-from nova import test
-from nova.virt.xenapi.image import utils
-
-
-@mock.patch.object(utils, 'IMAGE_API')
-class GlanceImageTestCase(test.NoDBTestCase):
-
-    def _get_image(self):
-        return utils.GlanceImage(mock.sentinel.context,
-                                 mock.sentinel.image_ref)
-
-    def test_meta(self, mocked):
-        mocked.get.return_value = mock.sentinel.meta
-
-        image = self._get_image()
-        self.assertEqual(mock.sentinel.meta, image.meta)
-        mocked.get.assert_called_once_with(mock.sentinel.context,
-                                           mock.sentinel.image_ref)
-
-    def test_download_to(self, mocked):
-        mocked.download.return_value = None
-
-        image = self._get_image()
-        result = image.download_to(mock.sentinel.fobj)
-        self.assertIsNone(result)
-        mocked.download.assert_called_once_with(mock.sentinel.context,
-                                                mock.sentinel.image_ref,
-                                                mock.sentinel.fobj)
-
-    def test_is_raw_tgz_empty_meta(self, mocked):
-        mocked.get.return_value = {}
-
-        image = self._get_image()
-        self.assertFalse(image.is_raw_tgz())
-
-    def test_is_raw_tgz_for_raw_tgz(self, mocked):
-        mocked.get.return_value = {
-            'disk_format': 'raw',
-            'container_format': 'tgz'
-        }
-
-        image = self._get_image()
-        self.assertTrue(image.is_raw_tgz())
-
-    def test_data(self, mocked):
-        mocked.download.return_value = mock.sentinel.image
-        image = self._get_image()
-
-        self.assertEqual(mock.sentinel.image, image.data())
-
-
-class RawImageTestCase(test.NoDBTestCase):
-    @mock.patch.object(utils, 'GlanceImage', spec_set=True, autospec=True)
-    def test_get_size(self, mock_glance_image):
-        mock_glance_image.meta = {'size': '123'}
-        raw_image = utils.RawImage(mock_glance_image)
-
-        self.assertEqual(123, raw_image.get_size())
-
-    @mock.patch.object(utils, 'GlanceImage', spec_set=True, autospec=True)
-    def test_stream_to(self, mock_glance_image):
-        mock_glance_image.download_to.return_value = 'result'
-        raw_image = utils.RawImage(mock_glance_image)
-
-        self.assertEqual('result', raw_image.stream_to('file'))
-        mock_glance_image.download_to.assert_called_once_with('file')
-
-
-class TestIterableBasedFile(test.NoDBTestCase):
-    def test_constructor(self):
-        class FakeIterable(object):
-            def __iter__(_self):
-                return 'iterator'
-
-        the_file = utils.IterableToFileAdapter(FakeIterable())
-
-        self.assertEqual('iterator', the_file.iterator)
-
-    def test_read_one_character(self):
-        the_file = utils.IterableToFileAdapter([
-            'chunk1', 'chunk2'
-        ])
-
-        self.assertEqual('c', the_file.read(1))
-
-    def test_read_stores_remaining_characters(self):
-        the_file = utils.IterableToFileAdapter([
-            'chunk1', 'chunk2'
-        ])
-
-        the_file.read(1)
-
-        self.assertEqual('hunk1', the_file.remaining_data)
-
-    def test_read_remaining_characters(self):
-        the_file = utils.IterableToFileAdapter([
-            'chunk1', 'chunk2'
-        ])
-
-        self.assertEqual('c', the_file.read(1))
-        self.assertEqual('h', the_file.read(1))
-
-    def test_read_reached_end_of_file(self):
-        the_file = utils.IterableToFileAdapter([
-            'chunk1', 'chunk2'
-        ])
-
-        self.assertEqual('chunk1', the_file.read(100))
-        self.assertEqual('chunk2', the_file.read(100))
-        self.assertEqual('', the_file.read(100))
-
-    def test_empty_chunks(self):
-        the_file = utils.IterableToFileAdapter([
-            '', '', 'chunk2'
-        ])
-
-        self.assertEqual('chunk2', the_file.read(100))
-
-
-class RawTGZTestCase(test.NoDBTestCase):
-    @mock.patch.object(utils.RawTGZImage, '_as_file', return_value='the_file')
-    @mock.patch.object(utils.tarfile, 'open', return_value='tf')
-    def test_as_tarfile(self, mock_open, mock_as_file):
-        image = utils.RawTGZImage(None)
-        result = image._as_tarfile()
-        self.assertEqual('tf', result)
-        mock_as_file.assert_called_once_with()
-        mock_open.assert_called_once_with(mode='r|gz', fileobj='the_file')
-
-    @mock.patch.object(utils, 'GlanceImage', spec_set=True, autospec=True)
-    @mock.patch.object(utils, 'IterableToFileAdapter',
-                       return_value='data-as-file')
-    def test_as_file(self, mock_adapter, mock_glance_image):
-        mock_glance_image.data.return_value = 'iterable-data'
-        image = utils.RawTGZImage(mock_glance_image)
-        result = image._as_file()
-        self.assertEqual('data-as-file', result)
-        mock_glance_image.data.assert_called_once_with()
-        mock_adapter.assert_called_once_with('iterable-data')
-
-    @mock.patch.object(tarfile, 'TarFile', spec_set=True, autospec=True)
-    @mock.patch.object(tarfile, 'TarInfo', autospec=True)
-    @mock.patch.object(utils.RawTGZImage, '_as_tarfile')
-    def test_get_size(self, mock_as_tar, mock_tar_info, mock_tar_file):
-        mock_tar_file.next.return_value = mock_tar_info
-        mock_tar_info.size = 124
-        mock_as_tar.return_value = mock_tar_file
-
-        image = utils.RawTGZImage(None)
-        result = image.get_size()
-
-        self.assertEqual(124, result)
-        self.assertEqual(image._tar_info, mock_tar_info)
-        self.assertEqual(image._tar_file, mock_tar_file)
-        mock_as_tar.assert_called_once_with()
-        mock_tar_file.next.assert_called_once_with()
-
-    @mock.patch.object(tarfile, 'TarFile', spec_set=True, autospec=True)
-    @mock.patch.object(tarfile, 'TarInfo', autospec=True)
-    @mock.patch.object(utils.RawTGZImage, '_as_tarfile')
-    def test_get_size_called_twice(self, mock_as_tar, mock_tar_info,
-                                   mock_tar_file):
-        mock_tar_file.next.return_value = mock_tar_info
-        mock_tar_info.size = 124
-        mock_as_tar.return_value = mock_tar_file
-
-        image = utils.RawTGZImage(None)
-        image.get_size()
-        result = image.get_size()
-
-        self.assertEqual(124, result)
-        self.assertEqual(image._tar_info, mock_tar_info)
-        self.assertEqual(image._tar_file, mock_tar_file)
-        mock_as_tar.assert_called_once_with()
-        mock_tar_file.next.assert_called_once_with()
-
-    @mock.patch.object(tarfile, 'TarFile', spec_set=True, autospec=True)
-    @mock.patch.object(tarfile, 'TarInfo', spec_set=True, autospec=True)
-    @mock.patch.object(utils.RawTGZImage, '_as_tarfile')
-    @mock.patch.object(utils.shutil, 'copyfileobj')
-    def test_stream_to_without_size_retrieved(self, mock_copyfile,
-                                              mock_as_tar, mock_tar_info,
-                                              mock_tar_file):
-        target_file = mock.create_autospec(open)
-        source_file = mock.create_autospec(open)
-        mock_tar_file.next.return_value = mock_tar_info
-        mock_tar_file.extractfile.return_value = source_file
-        mock_as_tar.return_value = mock_tar_file
-
-        image = utils.RawTGZImage(None)
-        image._image_service_and_image_id = ('service', 'id')
-        image.stream_to(target_file)
-
-        mock_as_tar.assert_called_once_with()
-        mock_tar_file.next.assert_called_once_with()
-        mock_tar_file.extractfile.assert_called_once_with(mock_tar_info)
-        mock_copyfile.assert_called_once_with(
-            source_file, target_file)
-        mock_tar_file.close.assert_called_once_with()
-
-    @mock.patch.object(tarfile, 'TarFile', spec_set=True, autospec=True)
-    @mock.patch.object(tarfile, 'TarInfo', autospec=True)
-    @mock.patch.object(utils.RawTGZImage, '_as_tarfile')
-    @mock.patch.object(utils.shutil, 'copyfileobj')
-    def test_stream_to_with_size_retrieved(self, mock_copyfile,
-                                           mock_as_tar, mock_tar_info,
-                                           mock_tar_file):
-        target_file = mock.create_autospec(open)
-        source_file = mock.create_autospec(open)
-        mock_tar_info.size = 124
-        mock_tar_file.next.return_value = mock_tar_info
-        mock_tar_file.extractfile.return_value = source_file
-        mock_as_tar.return_value = mock_tar_file
-
-        image = utils.RawTGZImage(None)
-        image._image_service_and_image_id = ('service', 'id')
-        image.get_size()
-        image.stream_to(target_file)
-
-        mock_as_tar.assert_called_once_with()
-        mock_tar_file.next.assert_called_once_with()
-        mock_tar_file.extractfile.assert_called_once_with(mock_tar_info)
-        mock_copyfile.assert_called_once_with(
-            source_file, target_file)
-        mock_tar_file.close.assert_called_once_with()
diff --git a/nova/tests/unit/virt/xenapi/image/test_vdi_stream.py b/nova/tests/unit/virt/xenapi/image/test_vdi_stream.py
deleted file mode 100644
index 79790148c538..000000000000
--- a/nova/tests/unit/virt/xenapi/image/test_vdi_stream.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# Copyright 2017 Citrix System
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from os_xenapi.client import exception as xenapi_except
-from os_xenapi.client import image
-
-from nova import context
-from nova import exception
-from nova.image.glance import API as image_api
-from nova.tests.unit.virt.xenapi import stubs
-from nova.virt.xenapi.image import utils
-from nova.virt.xenapi.image import vdi_stream
-from nova.virt.xenapi import vm_utils
-
-
-class TestVdiStreamStore(stubs.XenAPITestBaseNoDB):
-    def setUp(self):
-        super(TestVdiStreamStore, self).setUp()
-        self.store = vdi_stream.VdiStreamStore()
-
-        self.flags(connection_url='test_url',
-                   image_compression_level=5,
-                   group='xenserver')
-
-        self.session = mock.Mock()
-        self.context = context.RequestContext(
-            'user', 'project', auth_token='foobar')
-        self.instance = {'uuid': 'e6ad57c9-115e-4b7d-a872-63cea0ac3cf2',
-                         'system_metadata': [],
-                         'auto_disk_config': True,
-                         'os_type': 'default',
-                         'xenapi_use_agent': 'true'}
-
-    @mock.patch.object(image_api, 'download',
-                       return_value='fake_data')
-    @mock.patch.object(utils, 'IterableToFileAdapter',
-                       return_value='fake_stream')
-    @mock.patch.object(vm_utils, 'safe_find_sr',
-                       return_value='fake_sr_ref')
-    @mock.patch.object(image, 'stream_to_vdis')
-    def test_download_image(self, stream_to, find_sr, to_file, download):
-        self.store.download_image(self.context, self.session,
-                                  self.instance, 'fake_image_uuid')
-
-        download.assert_called_once_with(self.context, 'fake_image_uuid')
-        to_file.assert_called_once_with('fake_data')
-        find_sr.assert_called_once_with(self.session)
-        stream_to.assert_called_once_with(self.context, self.session,
-                                          self.instance, 'test_url',
-                                          'fake_sr_ref', 'fake_stream')
-
-    @mock.patch.object(image_api, 'download',
-                       return_value='fake_data')
-    @mock.patch.object(utils, 'IterableToFileAdapter',
-                       return_value='fake_stream')
-    @mock.patch.object(vm_utils, 'safe_find_sr',
-                       return_value='fake_sr_ref')
-    @mock.patch.object(image, 'stream_to_vdis',
-                       side_effect=xenapi_except.OsXenApiException)
-    def test_download_image_exception(self, stream_to, find_sr, to_file,
-                                      download):
-        self.assertRaises(exception.CouldNotFetchImage,
-                          self.store.download_image,
-                          self.context, self.session,
-                          self.instance, 'fake_image_uuid')
-
-    @mock.patch.object(vdi_stream.VdiStreamStore, '_get_metadata',
-                       return_value='fake_meta_data')
-    @mock.patch.object(image, 'stream_from_vdis',
-                       return_value='fake_data')
-    @mock.patch.object(utils, 'IterableToFileAdapter',
-                       return_value='fake_stream')
-    @mock.patch.object(image_api, 'update')
-    def test_upload_image(self, update, to_file, to_stream, get):
-        fake_vdi_uuids = ['fake-vdi-uuid']
-        self.store.upload_image(self.context, self.session,
-                                self.instance, 'fake_image_uuid',
-                                fake_vdi_uuids)
-
-        get.assert_called_once_with(self.context, self.instance,
-                                    'fake_image_uuid')
-        to_stream.assert_called_once_with(self.context, self.session,
-                                          self.instance, 'test_url',
-                                          fake_vdi_uuids, compresslevel=5)
-        to_file.assert_called_once_with('fake_data')
-        update.assert_called_once_with(self.context, 'fake_image_uuid',
-                                       'fake_meta_data', data='fake_stream')
-
-    @mock.patch.object(vdi_stream.VdiStreamStore, '_get_metadata')
-    @mock.patch.object(image, 'stream_from_vdis',
-                       side_effect=xenapi_except.OsXenApiException)
-    @mock.patch.object(utils, 'IterableToFileAdapter',
-                       return_value='fake_stream')
-    @mock.patch.object(image_api, 'update')
-    def test_upload_image_exception(self, update, to_file, to_stream, get):
-        fake_vdi_uuids = ['fake-vdi-uuid']
-        self.assertRaises(exception.CouldNotUploadImage,
-                          self.store.upload_image,
-                          self.context, self.session,
-                          self.instance, 'fake_image_uuid',
-                          fake_vdi_uuids)
-
-    @mock.patch.object(image_api, 'get',
-                       return_value={})
-    def test_get_metadata(self, image_get):
-        expect_metadata = {'disk_format': 'vhd',
-                           'container_format': 'ovf',
-                           'auto_disk_config': 'True',
-                           'os_type': 'default',
-                           'size': 0}
-
-        result = self.store._get_metadata(self.context, self.instance,
-                                          'fake_image_uuid')
-
-        self.assertEqual(result, expect_metadata)
-
-    @mock.patch.object(image_api, 'get',
-                       return_value={})
-    def test_get_metadata_disabled(self, image_get):
-        # Verify the metadata contains auto_disk_config=disabled, when
-        # image_auto_disk_config is "Disabled".
-        self.instance['system_metadata'] = [
-            {"key": "image_auto_disk_config",
-             "value": "Disabled"}]
-
-        expect_metadata = {'disk_format': 'vhd',
-                           'container_format': 'ovf',
-                           'auto_disk_config': 'disabled',
-                           'os_type': 'default',
-                           'size': 0}
-
-        result = self.store._get_metadata(self.context, self.instance,
-                                          'fake_image_uuid')
-
-        self.assertEqual(result, expect_metadata)
diff --git a/nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py b/nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py
deleted file mode 100644
index ae961f4d4822..000000000000
--- a/nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py
+++ /dev/null
@@ -1,204 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-import tarfile
-
-import eventlet
-import mock
-from os_xenapi.client import session as xenapi_session
-import six
-
-from nova.image import glance
-from nova import test
-from nova.virt.xenapi.image import vdi_through_dev
-
-
-@contextlib.contextmanager
-def fake_context(result=None):
-    yield result
-
-
-class TestDelegatingToCommand(test.NoDBTestCase):
-    def test_upload_image_is_delegated_to_command(self):
-        command = mock.create_autospec(vdi_through_dev.UploadToGlanceAsRawTgz,
-                                       spec_set=True)
-        command.upload_image.return_value = 'result'
-
-        with mock.patch.object(vdi_through_dev, 'UploadToGlanceAsRawTgz',
-                               return_value=command) as mock_upload:
-            store = vdi_through_dev.VdiThroughDevStore()
-            result = store.upload_image(
-                'ctx', 'session', 'instance', 'image_id', 'vdis')
-
-            self.assertEqual('result', result)
-            mock_upload.assert_called_once_with(
-                'ctx', 'session', 'instance', 'image_id', 'vdis')
-            command.upload_image.assert_called_once_with()
-
-
-class TestUploadToGlanceAsRawTgz(test.NoDBTestCase):
-    @mock.patch.object(vdi_through_dev.vm_utils, 'vdi_attached')
-    @mock.patch.object(vdi_through_dev.utils, 'make_dev_path')
-    @mock.patch.object(vdi_through_dev.utils, 'temporary_chown')
-    def test_upload_image(self, mock_vdi_temp_chown,
-                          mock_vdi_make_dev_path, mock_vdi_attached):
-        mock_vdi_attached.return_value = fake_context('dev')
-        mock_vdi_make_dev_path.return_value = 'devpath'
-        mock_vdi_temp_chown.return_value = fake_context()
-
-        store = vdi_through_dev.UploadToGlanceAsRawTgz(
-            'context', 'session', 'instance', 'id', ['vdi0', 'vdi1'])
-
-        with test.nested(
-                mock.patch.object(store, '_perform_upload'),
-                mock.patch.object(store, '_get_vdi_ref',
-                                  return_value='vdi_ref'),
-        ) as (mock_upload, mock_get_vdi):
-
-            store.upload_image()
-
-            mock_get_vdi.assert_called_once_with()
-            mock_upload.assert_called_once_with('devpath')
-            mock_vdi_attached.assert_called_once_with(
-                'session', 'vdi_ref', read_only=True)
-            mock_vdi_make_dev_path.assert_called_once_with('dev')
-            mock_vdi_temp_chown.assert_called_once_with('devpath')
-
-    def test__perform_upload(self):
-        producer = mock.create_autospec(vdi_through_dev.TarGzProducer,
-                                        spec_set=True)
-        consumer = mock.create_autospec(glance.UpdateGlanceImage,
-                                        spec_set=True)
-        pool = mock.create_autospec(eventlet.GreenPool,
-                                    spec_set=True)
-        store = vdi_through_dev.UploadToGlanceAsRawTgz(
-            'context', 'session', 'instance', 'id', ['vdi0', 'vdi1'])
-
-        with test.nested(
-                mock.patch.object(store, '_create_pipe',
-                                  return_value=('readfile', 'writefile')),
-                mock.patch.object(store, '_get_virtual_size',
-                                  return_value='324'),
-                mock.patch.object(glance, 'UpdateGlanceImage',
-                                  return_value=consumer),
-                mock.patch.object(vdi_through_dev, 'TarGzProducer',
-                                  return_value=producer),
-                mock.patch.object(vdi_through_dev.eventlet, 'GreenPool',
-                                  return_value=pool)
-        ) as (mock_create_pipe, mock_virtual_size,
-              mock_upload, mock_TarGzProducer, mock_greenpool):
-            producer.get_metadata.return_value = "metadata"
-
-            store._perform_upload('devpath')
-
-            producer.get_metadata.assert_called_once_with()
-            mock_virtual_size.assert_called_once_with()
-            mock_create_pipe.assert_called_once_with()
-            mock_TarGzProducer.assert_called_once_with(
-                'devpath', 'writefile', '324', 'disk.raw')
-            mock_upload.assert_called_once_with(
-                'context', 'id', 'metadata', 'readfile')
-            mock_greenpool.assert_called_once_with()
-            pool.spawn.assert_has_calls([mock.call(producer.start),
-                                         mock.call(consumer.start)])
-            pool.waitall.assert_called_once_with()
-
def test__get_vdi_ref(self): - session = mock.create_autospec(xenapi_session.XenAPISession, - spec_set=True) - store = vdi_through_dev.UploadToGlanceAsRawTgz( - 'context', session, 'instance', 'id', ['vdi0', 'vdi1']) - session.call_xenapi.return_value = 'vdi_ref' - - self.assertEqual('vdi_ref', store._get_vdi_ref()) - session.call_xenapi.assert_called_once_with( - 'VDI.get_by_uuid', 'vdi0') - - def test__get_virtual_size(self): - session = mock.create_autospec(xenapi_session.XenAPISession, - spec_set=True) - store = vdi_through_dev.UploadToGlanceAsRawTgz( - 'context', session, 'instance', 'id', ['vdi0', 'vdi1']) - - with mock.patch.object(store, '_get_vdi_ref', - return_value='vdi_ref') as mock_get_vdi: - store._get_virtual_size() - - mock_get_vdi.assert_called_once_with() - session.call_xenapi.assert_called_once_with( - 'VDI.get_virtual_size', 'vdi_ref') - - @mock.patch.object(vdi_through_dev.os, 'pipe') - @mock.patch.object(vdi_through_dev.greenio, 'GreenPipe') - def test__create_pipe(self, mock_vdi_greenpipe, mock_vdi_os_pipe): - store = vdi_through_dev.UploadToGlanceAsRawTgz( - 'context', 'session', 'instance', 'id', ['vdi0', 'vdi1']) - - mock_vdi_os_pipe.return_value = ('rpipe', 'wpipe') - mock_vdi_greenpipe.side_effect = ['rfile', 'wfile'] - - result = store._create_pipe() - self.assertEqual(('rfile', 'wfile'), result) - mock_vdi_os_pipe.assert_called_once_with() - mock_vdi_greenpipe.assert_has_calls( - [mock.call('rpipe', 'rb', 0), - mock.call('wpipe', 'wb', 0)]) - - -class TestTarGzProducer(test.NoDBTestCase): - def test_constructor(self): - producer = vdi_through_dev.TarGzProducer('devpath', 'writefile', - '100', 'fname') - - self.assertEqual('devpath', producer.fpath) - self.assertEqual('writefile', producer.output) - self.assertEqual('100', producer.size) - - @mock.patch.object(vdi_through_dev.tarfile, 'TarInfo') - @mock.patch.object(vdi_through_dev.tarfile, 'open') - def test_start(self, mock_tar_open, mock_tar_TarInfo): - outf = six.StringIO() - producer = vdi_through_dev.TarGzProducer('fpath', outf, - '100', 'fname') - - tfile = mock.create_autospec(tarfile.TarFile, spec_set=True) - tinfo = mock.create_autospec(tarfile.TarInfo) - - inf = mock.create_autospec(open, spec_set=True) - - mock_tar_open.return_value = fake_context(tfile) - mock_tar_TarInfo.return_value = tinfo - - with mock.patch.object(producer, '_open_file', - return_value=fake_context(inf) - ) as mock_open_file: - producer.start() - - self.assertEqual(100, tinfo.size) - mock_tar_TarInfo.assert_called_once_with(name='fname') - mock_tar_open.assert_called_once_with(fileobj=outf, mode='w|gz') - mock_open_file.assert_called_once_with('fpath', 'rb') - tfile.addfile.assert_called_once_with(tinfo, fileobj=inf) - - def test_get_metadata(self): - producer = vdi_through_dev.TarGzProducer('devpath', 'writefile', - '100', 'fname') - - self.assertEqual({ - 'disk_format': 'raw', - 'container_format': 'tgz'}, - producer.get_metadata()) diff --git a/nova/tests/unit/virt/xenapi/stubs.py b/nova/tests/unit/virt/xenapi/stubs.py deleted file mode 100644 index 2d0d985e36d2..000000000000 --- a/nova/tests/unit/virt/xenapi/stubs.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Stubouts, mocks and fixtures for the test suite.""" - -import pickle -import random -import sys - -import fixtures -import mock -from os_xenapi.client import session -from os_xenapi.client import XenAPI - -from nova import test -from nova.virt.xenapi import fake - - -def stubout_session(test, cls, product_version=(5, 6, 2), - product_brand='XenServer', platform_version=(1, 9, 0), - **opt_args): - """Stubs out methods from XenAPISession.""" - test.stub_out('os_xenapi.client.session.XenAPISession._create_session', - lambda s, url: cls(url, **opt_args)) - test.stub_out('os_xenapi.client.session.XenAPISession.' - '_get_product_version_and_brand', - lambda s: (product_version, product_brand)) - test.stub_out('os_xenapi.client.session.XenAPISession.' - '_get_platform_version', - lambda s: platform_version) - - -def _make_fake_vdi(): - sr_ref = fake.get_all('SR')[0] - vdi_ref = fake.create_vdi('', sr_ref) - vdi_rec = fake.get_record('VDI', vdi_ref) - return vdi_rec['uuid'] - - -class FakeSessionForVMTests(fake.SessionBase): - """Stubs out a XenAPISession for VM tests.""" - - def host_call_plugin(self, _1, _2, plugin, method, _5): - plugin = plugin.rstrip('.py') - - if plugin == 'glance' and method == 'download_vhd2': - root_uuid = _make_fake_vdi() - return pickle.dumps(dict(root=dict(uuid=root_uuid))) - else: - return (super(FakeSessionForVMTests, self). 
- host_call_plugin(_1, _2, plugin, method, _5)) - - def VM_start(self, _1, ref, _2, _3): - vm = fake.get_record('VM', ref) - if vm['power_state'] != 'Halted': - raise XenAPI.Failure(['VM_BAD_POWER_STATE', ref, 'Halted', - vm['power_state']]) - vm['power_state'] = 'Running' - vm['is_a_template'] = False - vm['is_control_domain'] = False - vm['domid'] = random.randrange(1, 1 << 16) - return vm - - def VM_start_on(self, _1, vm_ref, host_ref, _2, _3): - vm_rec = self.VM_start(_1, vm_ref, _2, _3) - vm_rec['resident_on'] = host_ref - - def VDI_snapshot(self, session_ref, vm_ref, _1): - sr_ref = "fakesr" - return fake.create_vdi('fakelabel', sr_ref, read_only=True) - - def SR_scan(self, session_ref, sr_ref): - pass - - -class ReplaceModule(fixtures.Fixture): - """Replace a module with a fake module.""" - - def __init__(self, name, new_value): - self.name = name - self.new_value = new_value - - def _restore(self, old_value): - sys.modules[self.name] = old_value - - def setUp(self): - super(ReplaceModule, self).setUp() - old_value = sys.modules.get(self.name) - sys.modules[self.name] = self.new_value - self.addCleanup(self._restore, old_value) - - -class FakeSessionForVolumeTests(fake.SessionBase): - """Stubs out a XenAPISession for Volume tests.""" - def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, - _6, _7, _8, _9, _10, _11): - valid_vdi = False - refs = fake.get_all('VDI') - for ref in refs: - rec = fake.get_record('VDI', ref) - if rec['uuid'] == uuid: - valid_vdi = True - if not valid_vdi: - raise XenAPI.Failure([['INVALID_VDI', 'session', self._session]]) - - -class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests): - """Stubs out a XenAPISession for Volume tests: it injects failures.""" - def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, - _6, _7, _8, _9, _10, _11): - # This is for testing failure - raise XenAPI.Failure([['INVALID_VDI', 'session', self._session]]) - - def PBD_unplug(self, _1, ref): - rec = fake.get_record('PBD', ref) - rec['currently-attached'] = False - - def SR_forget(self, _1, ref): - pass - - -class FakeSessionForFailedMigrateTests(FakeSessionForVMTests): - def VM_assert_can_migrate(self, session, vmref, migrate_data, - live, vdi_map, vif_map, options): - raise XenAPI.Failure("XenAPI VM.assert_can_migrate failed") - - def host_migrate_receive(self, session, hostref, networkref, options): - raise XenAPI.Failure("XenAPI host.migrate_receive failed") - - def VM_migrate_send(self, session, vmref, migrate_data, islive, vdi_map, - vif_map, options): - raise XenAPI.Failure("XenAPI VM.migrate_send failed") - - -# FIXME(sirp): XenAPITestBase is deprecated, all tests should be converted -# over to use XenAPITestBaseNoDB -class XenAPITestBase(test.TestCase): - def setUp(self): - super(XenAPITestBase, self).setUp() - self.useFixture(ReplaceModule('XenAPI', fake)) - fake.reset() - - def stubout_get_this_vm_uuid(self): - def f(session): - vms = [rec['uuid'] for rec - in fake.get_all_records('VM').values() - if rec['is_control_domain']] - return vms[0] - self.stub_out('nova.virt.xenapi.vm_utils.get_this_vm_uuid', f) - - -class XenAPITestBaseNoDB(test.NoDBTestCase): - def setUp(self): - super(XenAPITestBaseNoDB, self).setUp() - self.useFixture(ReplaceModule('XenAPI', fake)) - fake.reset() - - @staticmethod - def get_fake_session(error=None): - fake_session = mock.MagicMock() - session.apply_session_helpers(fake_session) - - if error is not None: - class FakeException(Exception): - details = [error, "a", "b", "c"] - - fake_session.XenAPI.Failure = FakeException - 
fake_session.call_xenapi.side_effect = FakeException - - return fake_session diff --git a/nova/tests/unit/virt/xenapi/test_agent.py b/nova/tests/unit/virt/xenapi/test_agent.py deleted file mode 100644 index 2848fc288213..000000000000 --- a/nova/tests/unit/virt/xenapi/test_agent.py +++ /dev/null @@ -1,471 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import base64 -import time - -import mock -from os_xenapi.client import host_agent -from os_xenapi.client import XenAPI -from oslo_concurrency import processutils -from oslo_utils import uuidutils - -from nova import exception -from nova import test -from nova.virt.xenapi import agent - - -def _get_fake_instance(**kwargs): - system_metadata = [] - for k, v in kwargs.items(): - system_metadata.append({ - "key": k, - "value": v - }) - - return { - "system_metadata": system_metadata, - "uuid": "uuid", - "key_data": "ssh-rsa asdf", - "os_type": "asdf", - } - - -class AgentTestCaseBase(test.NoDBTestCase): - def _create_agent(self, instance, session="session"): - self.session = session - self.virtapi = "virtapi" - self.vm_ref = "vm_ref" - return agent.XenAPIBasedAgent(self.session, self.virtapi, - instance, self.vm_ref) - - -class AgentImageFlagsTestCase(AgentTestCaseBase): - def test_agent_is_present(self): - self.flags(use_agent_default=False, group='xenserver') - instance = {"system_metadata": - [{"key": "image_xenapi_use_agent", "value": "true"}]} - self.assertTrue(agent.should_use_agent(instance)) - - def test_agent_is_disabled(self): - self.flags(use_agent_default=True, group='xenserver') - instance = {"system_metadata": - [{"key": "image_xenapi_use_agent", "value": "false"}]} - self.assertFalse(agent.should_use_agent(instance)) - - def test_agent_uses_default_when_prop_invalid(self): - self.flags(use_agent_default=True, group='xenserver') - instance = {"system_metadata": - [{"key": "image_xenapi_use_agent", "value": "bob"}], - "uuid": "uuid"} - self.assertTrue(agent.should_use_agent(instance)) - - def test_agent_default_not_present(self): - self.flags(use_agent_default=False, group='xenserver') - instance = {"system_metadata": []} - self.assertFalse(agent.should_use_agent(instance)) - - def test_agent_default_present(self): - self.flags(use_agent_default=True, group='xenserver') - instance = {"system_metadata": []} - self.assertTrue(agent.should_use_agent(instance)) - - -class SysMetaKeyTestBase(object): - key = None - - def _create_agent_with_value(self, value): - kwargs = {self.key: value} - instance = _get_fake_instance(**kwargs) - return self._create_agent(instance) - - def test_get_sys_meta_key_true(self): - agent = self._create_agent_with_value("true") - self.assertTrue(agent._get_sys_meta_key(self.key)) - - def test_get_sys_meta_key_false(self): - agent = self._create_agent_with_value("False") - self.assertFalse(agent._get_sys_meta_key(self.key)) - - def test_get_sys_meta_key_invalid_is_false(self): - agent = self._create_agent_with_value("invalid") -
self.assertFalse(agent._get_sys_meta_key(self.key)) - - def test_get_sys_meta_key_missing_is_false(self): - instance = _get_fake_instance() - agent = self._create_agent(instance) - self.assertFalse(agent._get_sys_meta_key(self.key)) - - -class SkipSshFlagTestCase(SysMetaKeyTestBase, AgentTestCaseBase): - key = "image_xenapi_skip_agent_inject_ssh" - - def test_skip_ssh_key_inject(self): - agent = self._create_agent_with_value("True") - self.assertTrue(agent._skip_ssh_key_inject()) - - -class SkipFileInjectAtBootFlagTestCase(SysMetaKeyTestBase, AgentTestCaseBase): - key = "image_xenapi_skip_agent_inject_files_at_boot" - - def test_skip_inject_files_at_boot(self): - agent = self._create_agent_with_value("True") - self.assertTrue(agent._skip_inject_files_at_boot()) - - -class InjectSshTestCase(AgentTestCaseBase): - @mock.patch.object(agent.XenAPIBasedAgent, 'inject_file') - def test_inject_ssh_key_succeeds(self, mock_inject_file): - instance = _get_fake_instance() - agent = self._create_agent(instance) - - agent.inject_ssh_key() - mock_inject_file.assert_called_once_with("/root/.ssh/authorized_keys", - "\n# The following ssh key " - "was injected by Nova" - "\nssh-rsa asdf\n") - - @mock.patch.object(agent.XenAPIBasedAgent, 'inject_file') - def _test_inject_ssh_key_skipped(self, instance, mock_inject_file): - agent = self._create_agent(instance) - - # make sure it's not called - agent.inject_ssh_key() - mock_inject_file.assert_not_called() - - def test_inject_ssh_key_skipped_no_key_data(self): - instance = _get_fake_instance() - instance["key_data"] = None - self._test_inject_ssh_key_skipped(instance) - - def test_inject_ssh_key_skipped_windows(self): - instance = _get_fake_instance() - instance["os_type"] = "windows" - self._test_inject_ssh_key_skipped(instance) - - def test_inject_ssh_key_skipped_cloud_init_present(self): - instance = _get_fake_instance( - image_xenapi_skip_agent_inject_ssh="True") - self._test_inject_ssh_key_skipped(instance) - - -class FileInjectionTestCase(AgentTestCaseBase): - @mock.patch.object(agent.XenAPIBasedAgent, '_call_agent') - def test_inject_file(self, mock_call_agent): - instance = _get_fake_instance() - agent = self._create_agent(instance) - - b64_path = base64.b64encode(b'path') - b64_contents = base64.b64encode(b'contents') - - agent.inject_file("path", "contents") - mock_call_agent.assert_called_once_with(host_agent.inject_file, - {'b64_contents': b64_contents, - 'b64_path': b64_path}) - - @mock.patch.object(agent.XenAPIBasedAgent, 'inject_file') - def test_inject_files(self, mock_inject_file): - instance = _get_fake_instance() - agent = self._create_agent(instance) - - files = [("path1", "content1"), ("path2", "content2")] - - agent.inject_files(files) - mock_inject_file.assert_has_calls( - [mock.call("path1", "content1"), mock.call("path2", "content2")]) - - @mock.patch.object(agent.XenAPIBasedAgent, 'inject_file') - def test_inject_files_skipped_when_cloud_init_installed(self, - mock_inject_file): - instance = _get_fake_instance( - image_xenapi_skip_agent_inject_files_at_boot="True") - agent = self._create_agent(instance) - - files = [("path1", "content1"), ("path2", "content2")] - - agent.inject_files(files) - mock_inject_file.assert_not_called() - - -class RebootRetryTestCase(AgentTestCaseBase): - @mock.patch.object(agent, '_wait_for_new_dom_id') - def test_retry_on_reboot(self, mock_wait): - mock_session = mock.Mock() - mock_session.VM.get_domid.return_value = "fake_dom_id" - agent = self._create_agent(None, mock_session) - mock_method =
mock.Mock().method() - mock_method.side_effect = [XenAPI.Failure(["REBOOT: fake"]), - {"returncode": '0', "message": "done"}] - result = agent._call_agent(mock_method) - self.assertEqual("done", result) - self.assertTrue(mock_session.VM.get_domid.called) - self.assertEqual(2, mock_method.call_count) - mock_wait.assert_called_once_with(mock_session, self.vm_ref, - "fake_dom_id", mock_method) - - @mock.patch.object(time, 'sleep') - @mock.patch.object(time, 'time') - def test_wait_for_new_dom_id_found(self, mock_time, mock_sleep): - mock_session = mock.Mock() - mock_session.VM.get_domid.return_value = "new" - - agent._wait_for_new_dom_id(mock_session, "vm_ref", "old", "method") - - mock_session.VM.get_domid.assert_called_once_with("vm_ref") - self.assertFalse(mock_sleep.called) - - @mock.patch.object(time, 'sleep') - @mock.patch.object(time, 'time') - def test_wait_for_new_dom_id_after_retry(self, mock_time, mock_sleep): - self.flags(agent_timeout=3, group="xenserver") - mock_time.return_value = 0 - mock_session = mock.Mock() - old = "40" - new = "42" - mock_session.VM.get_domid.side_effect = [old, "-1", new] - - agent._wait_for_new_dom_id(mock_session, "vm_ref", old, "method") - - mock_session.VM.get_domid.assert_called_with("vm_ref") - self.assertEqual(3, mock_session.VM.get_domid.call_count) - self.assertEqual(2, mock_sleep.call_count) - - @mock.patch.object(time, 'sleep') - @mock.patch.object(time, 'time') - def test_wait_for_new_dom_id_timeout(self, mock_time, mock_sleep): - self.flags(agent_timeout=3, group="xenserver") - - def fake_time(): - fake_time.time = fake_time.time + 1 - return fake_time.time - - fake_time.time = 0 - mock_time.side_effect = fake_time - mock_session = mock.Mock() - mock_session.VM.get_domid.return_value = "old" - mock_method = mock.Mock().method() - mock_method.__name__ = "mock_method" - - self.assertRaises(exception.AgentTimeout, - agent._wait_for_new_dom_id, - mock_session, "vm_ref", "old", mock_method) - - self.assertEqual(4, mock_session.VM.get_domid.call_count) - - -class SetAdminPasswordTestCase(AgentTestCaseBase): - @mock.patch.object(agent.XenAPIBasedAgent, '_call_agent') - @mock.patch("nova.virt.xenapi.agent.SimpleDH") - def test_exchange_key_with_agent(self, mock_simple_dh, mock_call_agent): - agent = self._create_agent(None) - instance_mock = mock_simple_dh() - instance_mock.get_public.return_value = 4321 - mock_call_agent.return_value = "1234" - - result = agent._exchange_key_with_agent() - - mock_call_agent.assert_called_once_with(host_agent.key_init, - {"pub": "4321"}, - success_codes=['D0'], - ignore_errors=False) - result.compute_shared.assert_called_once_with(1234) - - @mock.patch.object(agent.XenAPIBasedAgent, '_call_agent') - @mock.patch.object(agent.XenAPIBasedAgent, - '_save_instance_password_if_sshkey_present') - @mock.patch.object(agent.XenAPIBasedAgent, '_exchange_key_with_agent') - def test_set_admin_password_works(self, mock_exchange, mock_save, - mock_call_agent): - mock_dh = mock.Mock(spec_set=agent.SimpleDH) - mock_dh.encrypt.return_value = "enc_pass" - mock_exchange.return_value = mock_dh - agent_inst = self._create_agent(None) - - agent_inst.set_admin_password("new_pass") - - mock_dh.encrypt.assert_called_once_with("new_pass\n") - mock_call_agent.assert_called_once_with(host_agent.password, - {'enc_pass': 'enc_pass'}) - mock_save.assert_called_once_with("new_pass") - - @mock.patch.object(agent.XenAPIBasedAgent, '_add_instance_fault') - @mock.patch.object(agent.XenAPIBasedAgent, '_exchange_key_with_agent') - def 
test_set_admin_password_silently_fails(self, mock_exchange, - mock_add_fault): - error = exception.AgentTimeout(method="fake") - mock_exchange.side_effect = error - agent_inst = self._create_agent(None) - - agent_inst.set_admin_password("new_pass") - - mock_add_fault.assert_called_once_with(error, mock.ANY) - - @mock.patch('oslo_concurrency.processutils.execute') - def test_run_ssl_successful(self, mock_execute): - mock_execute.return_value = ('0', - '*** WARNING : deprecated key derivation used.' - 'Using -iter or -pbkdf2 would be better.') - agent.SimpleDH()._run_ssl('foo') - - @mock.patch('oslo_concurrency.processutils.execute', - side_effect=processutils.ProcessExecutionError( - exit_code=1, stderr=('ERROR: Something bad happened'))) - def test_run_ssl_failure(self, mock_execute): - self.assertRaises(RuntimeError, agent.SimpleDH()._run_ssl, 'foo') - - -class UpgradeRequiredTestCase(test.NoDBTestCase): - def test_less_than(self): - self.assertTrue(agent.is_upgrade_required('1.2.3.4', '1.2.3.5')) - - def test_greater_than(self): - self.assertFalse(agent.is_upgrade_required('1.2.3.5', '1.2.3.4')) - - def test_equal(self): - self.assertFalse(agent.is_upgrade_required('1.2.3.4', '1.2.3.4')) - - def test_non_lexical(self): - self.assertFalse(agent.is_upgrade_required('1.2.3.10', '1.2.3.4')) - - def test_length(self): - self.assertTrue(agent.is_upgrade_required('1.2.3', '1.2.3.4')) - - -@mock.patch.object(uuidutils, 'generate_uuid') -class CallAgentTestCase(AgentTestCaseBase): - def test_call_agent_success(self, mock_uuid): - session = mock.Mock() - instance = {"uuid": "fake"} - addl_args = {"foo": "bar"} - - session.VM.get_domid.return_value = '42' - mock_uuid.return_value = 1 - mock_method = mock.Mock().method() - mock_method.return_value = {'returncode': '4', 'message': "asdf\\r\\n"} - mock_method.__name__ = "mock_method" - - self.assertEqual("asdf", - agent._call_agent(session, instance, "vm_ref", - mock_method, addl_args, timeout=300, - success_codes=['0', '4'])) - - expected_args = {} - expected_args.update(addl_args) - mock_method.assert_called_once_with(session, 1, '42', 300, - **expected_args) - session.VM.get_domid.assert_called_once_with("vm_ref") - - def _call_agent_setup(self, session, mock_uuid, mock_method, - returncode='0', success_codes=None, - exception=None): - session.XenAPI.Failure = XenAPI.Failure - instance = {"uuid": "fake"} - addl_args = {"foo": "bar"} - - session.VM.get_domid.return_value = "42" - mock_uuid.return_value = 1 - if exception: - mock_method.side_effect = exception - else: - mock_method.return_value = {'returncode': returncode, - 'message': "asdf\\r\\n"} - - return agent._call_agent(session, instance, "vm_ref", mock_method, - addl_args, success_codes=success_codes) - - def _assert_agent_called(self, session, mock_uuid, mock_method): - expected_args = {"foo": "bar"} - mock_uuid.assert_called_once_with() - mock_method.assert_called_once_with(session, 1, '42', 30, - **expected_args) - session.VM.get_domid.assert_called_once_with("vm_ref") - - def test_call_agent_works_with_defaults(self, mock_uuid): - session = mock.Mock() - mock_method = mock.Mock().method() - mock_method.__name__ = "mock_method" - self._call_agent_setup(session, mock_uuid, mock_method) - self._assert_agent_called(session, mock_uuid, mock_method) - - def test_call_agent_fails_with_timeout(self, mock_uuid): - session = mock.Mock() - mock_method = mock.Mock().method() - mock_method.__name__ = "mock_method" - self.assertRaises(exception.AgentTimeout, self._call_agent_setup, - session, 
mock_uuid, mock_method, - exception=XenAPI.Failure(["TIMEOUT:fake"])) - self._assert_agent_called(session, mock_uuid, mock_method) - - def test_call_agent_fails_with_not_implemented(self, mock_uuid): - session = mock.Mock() - mock_method = mock.Mock().method() - mock_method.__name__ = "mock_method" - self.assertRaises(exception.AgentNotImplemented, - self._call_agent_setup, - session, mock_uuid, mock_method, - exception=XenAPI.Failure(["NOT IMPLEMENTED:"])) - self._assert_agent_called(session, mock_uuid, mock_method) - - def test_call_agent_fails_with_other_error(self, mock_uuid): - session = mock.Mock() - mock_method = mock.Mock().method() - mock_method.__name__ = "mock_method" - self.assertRaises(exception.AgentError, self._call_agent_setup, - session, mock_uuid, mock_method, - exception=XenAPI.Failure(["asdf"])) - self._assert_agent_called(session, mock_uuid, mock_method) - - def test_call_agent_fails_with_returned_error(self, mock_uuid): - session = mock.Mock() - mock_method = mock.Mock().method() - mock_method.__name__ = "mock_method" - self.assertRaises(exception.AgentError, self._call_agent_setup, - session, mock_uuid, mock_method, returncode='42') - self._assert_agent_called(session, mock_uuid, mock_method) - - -class XenAPIBasedAgent(AgentTestCaseBase): - @mock.patch.object(agent.XenAPIBasedAgent, "_add_instance_fault") - @mock.patch.object(agent, "_call_agent") - def test_call_agent_swallows_error(self, mock_call_agent, - mock_add_instance_fault): - fake_error = exception.AgentError(method="bob") - mock_call_agent.side_effect = fake_error - - instance = _get_fake_instance() - agent = self._create_agent(instance) - - agent._call_agent("bob") - - mock_call_agent.assert_called_once_with(agent.session, agent.instance, - agent.vm_ref, "bob", None, None, None) - mock_add_instance_fault.assert_called_once_with(fake_error, mock.ANY) - - @mock.patch.object(agent.XenAPIBasedAgent, "_add_instance_fault") - @mock.patch.object(agent, "_call_agent") - def test_call_agent_throws_error(self, mock_call_agent, - mock_add_instance_fault): - fake_error = exception.AgentError(method="bob") - mock_call_agent.side_effect = fake_error - - instance = _get_fake_instance() - agent = self._create_agent(instance) - - self.assertRaises(exception.AgentError, agent._call_agent, - "bob", ignore_errors=False) - - mock_call_agent.assert_called_once_with(agent.session, agent.instance, - agent.vm_ref, "bob", None, None, None) - self.assertFalse(mock_add_instance_fault.called) diff --git a/nova/tests/unit/virt/xenapi/test_driver.py b/nova/tests/unit/virt/xenapi/test_driver.py deleted file mode 100644 index 5d45299e4bb4..000000000000 --- a/nova/tests/unit/virt/xenapi/test_driver.py +++ /dev/null @@ -1,433 +0,0 @@ -# Copyright (c) 2013 Rackspace Hosting -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
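The CallAgentTestCase above pins down how agent failures surface: the xapi plugin raises XenAPI.Failure, and the first details string is prefix-matched to choose a Nova exception ('TIMEOUT:' becomes AgentTimeout, 'NOT IMPLEMENTED:' becomes AgentNotImplemented, anything else AgentError; a returncode outside success_codes also becomes AgentError). The sketch below is inferred from those tests rather than taken from nova.virt.xenapi.agent, and its classes are toy stand-ins for nova.exception and os_xenapi's XenAPI.Failure:

class AgentTimeout(Exception):
    pass


class AgentNotImplemented(Exception):
    pass


class AgentError(Exception):
    pass


class Failure(Exception):
    # Mimics XenAPI.Failure: details is a list whose first element
    # carries the error string reported by the in-guest agent plugin.
    def __init__(self, details):
        super(Failure, self).__init__(details)
        self.details = details


def translate_failure(failure):
    detail = failure.details[0]
    if detail.startswith('TIMEOUT:'):
        return AgentTimeout()
    if detail.startswith('NOT IMPLEMENTED:'):
        return AgentNotImplemented()
    # 'REBOOT:' failures are retried by _call_agent instead of raised,
    # per RebootRetryTestCase above; everything else is a plain error.
    return AgentError()


assert isinstance(translate_failure(Failure(['TIMEOUT:fake'])), AgentTimeout)
assert isinstance(translate_failure(Failure(['NOT IMPLEMENTED:'])),
                  AgentNotImplemented)
assert isinstance(translate_failure(Failure(['asdf'])), AgentError)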
- -import mock -import os_resource_classes as orc -from oslo_utils.fixture import uuidsentinel as uuids -from oslo_utils import units - -from nova.compute import provider_tree -from nova import conf -from nova import exception -from nova.objects import fields as obj_fields -from nova.tests.unit.virt.xenapi import stubs -from nova.virt import driver -from nova.virt import fake -from nova.virt import xenapi -from nova.virt.xenapi import driver as xenapi_driver -from nova.virt.xenapi import host - -CONF = conf.CONF - - -class XenAPIDriverTestCase(stubs.XenAPITestBaseNoDB): - """Unit tests for Driver operations.""" - - def _get_driver(self): - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - self.flags(connection_url='http://localhost', - connection_password='test_pass', group='xenserver') - return xenapi.XenAPIDriver(fake.FakeVirtAPI(), False) - - def host_stats(self, refresh=True): - return {'host_memory_total': 3 * units.Mi, - 'host_memory_free_computed': 2 * units.Mi, - 'disk_total': 5 * units.Gi, - 'disk_used': 2 * units.Gi, - 'disk_allocated': 4 * units.Gi, - 'host_hostname': 'somename', - 'supported_instances': obj_fields.Architecture.X86_64, - 'host_cpu_info': {'cpu_count': 50}, - 'cpu_model': { - 'vendor': 'GenuineIntel', - 'model': 'Intel(R) Xeon(R) CPU X3430 @ 2.40GHz', - 'topology': { - 'sockets': 1, - 'cores': 4, - 'threads': 1, - }, - 'features': [ - 'fpu', 'de', 'tsc', 'msr', 'pae', 'mce', - 'cx8', 'apic', 'sep', 'mtrr', 'mca', - 'cmov', 'pat', 'clflush', 'acpi', 'mmx', - 'fxsr', 'sse', 'sse2', 'ss', 'ht', - 'nx', 'constant_tsc', 'nonstop_tsc', - 'aperfmperf', 'pni', 'vmx', 'est', 'ssse3', - 'sse4_1', 'sse4_2', 'popcnt', 'hypervisor', - 'ida', 'tpr_shadow', 'vnmi', 'flexpriority', - 'ept', 'vpid', - ], - }, - 'vcpus_used': 10, - 'pci_passthrough_devices': '', - 'host_other-config': {'iscsi_iqn': 'someiqn'}, - 'vgpu_stats': { - 'c8328467-badf-43d8-8e28-0e096b0f88b1': - {'uuid': '6444c6ee-3a49-42f5-bebb-606b52175e67', - 'type_name': 'Intel GVT-g', - 'max_heads': 1, - 'total': 7, - 'remaining': 7, - }, - }} - - def test_available_resource(self): - driver = self._get_driver() - driver._session.product_version = (6, 8, 2) - - with mock.patch.object(driver.host_state, 'get_host_stats', - side_effect=self.host_stats) as mock_get: - - resources = driver.get_available_resource(None) - self.assertEqual(6008002, resources['hypervisor_version']) - self.assertEqual(50, resources['vcpus']) - self.assertEqual(3, resources['memory_mb']) - self.assertEqual(5, resources['local_gb']) - self.assertEqual(10, resources['vcpus_used']) - self.assertEqual(3 - 2, resources['memory_mb_used']) - self.assertEqual(2, resources['local_gb_used']) - self.assertEqual('XenServer', resources['hypervisor_type']) - self.assertEqual('somename', resources['hypervisor_hostname']) - self.assertEqual(1, resources['disk_available_least']) - mock_get.assert_called_once_with(refresh=True) - - def test_set_bootable(self): - driver = self._get_driver() - - with mock.patch.object(driver._vmops, - 'set_bootable') as mock_set_bootable: - driver.set_bootable('inst', True) - mock_set_bootable.assert_called_once_with('inst', True) - - def test_post_interrupted_snapshot_cleanup(self): - driver = self._get_driver() - fake_vmops_cleanup = mock.Mock() - driver._vmops.post_interrupted_snapshot_cleanup = fake_vmops_cleanup - - driver.post_interrupted_snapshot_cleanup("context", "instance") - - fake_vmops_cleanup.assert_called_once_with("context", "instance") - - def test_public_api_signatures(self): - inst = self._get_driver() 
- self.assertPublicAPISignatures(driver.ComputeDriver(None), inst) - - def test_get_volume_connector(self): - ip = '123.123.123.123' - driver = self._get_driver() - self.flags(connection_url='http://%s' % ip, - connection_password='test_pass', group='xenserver') - with mock.patch.object(driver.host_state, 'get_host_stats', - side_effect=self.host_stats) as mock_get: - - connector = driver.get_volume_connector({'uuid': 'fake'}) - self.assertIn('ip', connector) - self.assertEqual(connector['ip'], ip) - self.assertIn('initiator', connector) - self.assertEqual(connector['initiator'], 'someiqn') - mock_get.assert_called_once_with(refresh=True) - - def test_get_block_storage_ip(self): - my_ip = '123.123.123.123' - connection_ip = '124.124.124.124' - driver = self._get_driver() - self.flags(connection_url='http://%s' % connection_ip, - group='xenserver') - self.flags(my_ip=my_ip, my_block_storage_ip=my_ip) - - ip = driver._get_block_storage_ip() - self.assertEqual(connection_ip, ip) - - def test_get_block_storage_ip_conf(self): - driver = self._get_driver() - my_ip = '123.123.123.123' - my_block_storage_ip = '124.124.124.124' - self.flags(my_ip=my_ip, my_block_storage_ip=my_block_storage_ip) - - ip = driver._get_block_storage_ip() - self.assertEqual(my_block_storage_ip, ip) - - @mock.patch.object(xenapi_driver, 'invalid_option') - @mock.patch.object(xenapi_driver.vm_utils, 'ensure_correct_host') - def test_invalid_options(self, mock_ensure, mock_invalid): - driver = self._get_driver() - self.flags(independent_compute=True, group='xenserver') - self.flags(check_host=True, group='xenserver') - self.flags(flat_injected=True) - self.flags(default_ephemeral_format='vfat') - - driver.init_host('host') - - expected_calls = [ - mock.call('CONF.xenserver.check_host', False), - mock.call('CONF.flat_injected', False), - mock.call('CONF.default_ephemeral_format', 'ext3')] - mock_invalid.assert_has_calls(expected_calls) - - @mock.patch.object(xenapi_driver.vm_utils, 'cleanup_attached_vdis') - @mock.patch.object(xenapi_driver.vm_utils, 'ensure_correct_host') - def test_independent_compute_no_vdi_cleanup(self, mock_ensure, - mock_cleanup): - driver = self._get_driver() - self.flags(independent_compute=True, group='xenserver') - self.flags(check_host=False, group='xenserver') - self.flags(flat_injected=False) - - driver.init_host('host') - - self.assertFalse(mock_cleanup.called) - self.assertFalse(mock_ensure.called) - - @mock.patch.object(xenapi_driver.vm_utils, 'cleanup_attached_vdis') - @mock.patch.object(xenapi_driver.vm_utils, 'ensure_correct_host') - def test_dependent_compute_vdi_cleanup(self, mock_ensure, mock_cleanup): - driver = self._get_driver() - self.assertFalse(mock_cleanup.called) - self.flags(independent_compute=False, group='xenserver') - self.flags(check_host=True, group='xenserver') - - driver.init_host('host') - - self.assertTrue(mock_cleanup.called) - self.assertTrue(mock_ensure.called) - - @mock.patch.object(xenapi_driver.vmops.VMOps, 'attach_interface') - def test_attach_interface(self, mock_attach_interface): - driver = self._get_driver() - driver.attach_interface('fake_context', 'fake_instance', - 'fake_image_meta', 'fake_vif') - mock_attach_interface.assert_called_once_with('fake_instance', - 'fake_vif') - - @mock.patch.object(xenapi_driver.vmops.VMOps, 'detach_interface') - def test_detach_interface(self, mock_detach_interface): - driver = self._get_driver() - driver.detach_interface('fake_context', 'fake_instance', 'fake_vif') - 
mock_detach_interface.assert_called_once_with('fake_instance', - 'fake_vif') - - @mock.patch.object(xenapi_driver.vmops.VMOps, - 'post_live_migration_at_source') - def test_post_live_migration_at_source(self, mock_post_live_migration): - driver = self._get_driver() - driver.post_live_migration_at_source('fake_context', 'fake_instance', - 'fake_network_info') - mock_post_live_migration.assert_called_once_with( - 'fake_context', 'fake_instance', 'fake_network_info') - - @mock.patch.object(xenapi_driver.vmops.VMOps, - 'rollback_live_migration_at_destination') - def test_rollback_live_migration_at_destination(self, mock_rollback): - driver = self._get_driver() - driver.rollback_live_migration_at_destination( - 'fake_context', 'fake_instance', 'fake_network_info', - 'fake_block_device') - mock_rollback.assert_called_once_with('fake_instance', - 'fake_network_info', - 'fake_block_device') - - @mock.patch.object(host.HostState, 'get_host_stats') - def test_update_provider_tree(self, mock_get_stats): - # Add a wrinkle such that cpu_allocation_ratio is configured to a - # non-default value and overrides initial_cpu_allocation_ratio. - self.flags(cpu_allocation_ratio=1.0) - # Add a wrinkle such that reserved_host_memory_mb is set to a - # non-default value. - self.flags(reserved_host_memory_mb=2048) - expected_reserved_disk = ( - xenapi_driver.XenAPIDriver._get_reserved_host_disk_gb_from_config() - ) - expected_inv = { - orc.VCPU: { - 'total': 50, - 'min_unit': 1, - 'max_unit': 50, - 'step_size': 1, - 'allocation_ratio': CONF.cpu_allocation_ratio, - 'reserved': CONF.reserved_host_cpus, - }, - orc.MEMORY_MB: { - 'total': 3, - 'min_unit': 1, - 'max_unit': 3, - 'step_size': 1, - 'allocation_ratio': CONF.initial_ram_allocation_ratio, - 'reserved': CONF.reserved_host_memory_mb, - }, - orc.DISK_GB: { - 'total': 5, - 'min_unit': 1, - 'max_unit': 5, - 'step_size': 1, - 'allocation_ratio': CONF.initial_disk_allocation_ratio, - 'reserved': expected_reserved_disk, - }, - orc.VGPU: { - 'total': 7, - 'min_unit': 1, - 'max_unit': 1, - 'step_size': 1, - }, - } - - mock_get_stats.side_effect = self.host_stats - drv = self._get_driver() - pt = provider_tree.ProviderTree() - nodename = 'fake-node' - pt.new_root(nodename, uuids.rp_uuid) - drv.update_provider_tree(pt, nodename) - inv = pt.data(nodename).inventory - mock_get_stats.assert_called_once_with(refresh=True) - self.assertEqual(expected_inv, inv) - - @mock.patch.object(host.HostState, 'get_host_stats') - def test_update_provider_tree_no_vgpu(self, mock_get_stats): - # Test when there are no vGPU resources in the inventory. - host_stats = self.host_stats() - host_stats.update(vgpu_stats={}) - mock_get_stats.return_value = host_stats - - drv = self._get_driver() - pt = provider_tree.ProviderTree() - nodename = 'fake-node' - pt.new_root(nodename, uuids.rp_uuid) - drv.update_provider_tree(pt, nodename) - inv = pt.data(nodename).inventory - - # check if the inventory data does NOT contain VGPU. - self.assertNotIn(orc.VGPU, inv) - - def test_get_vgpu_total_single_grp(self): - # Test when only one group included in the host_stats. - vgpu_stats = { - 'grp_uuid_1': { - 'total': 7 - } - } - - drv = self._get_driver() - vgpu_total = drv._get_vgpu_total(vgpu_stats) - - self.assertEqual(7, vgpu_total) - - def test_get_vgpu_total_multiple_grps(self): - # Test when multiple groups included in the host_stats. 
- vgpu_stats = { - 'grp_uuid_1': { - 'total': 7 - }, - 'grp_uuid_2': { - 'total': 4 - } - } - - drv = self._get_driver() - vgpu_total = drv._get_vgpu_total(vgpu_stats) - - self.assertEqual(11, vgpu_total) - - def test_get_vgpu_info_no_vgpu_alloc(self): - # no vgpu in allocation. - alloc = { - 'rp1': { - 'resources': { - 'VCPU': 1, - 'MEMORY_MB': 512, - 'DISK_GB': 1, - } - } - } - - drv = self._get_driver() - vgpu_info = drv._get_vgpu_info(alloc) - - self.assertIsNone(vgpu_info) - - @mock.patch.object(host.HostState, 'get_host_stats') - def test_get_vgpu_info_has_vgpu_alloc(self, mock_get_stats): - # Have vgpu in allocation. - alloc = { - 'rp1': { - 'resources': { - 'VCPU': 1, - 'MEMORY_MB': 512, - 'DISK_GB': 1, - 'VGPU': 1, - } - } - } - # The following fake data assumes there are two GPU - # groups, both of which supply the same type of vGPUs. - # If the 1st GPU group has no remaining available vGPUs - # but the 2nd GPU group still has some, - # it should return the uuid from the 2nd GPU group. - vgpu_stats = { - uuids.gpu_group_1: { - 'uuid': uuids.vgpu_type, - 'type_name': 'GRID K180Q', - 'max_heads': 4, - 'total': 2, - 'remaining': 0, - }, - uuids.gpu_group_2: { - 'uuid': uuids.vgpu_type, - 'type_name': 'GRID K180Q', - 'max_heads': 4, - 'total': 2, - 'remaining': 2, - }, - } - - host_stats = self.host_stats() - host_stats.update(vgpu_stats=vgpu_stats) - mock_get_stats.return_value = host_stats - - drv = self._get_driver() - vgpu_info = drv._get_vgpu_info(alloc) - - expected_info = {'gpu_grp_uuid': uuids.gpu_group_2, - 'vgpu_type_uuid': uuids.vgpu_type} - self.assertEqual(expected_info, vgpu_info) - - @mock.patch.object(host.HostState, 'get_host_stats') - def test_get_vgpu_info_has_vgpu_alloc_except(self, mock_get_stats): - # Allocated vGPU but got exception due to no remaining vGPU. - alloc = { - 'rp1': { - 'resources': { - 'VCPU': 1, - 'MEMORY_MB': 512, - 'DISK_GB': 1, - 'VGPU': 1, - } - } - } - vgpu_stats = { - uuids.gpu_group: { - 'uuid': uuids.vgpu_type, - 'type_name': 'Intel GVT-g', - 'max_heads': 1, - 'total': 7, - 'remaining': 0, - }, - } - - host_stats = self.host_stats() - host_stats.update(vgpu_stats=vgpu_stats) - mock_get_stats.return_value = host_stats - - drv = self._get_driver() - self.assertRaises(exception.ComputeResourcesUnavailable, - drv._get_vgpu_info, - alloc) diff --git a/nova/tests/unit/virt/xenapi/test_network_utils.py b/nova/tests/unit/virt/xenapi/test_network_utils.py deleted file mode 100644 index 5aa660f2a7fd..000000000000 --- a/nova/tests/unit/virt/xenapi/test_network_utils.py +++ /dev/null @@ -1,76 +0,0 @@ - -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
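The _get_vgpu_info tests above imply a simple selection rule: given a VGPU allocation, walk the per-group stats and pick the first GPU group that still has remaining capacity, raising ComputeResourcesUnavailable when every group is exhausted. A minimal sketch of that inferred logic; pick_gpu_group is a hypothetical name, and RuntimeError stands in for Nova's ComputeResourcesUnavailable:

def pick_gpu_group(vgpu_stats):
    # vgpu_stats maps GPU group uuid -> stats dict, as built by the
    # _get_vgpu_stats tests below; take the first group with capacity.
    for grp_uuid, stats in vgpu_stats.items():
        if stats['remaining'] > 0:
            return {'gpu_grp_uuid': grp_uuid,
                    'vgpu_type_uuid': stats['uuid']}
    raise RuntimeError('no remaining vGPU capacity')


vgpu_stats = {
    'gpu_group_1': {'uuid': 'vgpu_type', 'remaining': 0},
    'gpu_group_2': {'uuid': 'vgpu_type', 'remaining': 2},
}
assert pick_gpu_group(vgpu_stats)['gpu_grp_uuid'] == 'gpu_group_2'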
- -import mock - -from nova import exception -from nova.tests.unit.virt.xenapi import stubs -from nova.virt.xenapi import network_utils - - -class NetworkUtilsTestCase(stubs.XenAPITestBaseNoDB): - def test_find_network_with_name_label_works(self): - session = mock.Mock() - session.network.get_by_name_label.return_value = ["net"] - - result = network_utils.find_network_with_name_label(session, "label") - - self.assertEqual("net", result) - session.network.get_by_name_label.assert_called_once_with("label") - - def test_find_network_with_name_returns_none(self): - session = mock.Mock() - session.network.get_by_name_label.return_value = [] - - result = network_utils.find_network_with_name_label(session, "label") - - self.assertIsNone(result) - - def test_find_network_with_name_label_raises(self): - session = mock.Mock() - session.network.get_by_name_label.return_value = ["net", "net2"] - - self.assertRaises(exception.NovaException, - network_utils.find_network_with_name_label, - session, "label") - - def test_find_network_with_bridge_works(self): - session = mock.Mock() - session.network.get_all_records_where.return_value = {"net": "asdf"} - - result = network_utils.find_network_with_bridge(session, "bridge") - - self.assertEqual(result, "net") - expr = 'field "name__label" = "bridge" or field "bridge" = "bridge"' - session.network.get_all_records_where.assert_called_once_with(expr) - - def test_find_network_with_bridge_raises_too_many(self): - session = mock.Mock() - session.network.get_all_records_where.return_value = { - "net": "asdf", - "net2": "asdf2" - } - - self.assertRaises(exception.NovaException, - network_utils.find_network_with_bridge, - session, "bridge") - - def test_find_network_with_bridge_raises_no_networks(self): - session = mock.Mock() - session.network.get_all_records_where.return_value = {} - - self.assertRaises(exception.NovaException, - network_utils.find_network_with_bridge, - session, "bridge") diff --git a/nova/tests/unit/virt/xenapi/test_vgpu.py b/nova/tests/unit/virt/xenapi/test_vgpu.py deleted file mode 100644 index 7ea78356fcb0..000000000000 --- a/nova/tests/unit/virt/xenapi/test_vgpu.py +++ /dev/null @@ -1,206 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from nova import test -from nova.virt.xenapi import host - - -class VGPUTestCase(test.NoDBTestCase): - """Unit tests for Driver operations.""" - @mock.patch.object(host.HostState, 'update_status', - return_value='fake_stats_1') - @mock.patch.object(host.HostState, '_get_vgpu_stats_in_group') - def test_get_vgpu_stats_empty_cfg(self, mock_get, mock_update): - # no vGPU type configured. 
- self.flags(enabled_vgpu_types=[], group='devices') - session = mock.Mock() - - host_obj = host.HostState(session) - stats = host_obj._get_vgpu_stats() - - session.call_xenapi.assert_not_called() - self.assertEqual(stats, {}) - - @mock.patch.object(host.HostState, 'update_status', - return_value='fake_stats_1') - @mock.patch.object(host.HostState, '_get_vgpu_stats_in_group') - def test_get_vgpu_stats_single_type(self, mock_get, mock_update): - # configured single vGPU type - self.flags(enabled_vgpu_types=['type_name_1'], group='devices') - session = mock.Mock() - # multiple GPU groups - session.call_xenapi.side_effect = [ - ['grp_ref1', 'grp_ref2'], # GPU_group.get_all - 'uuid_1', # GPU_group.get_uuid - 'uuid_2', # GPU_group.get_uuid - ] - # Let it return None for the 2nd GPU group for the case - # that it doesn't have the specified vGPU type enabled. - mock_get.side_effect = ['fake_stats_1', None] - host_obj = host.HostState(session) - stats = host_obj._get_vgpu_stats() - - self.assertEqual(session.call_xenapi.call_count, 3) - self.assertEqual(mock_update.call_count, 1) - self.assertEqual(mock_get.call_count, 2) - self.assertEqual(stats, {'uuid_1': 'fake_stats_1'}) - - @mock.patch.object(host.HostState, 'update_status', - return_value='fake_stats_1') - @mock.patch.object(host.HostState, '_get_vgpu_stats_in_group') - def test_get_vgpu_stats_multi_types(self, mock_get, mock_update): - # When multiple vGPU types are configured, it uses the first one. - self.flags(enabled_vgpu_types=['type_name_1', 'type_name_2'], - group='devices') - session = mock.Mock() - session.call_xenapi.side_effect = [ - ['grp_ref1'], # GPU_group.get_all - 'uuid_1', # GPU_group.get_uuid - ] - mock_get.side_effect = ['fake_stats_1'] - host_obj = host.HostState(session) - stats = host_obj._get_vgpu_stats() - - self.assertEqual(session.call_xenapi.call_count, 2) - self.assertEqual(mock_update.call_count, 1) - self.assertEqual(stats, {'uuid_1': 'fake_stats_1'}) - # called with the first vGPU type: 'type_name_1' - mock_get.assert_called_with('grp_ref1', ['type_name_1']) - - @mock.patch.object(host.HostState, 'update_status', - return_value='fake_stats_1') - @mock.patch.object(host.HostState, '_get_total_vgpu_in_grp', - return_value=7) - def test_get_vgpu_stats_in_group(self, mock_get, mock_update): - # Test it will return vGPU stat for the enabled vGPU type. - enabled_vgpu_types = ['type_name_2'] - session = mock.Mock() - session.call_xenapi.side_effect = [ - ['type_ref_1', 'type_ref_2'], # GPU_group.get_enabled_VGPU_types - 'type_name_1', # VGPU_type.get_model_name - 'type_name_2', # VGPU_type.get_model_name - 'type_uuid_2', # VGPU_type.get_uuid - '4', # VGPU_type.get_max_heads - '6', # GPU_group.get_remaining_capacity - ] - host_obj = host.HostState(session) - - stats = host_obj._get_vgpu_stats_in_group('grp_ref', - enabled_vgpu_types) - - expect_stats = {'uuid': 'type_uuid_2', - 'type_name': 'type_name_2', - 'max_heads': 4, - 'total': 7, - 'remaining': 6, - } - self.assertEqual(session.call_xenapi.call_count, 6) - # It should get_uuid for the vGPU type passed via *enabled_vgpu_types* - # (the arg for get_uuid should be 'type_ref_2').
- get_uuid_call = [mock.call('VGPU_type.get_uuid', 'type_ref_2')] - session.call_xenapi.assert_has_calls(get_uuid_call) - mock_get.assert_called_once() - self.assertEqual(expect_stats, stats) - - @mock.patch.object(host.HostState, 'update_status') - @mock.patch.object(host.HostState, '_get_total_vgpu_in_grp', - return_value=7) - def test_get_vgpu_stats_in_group_multiple(self, mock_get, mock_update): - # Test when multiple vGPU types are enabled in the same group. - # It should only return the first vGPU type's stats. - enabled_vgpu_types = ['type_name_1', 'type_name_2'] - session = mock.Mock() - session.call_xenapi.side_effect = [ - ['type_ref_1', 'type_ref_2'], # GPU_group.get_enabled_VGPU_types - 'type_name_1', # VGPU_type.get_model_name - 'type_name_2', # VGPU_type.get_model_name - 'type_uuid_1', # VGPU_type.get_uuid - '4', # VGPU_type.get_max_heads - '6', # GPU_group.get_remaining_capacity - ] - host_obj = host.HostState(session) - - stats = host_obj._get_vgpu_stats_in_group('grp_ref', - enabled_vgpu_types) - - expect_stats = { - 'uuid': 'type_uuid_1', - 'type_name': 'type_name_1', - 'max_heads': 4, - 'total': 7, - 'remaining': 6, - } - self.assertEqual(session.call_xenapi.call_count, 6) - # It should call get_uuid for the first vGPU type (the arg for get_uuid - # should be 'type_ref_1'). - get_uuid_call = [mock.call('VGPU_type.get_uuid', 'type_ref_1')] - session.call_xenapi.assert_has_calls(get_uuid_call) - mock_get.assert_called_once() - self.assertEqual(expect_stats, stats) - - @mock.patch.object(host.HostState, 'update_status') - @mock.patch.object(host.HostState, '_get_total_vgpu_in_grp', - return_value=7) - def test_get_vgpu_stats_in_group_cfg_not_in_grp(self, mock_get, - mock_update): - # Test when enabled_vgpu_types is not a valid - # type belonging to the GPU group. It will return None. - enabled_vgpu_types = ['bad_type_name'] - session = mock.Mock() - session.call_xenapi.side_effect = [ - ['type_ref_1', 'type_ref_2'], # GPU_group.get_enabled_VGPU_types - 'type_name_1', # VGPU_type.get_model_name - 'type_name_2', # VGPU_type.get_model_name - ] - host_obj = host.HostState(session) - - stats = host_obj._get_vgpu_stats_in_group('grp_ref', - enabled_vgpu_types) - - expect_stats = None - self.assertEqual(session.call_xenapi.call_count, 3) - mock_get.assert_not_called() - self.assertEqual(expect_stats, stats) - - @mock.patch.object(host.HostState, 'update_status') - def test_get_total_vgpu_in_grp(self, mock_update): - session = mock.Mock() - # The fake PGPU records returned from the call_xenapi call - # "PGPU.get_all_records_where". - pgpu_records = { - 'pgpu_ref1': { - 'enabled_VGPU_types': ['type_ref1', 'type_ref2'], - 'supported_VGPU_max_capacities': { - 'type_ref1': '1', - 'type_ref2': '3', - } - }, - 'pgpu_ref2': { - 'enabled_VGPU_types': ['type_ref1', 'type_ref2'], - 'supported_VGPU_max_capacities': { - 'type_ref1': '1', - 'type_ref2': '3', - } - } - } - session.call_xenapi.return_value = pgpu_records - host_obj = host.HostState(session) - - total = host_obj._get_total_vgpu_in_grp('grp_ref', 'type_ref1') - - session.call_xenapi.assert_called_with( - 'PGPU.get_all_records_where', 'field "GPU_group" = "grp_ref"') - # The total amount of VGPUs is equal to the sum of available VGPUs of - # 'type_ref1' in all PGPUs.
- self.assertEqual(total, 2) diff --git a/nova/tests/unit/virt/xenapi/test_vif.py b/nova/tests/unit/virt/xenapi/test_vif.py deleted file mode 100644 index 66eddeb6c638..000000000000 --- a/nova/tests/unit/virt/xenapi/test_vif.py +++ /dev/null @@ -1,554 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock - -from nova.compute import power_state -from nova import exception -from nova import test -from nova.tests.unit.virt.xenapi import stubs -from nova.virt.xenapi import network_utils -from nova.virt.xenapi import vif -from nova.virt.xenapi import vm_utils -import os_xenapi - - -fake_vif = { - 'created_at': None, - 'updated_at': None, - 'deleted_at': None, - 'deleted': 0, - 'id': '123456789123', - 'address': '00:00:00:00:00:00', - 'network_id': 123, - 'instance_uuid': 'fake-uuid', - 'uuid': 'fake-uuid-2', -} - - -def fake_call_xenapi(method, *args): - if method == "VM.get_VIFs": - return ["fake_vif_ref", "fake_vif_ref_A2"] - if method == "VIF.get_record": - if args[0] == "fake_vif_ref": - return {'uuid': fake_vif['uuid'], - 'MAC': fake_vif['address'], - 'network': 'fake_network', - 'other_config': {'neutron-port-id': fake_vif['id']} - } - else: - raise exception.Exception("Failed get vif record") - if method == "VIF.unplug": - return - if method == "VIF.destroy": - if args[0] == "fake_vif_ref": - return - else: - raise exception.Exception("unplug vif failed") - if method == "VIF.create": - if args[0] == "fake_vif_rec": - return "fake_vif_ref" - else: - raise exception.Exception("VIF existed") - return "Unexpected call_xenapi: %s.%s" % (method, args) - - -class XenVIFDriverTestBase(stubs.XenAPITestBaseNoDB): - def setUp(self): - super(XenVIFDriverTestBase, self).setUp() - self._session = mock.Mock() - self._session.call_xenapi.side_effect = fake_call_xenapi - - def mock_patch_object(self, target, attribute, return_val=None, - side_effect=None): - """Utility function to mock an object's attribute at runtime: - Some methods are dynamic, so standard mocking does not work - and we need to mock them at runtime. - e.g. self._session.VIF.get_record which is dynamically - created via the override function of __getattr__.
- """ - - patcher = mock.patch.object(target, attribute, - return_value=return_val, - side_effect=side_effect) - mock_one = patcher.start() - self.addCleanup(patcher.stop) - return mock_one - - -class XenVIFDriverTestCase(XenVIFDriverTestBase): - def setUp(self): - super(XenVIFDriverTestCase, self).setUp() - self.base_driver = vif.XenVIFDriver(self._session) - - def test_get_vif_ref(self): - vm_ref = "fake_vm_ref" - vif_ref = 'fake_vif_ref' - ret_vif_ref = self.base_driver._get_vif_ref(fake_vif, vm_ref) - self.assertEqual(vif_ref, ret_vif_ref) - - expected = [mock.call('VM.get_VIFs', vm_ref), - mock.call('VIF.get_record', vif_ref)] - self.assertEqual(expected, self._session.call_xenapi.call_args_list) - - def test_get_vif_ref_none_and_exception(self): - vm_ref = "fake_vm_ref" - vif = {'address': "no_match_vif_address"} - ret_vif_ref = self.base_driver._get_vif_ref(vif, vm_ref) - self.assertIsNone(ret_vif_ref) - - expected = [mock.call('VM.get_VIFs', vm_ref), - mock.call('VIF.get_record', 'fake_vif_ref'), - mock.call('VIF.get_record', 'fake_vif_ref_A2')] - self.assertEqual(expected, self._session.call_xenapi.call_args_list) - - def test_create_vif(self): - vif_rec = "fake_vif_rec" - vm_ref = "fake_vm_ref" - ret_vif_ref = self.base_driver._create_vif(fake_vif, vif_rec, vm_ref) - self.assertEqual("fake_vif_ref", ret_vif_ref) - - expected = [mock.call('VIF.create', vif_rec)] - self.assertEqual(expected, self._session.call_xenapi.call_args_list) - - def test_create_vif_exception(self): - self.assertRaises(exception.NovaException, - self.base_driver._create_vif, - "fake_vif", "missing_vif_rec", "fake_vm_ref") - - @mock.patch.object(vif.XenVIFDriver, 'hot_unplug') - @mock.patch.object(vif.XenVIFDriver, '_get_vif_ref', - return_value='fake_vif_ref') - def test_unplug(self, mock_get_vif_ref, mock_hot_unplug): - instance = {'name': "fake_instance"} - vm_ref = "fake_vm_ref" - self.base_driver.unplug(instance, fake_vif, vm_ref) - expected = [mock.call('VIF.destroy', 'fake_vif_ref')] - self.assertEqual(expected, self._session.call_xenapi.call_args_list) - mock_hot_unplug.assert_called_once_with( - fake_vif, instance, 'fake_vm_ref', 'fake_vif_ref') - - @mock.patch.object(vif.XenVIFDriver, '_get_vif_ref', - return_value='missing_vif_ref') - def test_unplug_exception(self, mock_get_vif_ref): - instance = "fake_instance" - vm_ref = "fake_vm_ref" - self.assertRaises(exception.NovaException, - self.base_driver.unplug, - instance, fake_vif, vm_ref) - - -class XenAPIOpenVswitchDriverTestCase(XenVIFDriverTestBase): - def setUp(self): - super(XenAPIOpenVswitchDriverTestCase, self).setUp() - self.ovs_driver = vif.XenAPIOpenVswitchDriver(self._session) - - @mock.patch.object(vif.XenAPIOpenVswitchDriver, 'hot_plug') - @mock.patch.object(vif.XenVIFDriver, '_create_vif', - return_value='fake_vif_ref') - @mock.patch.object(vif.XenAPIOpenVswitchDriver, - 'create_vif_interim_network') - @mock.patch.object(vif.XenVIFDriver, '_get_vif_ref', return_value=None) - @mock.patch.object(vif.vm_utils, 'lookup', return_value='fake_vm_ref') - def test_plug(self, mock_lookup, mock_get_vif_ref, - mock_create_vif_interim_network, - mock_create_vif, mock_hot_plug): - instance = {'name': "fake_instance_name"} - ret_vif_ref = self.ovs_driver.plug( - instance, fake_vif, vm_ref=None, device=1) - self.assertTrue(mock_lookup.called) - self.assertTrue(mock_get_vif_ref.called) - self.assertTrue(mock_create_vif_interim_network.called) - self.assertTrue(mock_create_vif.called) - self.assertEqual('fake_vif_ref', ret_vif_ref) - 
mock_hot_plug.assert_called_once_with(fake_vif, instance, - 'fake_vm_ref', 'fake_vif_ref') - - @mock.patch.object(vif.vm_utils, 'lookup', return_value=None) - def test_plug_exception(self, mock_lookup): - instance = {'name': "fake_instance_name"} - self.assertRaises(exception.VirtualInterfacePlugException, - self.ovs_driver.plug, instance, fake_vif, - vm_ref=None, device=1) - mock_lookup.assert_called_once_with(self._session, instance['name']) - - @mock.patch.object(vif.XenAPIOpenVswitchDriver, - 'delete_network_and_bridge') - @mock.patch.object(network_utils, 'find_network_with_name_label', - return_value='fake_network') - @mock.patch.object(vif.XenVIFDriver, 'unplug') - def test_unplug(self, mock_super_unplug, - mock_find_network_with_name_label, - mock_delete_network_bridge): - instance = {'name': "fake_instance"} - vm_ref = "fake_vm_ref" - - self.ovs_driver.unplug(instance, fake_vif, vm_ref) - - self.assertTrue(mock_super_unplug.called) - self.assertTrue(mock_delete_network_bridge.called) - - @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_delete_linux_bridge') - @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_delete_linux_port') - @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_br') - @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_port') - @mock.patch.object(network_utils, 'find_network_with_name_label') - @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_get_network_by_vif') - def test_delete_network_and_bridge(self, mock_get_network, - mock_find_network, - mock_ovs_del_port, mock_ovs_del_br, - mock_delete_linux_port, - mock_delete_linux_bridge): - # Delete network and bridge - mock_get_network.return_value = 'fake_network' - instance = {'name': 'fake_instance'} - self._session.network = mock.Mock() - self._session.network.get_VIFs.return_value = None - self.ovs_driver.delete_network_and_bridge(instance, 'fake_vif_id') - self._session.network.get_bridge.assert_called_once_with( - 'fake_network') - self._session.network.destroy.assert_called_once_with('fake_network') - self.assertEqual(mock_ovs_del_port.call_count, 2) - self.assertEqual(mock_delete_linux_port.call_count, 2) - self.assertTrue(mock_delete_linux_bridge.called) - self.assertTrue(mock_ovs_del_br.called) - - @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_delete_linux_bridge') - @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_delete_linux_port') - @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_br') - @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_port') - @mock.patch.object(network_utils, 'find_network_with_name_label') - @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_get_network_by_vif') - def test_delete_network_and_bridge_with_remote_vif_on( - self, - mock_get_network, - mock_find_network, - mock_ovs_del_port, - mock_ovs_del_br, - mock_delete_linux_port, - mock_delete_linux_bridge): - # If VIFs are still attached to the network on remote hosts, the - # network would not be destroyed, but the bridge would still - # be deleted - mock_get_network.return_value = 'fake_network' - instance = {'name': 'fake_instance'} - fake_local_host_ref = 'fake_host_ref' - fake_vif_id = 'fake_vif_id' - expected_qbr_name = 'qbr' + fake_vif_id - self._session.host_ref = fake_local_host_ref - self.mock_patch_object( - self._session.network, 'get_VIFs', - return_val=['fake_vif']) - self.mock_patch_object( - self._session.VIF, 'get_all_records_where', - return_val={'rec': 'fake_rec'}) - self.mock_patch_object( - self._session.VIF, 'get_VM', - return_val='fake_vm_ref') -
self.mock_patch_object( - self._session.network, 'get_bridge', - return_val='fake_bridge') - # The host ref which the remain vif resident on doesn't match the local - # host - self.mock_patch_object( - self._session.VM, 'get_resident_on', - return_val='fake_host_ref_remote') - - self.ovs_driver.delete_network_and_bridge(instance, fake_vif_id) - self._session.network.get_bridge.assert_called_once_with( - 'fake_network') - self._session.network.destroy.assert_not_called() - self.assertEqual(2, mock_ovs_del_port.call_count) - self.assertEqual(2, mock_delete_linux_port.call_count) - mock_delete_linux_bridge.assert_called_once_with(expected_qbr_name) - mock_ovs_del_br.assert_called_once_with(self._session, 'fake_bridge') - - @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_delete_linux_bridge') - @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_delete_linux_port') - @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_br') - @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_port') - @mock.patch.object(network_utils, 'find_network_with_name_label') - @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_get_network_by_vif') - def test_delete_network_and_bridge_abort( - self, - mock_get_network, - mock_find_network, - mock_ovs_del_port, - mock_ovs_del_br, - mock_delete_linux_port, - mock_delete_linux_bridge): - # If still has vifs attached to the network on local hosts, all the - # operations would be abort - mock_get_network.return_value = 'fake_network' - instance = {'name': 'fake_instance'} - fake_local_host_ref = 'fake_host_ref' - self._session.host_ref = fake_local_host_ref - self.mock_patch_object( - self._session.network, 'get_VIFs', - return_val=['fake_vif']) - self.mock_patch_object( - self._session.VIF, 'get_all_records_where', - return_val={'rec': 'fake_rec'}) - self.mock_patch_object( - self._session.VIF, 'get_VM', - return_val='fake_vm_ref') - # The host ref which the remain vif resident on match the local host - self.mock_patch_object( - self._session.VM, 'get_resident_on', - return_val=fake_local_host_ref) - - self.ovs_driver.delete_network_and_bridge(instance, 'fake_vif_id') - self._session.network.get_bridge.assert_called_once_with( - 'fake_network') - self._session.network.destroy.assert_not_called() - mock_ovs_del_port.assert_not_called() - mock_delete_linux_port.assert_not_called() - mock_delete_linux_bridge.assert_not_called() - mock_ovs_del_br.assert_not_called() - - @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_delete_linux_bridge') - @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_delete_linux_port') - @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_br') - @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_port') - @mock.patch.object(network_utils, 'find_network_with_name_label') - @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_get_network_by_vif') - @mock.patch.object(vif.XenAPIOpenVswitchDriver, - '_get_patch_port_pair_names') - def test_delete_network_and_bridge_del_port_exc(self, mock_get_port_name, - mock_get_network, - mock_find_network, - mock_ovs_del_port, - mock_ovs_del_br, - mock_delete_linux_port, - mock_delete_linux_bridge): - # Get an exception when deleting the patch port pair - mock_get_network.return_value = 'fake_network' - instance = {'name': 'fake_instance'} - self._session.network = mock.Mock() - self._session.network.get_VIFs.return_value = None - self._session.network.get_bridge.return_value = 'fake_bridge' - mock_get_port_name.return_value = ['fake_port', 'fake_tap'] - mock_ovs_del_port.side_effect = 
test.TestingException - self.assertRaises(exception.VirtualInterfaceUnplugException, - self.ovs_driver.delete_network_and_bridge, instance, - 'fake_vif_id') - self._session.network.get_bridge.assert_called_once_with( - 'fake_network') - self._session.network.destroy.assert_called_once_with('fake_network') - mock_ovs_del_port.assert_called_once_with(self._session, - 'fake_bridge', - 'fake_port') - mock_delete_linux_port.assert_not_called() - mock_delete_linux_bridge.assert_not_called() - mock_ovs_del_br.assert_not_called() - - @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_delete_linux_bridge') - @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_delete_linux_port') - @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_br') - @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_port') - @mock.patch.object(network_utils, 'find_network_with_name_label') - @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_get_network_by_vif') - @mock.patch.object(vif.XenAPIOpenVswitchDriver, - '_get_patch_port_pair_names') - def test_delete_network_and_bridge_del_br_exc(self, mock_get_port_name, - mock_get_network, - mock_find_network, - mock_ovs_del_port, - mock_ovs_del_br, - mock_delete_linux_port, - mock_delete_linux_bridge): - # Get an exception when deleting the bridge and the patch ports - # existing on this bridge - mock_get_network.return_value = 'fake_network' - instance = {'name': 'fake_instance'} - self._session.network = mock.Mock() - self._session.network.get_VIFs.return_value = None - self._session.network.get_bridge.return_value = 'fake_bridge' - mock_get_port_name.return_value = ['fake_port', 'fake_tap'] - mock_ovs_del_br.side_effect = test.TestingException - self.assertRaises(exception.VirtualInterfaceUnplugException, - self.ovs_driver.delete_network_and_bridge, instance, - 'fake_vif_id') - self._session.network.get_bridge.assert_called_once_with( - 'fake_network') - self._session.network.destroy.assert_called_once_with('fake_network') - mock_ovs_del_port.assert_called_once_with(self._session, - 'fake_bridge', - 'fake_port') - mock_delete_linux_port.assert_not_called() - mock_delete_linux_bridge.assert_not_called() - mock_ovs_del_br.assert_called_once_with(self._session, 'fake_bridge') - - @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_port') - @mock.patch.object(network_utils, 'find_network_with_name_label', - return_value='fake_network') - def test_delete_network_and_bridge_destroy_network_exception( - self, - mock_find_network, - mock_ovs_del_port): - # Get an exception when destroying the network - instance = {'name': "fake_instance"} - self.mock_patch_object( - self._session.network, 'get_VIFs', return_val=None) - self.mock_patch_object( - self._session.network, 'get_bridge', return_val='fake_bridge') - self.mock_patch_object( - self._session.network, 'destroy', - side_effect=test.TestingException) - - self.assertRaises(exception.VirtualInterfaceUnplugException, - self.ovs_driver.delete_network_and_bridge, instance, - 'fake_vif_id') - self.assertTrue(mock_find_network.called) - - @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_device_exists') - @mock.patch.object(os_xenapi.client.host_network, 'brctl_add_if') - @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_create_linux_bridge') - @mock.patch.object(os_xenapi.client.host_network, 'ovs_add_port') - def test_post_start_actions(self, mock_ovs_add_port, - mock_create_linux_bridge, - mock_brctl_add_if, mock_device_exists): - vif_ref = "fake_vif_ref" - instance = {'name': 'fake_instance_name'} - fake_vif_rec = 
{'uuid': fake_vif['uuid'], - 'MAC': fake_vif['address'], - 'network': 'fake_network', - 'other_config': { - 'neutron-port-id': 'fake-neutron-port-id'} - } - mock_VIF_get_record = self.mock_patch_object( - self._session.VIF, 'get_record', return_val=fake_vif_rec) - mock_network_get_bridge = self.mock_patch_object( - self._session.network, 'get_bridge', - return_val='fake_bridge_name') - mock_network_get_uuid = self.mock_patch_object( - self._session.network, 'get_uuid', - return_val='fake_network_uuid') - mock_device_exists.return_value = False - - self.ovs_driver.post_start_actions(instance, vif_ref) - - self.assertTrue(mock_VIF_get_record.called) - self.assertTrue(mock_network_get_bridge.called) - self.assertTrue(mock_network_get_uuid.called) - self.assertEqual(mock_ovs_add_port.call_count, 1) - self.assertTrue(mock_brctl_add_if.called) - - @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_device_exists') - @mock.patch.object(os_xenapi.client.host_network, 'brctl_add_if') - @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_create_linux_bridge') - @mock.patch.object(os_xenapi.client.host_network, 'ovs_add_port') - def test_post_start_actions_tap_exist(self, mock_ovs_add_port, - mock_create_linux_bridge, - mock_brctl_add_if, mock_device_exists): - vif_ref = "fake_vif_ref" - instance = {'name': 'fake_instance_name'} - fake_vif_rec = {'uuid': fake_vif['uuid'], - 'MAC': fake_vif['address'], - 'network': 'fake_network', - 'other_config': { - 'neutron-port-id': 'fake-neutron-port-id'} - } - mock_VIF_get_record = self.mock_patch_object( - self._session.VIF, 'get_record', return_val=fake_vif_rec) - mock_network_get_bridge = self.mock_patch_object( - self._session.network, 'get_bridge', - return_val='fake_bridge_name') - mock_network_get_uuid = self.mock_patch_object( - self._session.network, 'get_uuid', - return_val='fake_network_uuid') - mock_device_exists.return_value = True - - self.ovs_driver.post_start_actions(instance, vif_ref) - - self.assertTrue(mock_VIF_get_record.called) - self.assertTrue(mock_network_get_bridge.called) - self.assertTrue(mock_network_get_uuid.called) - self.assertTrue(mock_create_linux_bridge.called) - self.assertFalse(mock_brctl_add_if.called) - self.assertFalse(mock_ovs_add_port.called) - - @mock.patch.object(network_utils, 'find_network_with_name_label', - return_value="exist_network_ref") - def test_create_vif_interim_network_exist(self, - mock_find_network_with_name_label): - mock_network_create = self.mock_patch_object( - self._session.network, 'create', return_val='new_network_ref') - network_ref = self.ovs_driver.create_vif_interim_network(fake_vif) - self.assertFalse(mock_network_create.called) - self.assertEqual(network_ref, 'exist_network_ref') - - @mock.patch.object(network_utils, 'find_network_with_name_label', - return_value=None) - def test_create_vif_interim_network_new(self, - mock_find_network_with_name_label): - mock_network_create = self.mock_patch_object( - self._session.network, 'create', return_val='new_network_ref') - network_ref = self.ovs_driver.create_vif_interim_network(fake_vif) - self.assertTrue(mock_network_create.called) - self.assertEqual(network_ref, 'new_network_ref') - - @mock.patch.object(vif.XenAPIOpenVswitchDriver, 'post_start_actions') - @mock.patch.object(vm_utils, 'get_power_state') - def test_hot_plug_power_on(self, mock_get_power_state, - mock_post_start_actions): - vif_ref = "fake_vif_ref" - vif = "fake_vif" - instance = "fake_instance" - vm_ref = "fake_vm_ref" - mock_get_power_state.return_value = power_state.RUNNING - 
mock_VIF_plug = self.mock_patch_object( - self._session.VIF, 'plug', return_val=None) - self.ovs_driver.hot_plug(vif, instance, vm_ref, vif_ref) - mock_VIF_plug.assert_called_once_with(vif_ref) - mock_post_start_actions.assert_called_once_with(instance, vif_ref) - mock_get_power_state.assert_called_once_with(self._session, vm_ref) - - @mock.patch.object(vm_utils, 'get_power_state') - def test_hot_plug_power_off(self, mock_get_power_state): - vif_ref = "fake_vif_ref" - vif = "fake_vif" - instance = "fake_instance" - vm_ref = "fake_vm_ref" - mock_get_power_state.return_value = power_state.SHUTDOWN - mock_VIF_plug = self.mock_patch_object( - self._session.VIF, 'plug', return_val=None) - self.ovs_driver.hot_plug(vif, instance, vm_ref, vif_ref) - mock_VIF_plug.assert_not_called() - mock_get_power_state.assert_called_once_with(self._session, vm_ref) - - @mock.patch.object(vm_utils, 'get_power_state') - def test_hot_unplug_power_on(self, mock_get_power_state): - vm_ref = 'fake_vm_ref' - vif_ref = 'fake_vif_ref' - instance = 'fake_instance' - mock_get_power_state.return_value = power_state.RUNNING - mock_VIF_unplug = self.mock_patch_object( - self._session.VIF, 'unplug', return_val=None) - self.ovs_driver.hot_unplug(fake_vif, instance, vm_ref, vif_ref) - mock_VIF_unplug.assert_called_once_with(vif_ref) - mock_get_power_state.assert_called_once_with(self._session, vm_ref) - - @mock.patch.object(vm_utils, 'get_power_state') - def test_hot_unplug_power_off(self, mock_get_power_state): - vm_ref = 'fake_vm_ref' - vif_ref = 'fake_vif_ref' - instance = 'fake_instance' - mock_get_power_state.return_value = power_state.SHUTDOWN - mock_VIF_unplug = self.mock_patch_object( - self._session.VIF, 'unplug', return_val=None) - self.ovs_driver.hot_unplug(fake_vif, instance, vm_ref, vif_ref) - mock_VIF_unplug.assert_not_called() - mock_get_power_state.assert_called_once_with(self._session, vm_ref) diff --git a/nova/tests/unit/virt/xenapi/test_vm_utils.py b/nova/tests/unit/virt/xenapi/test_vm_utils.py deleted file mode 100644 index e762389ad9b3..000000000000 --- a/nova/tests/unit/virt/xenapi/test_vm_utils.py +++ /dev/null @@ -1,2402 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from eventlet import greenthread -import mock -import os_xenapi -from oslo_concurrency import lockutils -from oslo_concurrency import processutils -from oslo_config import fixture as config_fixture -from oslo_utils import fixture as utils_fixture -from oslo_utils.fixture import uuidsentinel as uuids -from oslo_utils import timeutils -from oslo_utils import uuidutils -import six - -from nova.compute import flavors -from nova.compute import power_state -from nova.compute import utils as compute_utils -import nova.conf -from nova import context -from nova import exception -from nova import objects -from nova.objects import fields as obj_fields -from nova import test -from nova.tests.unit import fake_flavor -from nova.tests.unit import fake_instance -from nova.tests.unit.objects import test_flavor -from nova.tests.unit.virt.xenapi import stubs -from nova.virt import hardware -from nova.virt.xenapi import driver as xenapi_conn -from nova.virt.xenapi import fake -from nova.virt.xenapi.image import utils as image_utils -from nova.virt.xenapi import vm_utils -import time - -CONF = nova.conf.CONF -XENSM_TYPE = 'xensm' -ISCSI_TYPE = 'iscsi' - - -def get_fake_connection_data(sr_type): - fakes = {XENSM_TYPE: {'sr_uuid': 'falseSR', - 'name_label': 'fake_storage', - 'name_description': 'test purposes', - 'server': 'myserver', - 'serverpath': '/local/scratch/myname', - 'sr_type': 'nfs', - 'introduce_sr_keys': ['server', - 'serverpath', - 'sr_type'], - 'vdi_uuid': 'falseVDI'}, - ISCSI_TYPE: {'volume_id': 'fake_volume_id', - 'target_lun': 1, - 'target_iqn': 'fake_iqn:volume-fake_volume_id', - 'target_portal': u'localhost:3260', - 'target_discovered': False}, } - return fakes[sr_type] - - -def _fake_noop(*args, **kwargs): - return - - -class VMUtilsTestBase(stubs.XenAPITestBaseNoDB): - pass - - -class LookupTestCase(VMUtilsTestBase): - def setUp(self): - super(LookupTestCase, self).setUp() - self.session = mock.Mock() - self.name_label = 'my_vm' - - def test_normal(self): - self.session.call_xenapi.return_value = ['x'] - result = vm_utils.lookup(self.session, self.name_label) - self.assertEqual('x', result) - self.session.call_xenapi.assert_called_once_with( - "VM.get_by_name_label", self.name_label) - - def test_no_result(self): - self.session.call_xenapi.return_value = [] - result = vm_utils.lookup(self.session, self.name_label) - self.assertIsNone(result) - self.session.call_xenapi.assert_called_once_with( - "VM.get_by_name_label", self.name_label) - - def test_too_many(self): - self.session.call_xenapi.return_value = ['a', 'b'] - self.assertRaises(exception.InstanceExists, - vm_utils.lookup, - self.session, self.name_label) - self.session.call_xenapi.assert_called_once_with( - "VM.get_by_name_label", self.name_label) - - def test_rescue_none(self): - self.session.call_xenapi.side_effect = [[], ['x']] - result = vm_utils.lookup(self.session, self.name_label, - check_rescue=True) - self.assertEqual('x', result) - self.session.call_xenapi.assert_has_calls([ - mock.call("VM.get_by_name_label", self.name_label + '-rescue'), - mock.call("VM.get_by_name_label", self.name_label)]) - - def test_rescue_found(self): - self.session.call_xenapi.return_value = ['y'] - result = vm_utils.lookup(self.session, self.name_label, - check_rescue=True) - self.assertEqual('y', result) - self.session.call_xenapi.assert_called_once_with( - "VM.get_by_name_label", self.name_label + '-rescue') - - def test_rescue_too_many(self): - self.session.call_xenapi.return_value = ['a', 'b', 'c'] - 
self.assertRaises(exception.InstanceExists, - vm_utils.lookup, - self.session, self.name_label, - check_rescue=True) - self.session.call_xenapi.assert_called_once_with( - "VM.get_by_name_label", self.name_label + '-rescue') - - -class GenerateConfigDriveTestCase(VMUtilsTestBase): - @mock.patch.object(compute_utils, 'disk_ops_semaphore') - @mock.patch.object(vm_utils, 'safe_find_sr') - @mock.patch.object(vm_utils, "create_vdi", return_value='vdi_ref') - @mock.patch.object(vm_utils.instance_metadata, "InstanceMetadata") - @mock.patch.object(vm_utils.configdrive, 'ConfigDriveBuilder') - @mock.patch('oslo_concurrency.processutils.execute') - @mock.patch.object(vm_utils.volume_utils, 'stream_to_vdi') - @mock.patch.object(vm_utils.os.path, 'getsize', return_value=100) - @mock.patch.object(vm_utils, 'create_vbd', return_value='vbd_ref') - @mock.patch.object(vm_utils.utils, 'tempdir') - def test_no_admin_pass(self, mock_tmpdir, mock_create_vbd, mock_size, - mock_stream, mock_execute, mock_builder, - mock_instance_metadata, mock_create_vdi, - mock_find_sr, mock_disk_op_sema): - - mock_tmpdir.return_value.__enter__.return_value = '/mock' - - with mock.patch.object(six.moves.builtins, 'open') as mock_open: - mock_open.return_value.__enter__.return_value = 'open_fd' - vm_utils.generate_configdrive('session', 'context', 'instance', - 'vm_ref', 'userdevice', - 'network_info') - mock_disk_op_sema.__enter__.assert_called_once() - mock_size.assert_called_with('/mock/configdrive.vhd') - mock_open.assert_called_with('/mock/configdrive.vhd') - mock_execute.assert_called_with('qemu-img', 'convert', '-Ovpc', - '/mock/configdrive', - '/mock/configdrive.vhd') - mock_instance_metadata.assert_called_with( - 'instance', content=None, extra_md={}, - network_info='network_info', request_context='context') - mock_stream.assert_called_with('session', 'instance', 'vhd', - 'open_fd', 100, 'vdi_ref') - - @mock.patch.object(vm_utils, "destroy_vdi") - @mock.patch.object(vm_utils, 'safe_find_sr') - @mock.patch.object(vm_utils, "create_vdi", return_value='vdi_ref') - @mock.patch.object(vm_utils.instance_metadata, "InstanceMetadata", - side_effect=test.TestingException) - def test_vdi_cleaned_up(self, mock_instance_metadata, mock_create, - mock_find_sr, mock_destroy): - self.assertRaises(test.TestingException, vm_utils.generate_configdrive, - 'session', None, None, None, None, None) - mock_destroy.assert_called_once_with('session', 'vdi_ref') - - -class XenAPIGetUUID(VMUtilsTestBase): - @mock.patch.object(vm_utils, '_get_sys_hypervisor_uuid', - return_value='2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f') - def test_get_this_vm_uuid_new_kernel(self, mock_get_sys_hypervisor_uuid): - result = vm_utils.get_this_vm_uuid(None) - - self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', result) - mock_get_sys_hypervisor_uuid.assert_called_once_with() - - @mock.patch('nova.virt.xenapi.vm_utils._get_sys_hypervisor_uuid', - side_effect=IOError(13, 'Permission denied')) - @mock.patch('nova.privsep.xenapi.xenstore_read', - side_effect=[('27', ''), - ('/vm/2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', '')]) - def test_get_this_vm_uuid_old_kernel_reboot(self, fake_read, fake_uuid): - result = vm_utils.get_this_vm_uuid(None) - - self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', result) - fake_read.assert_has_calls([ - mock.call('domid'), - mock.call('/local/domain/27/vm')]) - fake_uuid.assert_called_once_with() - - -class FakeSession(object): - def call_xenapi(self, *args): - pass - - def call_plugin(self, *args): - pass - - def 
call_plugin_serialized(self, plugin, fn, *args, **kwargs): - pass - - def call_plugin_serialized_with_retry(self, plugin, fn, num_retries, - callback, *args, **kwargs): - pass - - -class FetchVhdImageTestCase(VMUtilsTestBase): - def setUp(self): - super(FetchVhdImageTestCase, self).setUp() - self.context = context.get_admin_context() - self.context.auth_token = 'auth_token' - self.session = FakeSession() - self.instance = {"uuid": "uuid"} - self.image_handler = image_utils.get_image_handler( - CONF.xenserver.image_handler) - self.flags(group='glance', api_servers=['http://localhost:9292']) - - make_uuid_stack_patcher = mock.patch.object( - vm_utils, '_make_uuid_stack', return_value=["uuid_stack"]) - self.addCleanup(make_uuid_stack_patcher.stop) - self.mock_make_uuid_stack = make_uuid_stack_patcher.start() - - get_sr_path_patcher = mock.patch.object( - vm_utils, 'get_sr_path', return_value='sr_path') - self.addCleanup(get_sr_path_patcher.stop) - self.mock_get_sr_path = get_sr_path_patcher.start() - - def _stub_glance_download_vhd(self, raise_exc=None): - call_plugin_patcher = mock.patch.object( - self.session, 'call_plugin_serialized_with_retry') - self.addCleanup(call_plugin_patcher.stop) - self.mock_call_plugin = call_plugin_patcher.start() - - if raise_exc: - self.mock_call_plugin.side_effect = raise_exc - else: - self.mock_call_plugin.return_value = {'root': {'uuid': 'vdi'}} - - def _assert_make_uuid_stack_and_get_sr_path(self): - self.mock_make_uuid_stack.assert_called_once_with() - self.mock_get_sr_path.assert_called_once_with(self.session) - - def _assert_call_plugin_serialized_with_retry(self): - self.mock_call_plugin.assert_called_once_with( - 'glance.py', - 'download_vhd2', - 3, - mock.ANY, - mock.ANY, - extra_headers={'X-Auth-Token': 'auth_token', - 'X-Roles': '', - 'X-Tenant-Id': None, - 'X-User-Id': None, - 'X-Identity-Status': 'Confirmed'}, - image_id='image_id', - uuid_stack=["uuid_stack"], - sr_path='sr_path') - - @mock.patch.object(vm_utils, '_check_vdi_size') - @mock.patch.object(vm_utils, '_scan_sr') - @mock.patch.object(vm_utils, 'safe_find_sr', return_value="sr") - def test_fetch_vhd_image_works_with_glance(self, mock_safe_find_sr, - mock_scan_sr, - mock_check_vdi_size): - self._stub_glance_download_vhd() - - result = vm_utils._fetch_vhd_image(self.context, self.session, - self.instance, 'image_id', - self.image_handler) - - self.assertEqual("vdi", result['root']['uuid']) - mock_safe_find_sr.assert_called_once_with(self.session) - mock_scan_sr.assert_called_once_with(self.session, "sr") - mock_check_vdi_size.assert_called_once_with(self.context, self.session, - self.instance, "vdi") - self._assert_call_plugin_serialized_with_retry() - self._assert_make_uuid_stack_and_get_sr_path() - - @mock.patch.object(vm_utils, 'destroy_vdi', - side_effect=exception.StorageError(reason="")) - @mock.patch.object(FakeSession, 'call_xenapi', return_value="ref") - @mock.patch.object( - vm_utils, '_check_vdi_size', - side_effect=exception.FlavorDiskSmallerThanImage(flavor_size=0, - image_size=1)) - @mock.patch.object(vm_utils, '_scan_sr') - @mock.patch.object(vm_utils, 'safe_find_sr', return_value="sr") - def test_fetch_vhd_image_cleans_up_vdi_on_fail( - self, mock_safe_find_sr, mock_scan_sr, mock_check_vdi_size, - mock_call_xenapi, mock_destroy_vdi): - self._stub_glance_download_vhd() - - self.assertRaises(exception.FlavorDiskSmallerThanImage, - vm_utils._fetch_vhd_image, self.context, self.session, - self.instance, 'image_id', self.image_handler) - - 
mock_safe_find_sr.assert_called_once_with(self.session) - mock_scan_sr.assert_called_once_with(self.session, "sr") - mock_check_vdi_size.assert_called_once_with(self.context, self.session, - self.instance, "vdi") - mock_call_xenapi.assert_called_once_with("VDI.get_by_uuid", "vdi") - mock_destroy_vdi.assert_called_once_with(self.session, "ref") - self._assert_call_plugin_serialized_with_retry() - self._assert_make_uuid_stack_and_get_sr_path() - - def test_fetch_vhd_image_download_exception(self): - self._stub_glance_download_vhd(raise_exc=RuntimeError) - - self.assertRaises(RuntimeError, vm_utils._fetch_vhd_image, - self.context, self.session, self.instance, 'image_id', - self.image_handler) - self._assert_call_plugin_serialized_with_retry() - self._assert_make_uuid_stack_and_get_sr_path() - - -class TestImageCompression(VMUtilsTestBase): - def test_image_compression(self): - # Testing for nova.conf, too low, negative, and a correct value. - self.assertIsNone(vm_utils.get_compression_level()) - self.flags(image_compression_level=6, group='xenserver') - self.assertEqual(vm_utils.get_compression_level(), 6) - - -class ResizeHelpersTestCase(VMUtilsTestBase): - def setUp(self): - super(ResizeHelpersTestCase, self).setUp() - self.context = context.RequestContext('user', 'project') - - @mock.patch('nova.privsep.fs.ext_journal_disable') - @mock.patch('nova.privsep.fs.ext_journal_enable') - @mock.patch('nova.privsep.fs.resize_partition') - @mock.patch('nova.privsep.fs.resize2fs') - @mock.patch('nova.privsep.fs.e2fsck') - def test_resize_part_and_fs_down_succeeds( - self, mock_fsck, mock_resize2fs, mock_resize, - mock_disable_journal, mock_enable_journal): - dev_path = '/dev/fake' - partition_path = '%s1' % dev_path - vm_utils._resize_part_and_fs('fake', 0, 20, 10, 'boot') - - mock_fsck.assert_has_calls([ - mock.call(partition_path)]) - mock_resize2fs.assert_has_calls([ - mock.call(partition_path, [0], size='10s')]) - mock_resize.assert_has_calls([ - mock.call(dev_path, 0, 9, True)]) - mock_disable_journal.assert_has_calls([ - mock.call(partition_path)]) - mock_enable_journal.assert_has_calls([ - mock.call(partition_path)]) - - @mock.patch.object(vm_utils.LOG, 'debug') - def test_log_progress_if_required(self, mock_debug): - current = timeutils.utcnow() - time_fixture = self.useFixture(utils_fixture.TimeFixture(current)) - time_fixture.advance_time_seconds( - vm_utils.PROGRESS_INTERVAL_SECONDS + 1) - vm_utils._log_progress_if_required(1, current, 2) - mock_debug.assert_called_once_with( - "Sparse copy in progress, %(complete_pct).2f%% complete. 
" - "%(left)s bytes left to copy", - {"complete_pct": 50.0, "left": 1}) - - @mock.patch.object(vm_utils.LOG, 'debug') - def test_log_progress_if_not_required(self, mock_debug): - current = timeutils.utcnow() - time_fixture = self.useFixture(utils_fixture.TimeFixture(current)) - time_fixture.advance_time_seconds( - vm_utils.PROGRESS_INTERVAL_SECONDS - 1) - vm_utils._log_progress_if_required(1, current, 2) - mock_debug.assert_not_called() - - @mock.patch('nova.privsep.fs.ext_journal_disable') - @mock.patch('nova.privsep.fs.resize2fs', - side_effect=processutils.ProcessExecutionError) - @mock.patch('nova.privsep.fs.e2fsck') - def test_resize_part_and_fs_down_fails_disk_too_big( - self, mock_fsck, mock_resize2fs, mock_disable_journal): - self.assertRaises(exception.ResizeError, - vm_utils._resize_part_and_fs, - "fake", 0, 20, 10, "boot") - mock_fsck.assert_has_calls([mock.call('/dev/fake1')]) - - @mock.patch('nova.privsep.fs.ext_journal_disable') - @mock.patch('nova.privsep.fs.ext_journal_enable') - @mock.patch('nova.privsep.fs.resize_partition') - @mock.patch('nova.privsep.fs.resize2fs') - @mock.patch('nova.privsep.fs.e2fsck') - def test_resize_part_and_fs_up_succeeds( - self, mock_fsck, mock_resize2fs, mock_resize, - mock_disable_journal, mock_enable_journal): - dev_path = '/dev/fake' - partition_path = '%s1' % dev_path - vm_utils._resize_part_and_fs('fake', 0, 20, 30, '') - - mock_fsck.assert_has_calls([ - mock.call(partition_path)]) - mock_resize2fs.assert_has_calls([ - mock.call(partition_path, [0])]) - mock_resize.assert_has_calls([ - mock.call(dev_path, 0, 29, False)]) - mock_disable_journal.assert_has_calls([ - mock.call(partition_path)]) - mock_enable_journal.assert_has_calls([ - mock.call(partition_path)]) - - def test_resize_disk_throws_on_zero_size(self): - flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=0) - self.assertRaises(exception.ResizeError, vm_utils.resize_disk, - "session", "instance", "vdi_ref", flavor) - - def test_auto_config_disk_returns_early_on_zero_size(self): - vm_utils.try_auto_configure_disk("bad_session", "bad_vdi_ref", 0) - - -class CheckVDISizeTestCase(VMUtilsTestBase): - def setUp(self): - super(CheckVDISizeTestCase, self).setUp() - self.context = 'fakecontext' - self.session = 'fakesession' - self.instance = objects.Instance(uuid=uuids.fake) - self.flavor = objects.Flavor() - self.vdi_uuid = 'fakeuuid' - self.stub_out('nova.objects.Instance.get_flavor', - lambda *a, **kw: self.flavor) - - @mock.patch.object(vm_utils, '_get_vdi_chain_size', - return_value=1073741824) - def test_not_too_large(self, mock_get_vdi_chain_size): - self.flavor.root_gb = 1 - - vm_utils._check_vdi_size(self.context, self.session, self.instance, - self.vdi_uuid) - - mock_get_vdi_chain_size.assert_called_once_with(self.session, - self.vdi_uuid) - - @mock.patch.object(vm_utils, '_get_vdi_chain_size', - return_value=11811160065) # 10GB overhead allowed - def test_too_large(self, mock_get_vdi_chain_size): - self.flavor.root_gb = 1 - self.assertRaises(exception.FlavorDiskSmallerThanImage, - vm_utils._check_vdi_size, self.context, - self.session, self.instance, self.vdi_uuid) - - mock_get_vdi_chain_size.assert_called_once_with(self.session, - self.vdi_uuid) - - def test_zero_root_gb_disables_check(self): - self.flavor.root_gb = 0 - vm_utils._check_vdi_size(self.context, self.session, self.instance, - self.vdi_uuid) - - -class GetInstanceForVdisForSrTestCase(VMUtilsTestBase): - def setUp(self): - super(GetInstanceForVdisForSrTestCase, self).setUp() - self.fixture = 
self.useFixture(config_fixture.Config(lockutils.CONF)) - self.fixture.config(disable_process_locking=True, - group='oslo_concurrency') - self.flags(instance_name_template='%d') - self.flags(connection_url='http://localhost', - connection_password='test_pass', - group='xenserver') - - def test_get_instance_vdis_for_sr(self): - vm_ref = fake.create_vm("foo", "Running") - sr_ref = fake.create_sr() - - vdi_1 = fake.create_vdi('vdiname1', sr_ref) - vdi_2 = fake.create_vdi('vdiname2', sr_ref) - - for vdi_ref in [vdi_1, vdi_2]: - fake.create_vbd(vm_ref, vdi_ref) - - stubs.stubout_session(self, fake.SessionBase) - driver = xenapi_conn.XenAPIDriver(False) - - result = list(vm_utils.get_instance_vdis_for_sr( - driver._session, vm_ref, sr_ref)) - - self.assertEqual([vdi_1, vdi_2], result) - - def test_get_instance_vdis_for_sr_no_vbd(self): - vm_ref = fake.create_vm("foo", "Running") - sr_ref = fake.create_sr() - - stubs.stubout_session(self, fake.SessionBase) - driver = xenapi_conn.XenAPIDriver(False) - - result = list(vm_utils.get_instance_vdis_for_sr( - driver._session, vm_ref, sr_ref)) - - self.assertEqual([], result) - - -class VMRefOrRaiseVMFoundTestCase(VMUtilsTestBase): - - @mock.patch.object(vm_utils, 'lookup', return_value='ignored') - def test_lookup_call(self, mock_lookup): - vm_utils.vm_ref_or_raise('session', 'somename') - mock_lookup.assert_called_once_with('session', 'somename') - - @mock.patch.object(vm_utils, 'lookup', return_value='vmref') - def test_return_value(self, mock_lookup): - self.assertEqual( - 'vmref', vm_utils.vm_ref_or_raise('session', 'somename')) - mock_lookup.assert_called_once_with('session', 'somename') - - -class VMRefOrRaiseVMNotFoundTestCase(VMUtilsTestBase): - - @mock.patch.object(vm_utils, 'lookup', return_value=None) - def test_exception_raised(self, mock_lookup): - self.assertRaises( - exception.InstanceNotFound, - lambda: vm_utils.vm_ref_or_raise('session', 'somename') - ) - mock_lookup.assert_called_once_with('session', 'somename') - - @mock.patch.object(vm_utils, 'lookup', return_value=None) - def test_exception_msg_contains_vm_name(self, mock_lookup): - try: - vm_utils.vm_ref_or_raise('session', 'somename') - except exception.InstanceNotFound as e: - self.assertIn('somename', six.text_type(e)) - mock_lookup.assert_called_once_with('session', 'somename') - - -@mock.patch.object(vm_utils, 'safe_find_sr', return_value='safe_find_sr') -class CreateCachedImageTestCase(VMUtilsTestBase): - def setUp(self): - super(CreateCachedImageTestCase, self).setUp() - self.session = self.get_fake_session() - - @mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref') - def test_cached(self, mock_clone_vdi, mock_safe_find_sr): - self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2}, - None, None, None, 'vdi_uuid'] - self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}), - vm_utils._create_cached_image('context', self.session, - 'instance', 'name', 'uuid', - vm_utils.ImageType.DISK_VHD, - 'image_handler')) - - @mock.patch.object(vm_utils, '_safe_copy_vdi', return_value='new_vdi_ref') - def test_no_cow(self, mock_safe_copy_vdi, mock_safe_find_sr): - self.flags(use_cow_images=False) - self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2}, - None, None, None, 'vdi_uuid'] - self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}), - vm_utils._create_cached_image('context', self.session, - 'instance', 'name', 'uuid', - vm_utils.ImageType.DISK_VHD, - 'image_handler')) - - def test_no_cow_no_ext(self, mock_safe_find_sr): - 
self.flags(use_cow_images=False) - self.session.call_xenapi.side_effect = ['non-ext', {'vdi_ref': 2}, - 'vdi_ref', None, None, None, - 'vdi_uuid'] - self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}), - vm_utils._create_cached_image('context', self.session, - 'instance', 'name', 'uuid', - vm_utils.ImageType.DISK_VHD, - 'image_handler')) - - @mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref') - @mock.patch.object(vm_utils, '_fetch_image', - return_value={'root': {'uuid': 'vdi_uuid', - 'file': None}}) - def test_noncached(self, mock_fetch_image, mock_clone_vdi, - mock_safe_find_sr): - self.session.call_xenapi.side_effect = ['ext', {}, 'cache_vdi_ref', - None, None, None, None, None, - None, None, 'vdi_uuid'] - self.assertEqual((True, {'root': {'uuid': 'vdi_uuid', 'file': None}}), - vm_utils._create_cached_image('context', self.session, - 'instance', 'name', 'uuid', - vm_utils.ImageType.DISK_VHD, - 'image_handler')) - - -class DestroyCachedImageTestCase(VMUtilsTestBase): - def setUp(self): - super(DestroyCachedImageTestCase, self).setUp() - self.session = self.get_fake_session() - - @mock.patch.object(vm_utils, '_find_cached_images') - @mock.patch.object(vm_utils, 'destroy_vdi') - @mock.patch.object(vm_utils, '_walk_vdi_chain') - @mock.patch.object(time, 'time') - def test_destroy_cached_image_out_of_keep_days(self, - mock_time, - mock_walk_vdi_chain, - mock_destroy_vdi, - mock_find_cached_images): - fake_cached_time = '0' - mock_find_cached_images.return_value = {'fake_image_id': { - 'vdi_ref': 'fake_vdi_ref', 'cached_time': fake_cached_time}} - self.session.call_xenapi.return_value = 'fake_uuid' - mock_walk_vdi_chain.return_value = ('just_one',) - - mock_time.return_value = 2 * 3600 * 24 - - fake_keep_days = 1 - expected_return = set() - expected_return.add('fake_uuid') - - uuid_return = vm_utils.destroy_cached_images(self.session, - 'fake_sr_ref', False, False, fake_keep_days) - mock_find_cached_images.assert_called_once() - mock_walk_vdi_chain.assert_called_once() - mock_time.assert_called() - mock_destroy_vdi.assert_called_once() - self.assertEqual(expected_return, uuid_return) - - @mock.patch.object(vm_utils, '_find_cached_images') - @mock.patch.object(vm_utils, 'destroy_vdi') - @mock.patch.object(vm_utils, '_walk_vdi_chain') - @mock.patch.object(time, 'time') - def test_destroy_cached_image(self, mock_time, mock_walk_vdi_chain, - mock_destroy_vdi, mock_find_cached_images): - fake_cached_time = '0' - mock_find_cached_images.return_value = {'fake_image_id': { - 'vdi_ref': 'fake_vdi_ref', 'cached_time': fake_cached_time}} - self.session.call_xenapi.return_value = 'fake_uuid' - mock_walk_vdi_chain.return_value = ('just_one',) - - mock_time.return_value = 2 * 3600 * 24 - - fake_keep_days = 1 - expected_return = set() - expected_return.add('fake_uuid') - - uuid_return = vm_utils.destroy_cached_images(self.session, - 'fake_sr_ref', False, False, fake_keep_days) - mock_find_cached_images.assert_called_once() - mock_walk_vdi_chain.assert_called_once() - mock_destroy_vdi.assert_called_once() - self.assertEqual(expected_return, uuid_return) - - @mock.patch.object(vm_utils, '_find_cached_images') - @mock.patch.object(vm_utils, 'destroy_vdi') - @mock.patch.object(vm_utils, '_walk_vdi_chain') - @mock.patch.object(time, 'time') - def test_destroy_cached_image_cached_time_not_exceed( - self, mock_time, mock_walk_vdi_chain, - mock_destroy_vdi, mock_find_cached_images): - fake_cached_time = '0' - mock_find_cached_images.return_value = {'fake_image_id': { - 
'vdi_ref': 'fake_vdi_ref', 'cached_time': fake_cached_time}} - self.session.call_xenapi.return_value = 'fake_uuid' - mock_walk_vdi_chain.return_value = ('just_one',) - - mock_time.return_value = 1 * 3600 * 24 - - fake_keep_days = 2 - expected_return = set() - - uuid_return = vm_utils.destroy_cached_images(self.session, - 'fake_sr_ref', False, False, fake_keep_days) - mock_find_cached_images.assert_called_once() - mock_walk_vdi_chain.assert_called_once() - mock_destroy_vdi.assert_not_called() - self.assertEqual(expected_return, uuid_return) - - @mock.patch.object(vm_utils, '_find_cached_images') - @mock.patch.object(vm_utils, 'destroy_vdi') - @mock.patch.object(vm_utils, '_walk_vdi_chain') - @mock.patch.object(time, 'time') - def test_destroy_cached_image_no_cached_time( - self, mock_time, mock_walk_vdi_chain, - mock_destroy_vdi, mock_find_cached_images): - mock_find_cached_images.return_value = {'fake_image_id': { - 'vdi_ref': 'fake_vdi_ref', 'cached_time': None}} - self.session.call_xenapi.return_value = 'fake_uuid' - mock_walk_vdi_chain.return_value = ('just_one',) - fake_keep_days = 2 - expected_return = set() - - uuid_return = vm_utils.destroy_cached_images(self.session, - 'fake_sr_ref', False, False, fake_keep_days) - mock_find_cached_images.assert_called_once() - mock_walk_vdi_chain.assert_called_once() - mock_destroy_vdi.assert_not_called() - self.assertEqual(expected_return, uuid_return) - - -@mock.patch.object(vm_utils, 'is_vm_shutdown', return_value=True) -class ShutdownTestCase(VMUtilsTestBase): - - def test_hardshutdown_should_return_true_when_vm_is_shutdown( - self, mock_is_vm_shutdown): - session = FakeSession() - instance = "instance" - vm_ref = "vm-ref" - self.assertTrue(vm_utils.hard_shutdown_vm( - session, instance, vm_ref)) - mock_is_vm_shutdown.assert_called_once_with(session, vm_ref) - - def test_cleanshutdown_should_return_true_when_vm_is_shutdown( - self, mock_is_vm_shutdown): - session = FakeSession() - instance = "instance" - vm_ref = "vm-ref" - self.assertTrue(vm_utils.clean_shutdown_vm( - session, instance, vm_ref)) - mock_is_vm_shutdown.assert_called_once_with(session, vm_ref) - - -@mock.patch.object(FakeSession, 'call_xenapi', return_value='vbd_ref') -class CreateVBDTestCase(VMUtilsTestBase): - def setUp(self): - super(CreateVBDTestCase, self).setUp() - self.session = FakeSession() - self.vbd_rec = self._generate_vbd_rec() - - def _generate_vbd_rec(self): - vbd_rec = {} - vbd_rec['VM'] = 'vm_ref' - vbd_rec['VDI'] = 'vdi_ref' - vbd_rec['userdevice'] = '0' - vbd_rec['bootable'] = False - vbd_rec['mode'] = 'RW' - vbd_rec['type'] = 'disk' - vbd_rec['unpluggable'] = True - vbd_rec['empty'] = False - vbd_rec['other_config'] = {} - vbd_rec['qos_algorithm_type'] = '' - vbd_rec['qos_algorithm_params'] = {} - vbd_rec['qos_supported_algorithms'] = [] - return vbd_rec - - def test_create_vbd_default_args(self, mock_call_xenapi): - result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0) - self.assertEqual(result, "vbd_ref") - mock_call_xenapi.assert_called_once_with('VBD.create', self.vbd_rec) - - def test_create_vbd_osvol(self, mock_call_xenapi): - result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0, - osvol=True) - - self.assertEqual(result, "vbd_ref") - mock_call_xenapi.assert_has_calls([ - mock.call('VBD.create', self.vbd_rec), - mock.call('VBD.add_to_other_config', "vbd_ref", "osvol", "True")]) - - def test_create_vbd_extra_args(self, mock_call_xenapi): - self.vbd_rec['VDI'] = 'OpaqueRef:NULL' - self.vbd_rec['type'] = 'a' - self.vbd_rec['mode'] 
= 'RO' - self.vbd_rec['bootable'] = True - self.vbd_rec['empty'] = True - self.vbd_rec['unpluggable'] = False - - result = vm_utils.create_vbd(self.session, "vm_ref", None, 0, - vbd_type="a", read_only=True, bootable=True, - empty=True, unpluggable=False) - self.assertEqual(result, "vbd_ref") - mock_call_xenapi.assert_called_once_with('VBD.create', self.vbd_rec) - - @mock.patch.object(vm_utils, 'create_vbd', return_value='vbd_ref') - def test_attach_cd(self, mock_create_vbd, mock_call_xenapi): - mock_call_xenapi.return_value = None - - result = vm_utils.attach_cd(self.session, "vm_ref", "vdi_ref", 1) - - self.assertEqual(result, "vbd_ref") - mock_create_vbd.assert_called_once_with( - self.session, "vm_ref", None, 1, vbd_type='cd', read_only=True, - bootable=True, empty=True, unpluggable=False) - mock_call_xenapi.assert_called_once_with('VBD.insert', 'vbd_ref', - 'vdi_ref') - - -class UnplugVbdTestCase(VMUtilsTestBase): - @mock.patch.object(greenthread, 'sleep') - def test_unplug_vbd_works(self, mock_sleep): - session = self.get_fake_session() - vbd_ref = "vbd_ref" - vm_ref = 'vm_ref' - - vm_utils.unplug_vbd(session, vbd_ref, vm_ref) - - session.call_xenapi.assert_called_once_with('VBD.unplug', vbd_ref) - self.assertEqual(0, mock_sleep.call_count) - - def test_unplug_vbd_raises_unexpected_error(self): - session = self.get_fake_session() - session.XenAPI.Failure = fake.Failure - vbd_ref = "vbd_ref" - vm_ref = 'vm_ref' - session.call_xenapi.side_effect = test.TestingException() - - self.assertRaises(test.TestingException, vm_utils.unplug_vbd, - session, vm_ref, vbd_ref) - self.assertEqual(1, session.call_xenapi.call_count) - - def test_unplug_vbd_already_detached_works(self): - error = "DEVICE_ALREADY_DETACHED" - session = self.get_fake_session(error) - vbd_ref = "vbd_ref" - vm_ref = 'vm_ref' - - vm_utils.unplug_vbd(session, vbd_ref, vm_ref) - self.assertEqual(1, session.call_xenapi.call_count) - - def test_unplug_vbd_already_raises_unexpected_xenapi_error(self): - session = self.get_fake_session("") - vbd_ref = "vbd_ref" - vm_ref = 'vm_ref' - - self.assertRaises(exception.StorageError, vm_utils.unplug_vbd, - session, vbd_ref, vm_ref) - self.assertEqual(1, session.call_xenapi.call_count) - - def _test_uplug_vbd_retries(self, mock_sleep, error): - session = self.get_fake_session(error) - vbd_ref = "vbd_ref" - vm_ref = 'vm_ref' - - self.assertRaises(exception.StorageError, vm_utils.unplug_vbd, - session, vm_ref, vbd_ref) - - self.assertEqual(11, session.call_xenapi.call_count) - self.assertEqual(10, mock_sleep.call_count) - - def _test_uplug_vbd_retries_with_neg_val(self): - session = self.get_fake_session() - self.flags(num_vbd_unplug_retries=-1, group='xenserver') - vbd_ref = "vbd_ref" - vm_ref = 'vm_ref' - - vm_utils.unplug_vbd(session, vbd_ref, vm_ref) - self.assertEqual(1, session.call_xenapi.call_count) - - @mock.patch.object(greenthread, 'sleep') - def test_uplug_vbd_retries_on_rejected(self, mock_sleep): - self._test_uplug_vbd_retries(mock_sleep, - "DEVICE_DETACH_REJECTED") - - @mock.patch.object(greenthread, 'sleep') - def test_uplug_vbd_retries_on_internal_error(self, mock_sleep): - self._test_uplug_vbd_retries(mock_sleep, - "INTERNAL_ERROR") - - @mock.patch.object(greenthread, 'sleep') - def test_uplug_vbd_retries_on_missing_pv_drivers_error(self, mock_sleep): - self._test_uplug_vbd_retries(mock_sleep, - "VM_MISSING_PV_DRIVERS") - - -class VDIOtherConfigTestCase(VMUtilsTestBase): - """Tests to ensure that the code is populating VDI's `other_config` - attribute with the correct 
metadata.
-    """
-
-    def setUp(self):
-        super(VDIOtherConfigTestCase, self).setUp()
-
-        class _FakeSession(object):
-            def call_xenapi(self, operation, *args, **kwargs):
-                # VDI.add_to_other_config -> VDI_add_to_other_config
-                method = getattr(self, operation.replace('.', '_'), None)
-                if method:
-                    return method(*args, **kwargs)
-
-                self.operation = operation
-                self.args = args
-                self.kwargs = kwargs
-
-        self.session = _FakeSession()
-        self.context = context.get_admin_context()
-        self.fake_instance = {'uuid': 'aaaa-bbbb-cccc-dddd',
-                              'name': 'myinstance'}
-
-    def test_create_vdi(self):
-        # Some images are registered with XenServer explicitly by calling
-        # `create_vdi`
-        vm_utils.create_vdi(self.session, 'sr_ref', self.fake_instance,
-                            'myvdi', 'root', 1024, read_only=True)
-
-        expected = {'nova_disk_type': 'root',
-                    'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
-
-        self.assertEqual(expected, self.session.args[0]['other_config'])
-
-    @mock.patch.object(vm_utils, '_fetch_image',
-                       return_value={'root': {'uuid': 'fake-uuid'}})
-    def test_create_image(self, mock_vm_utils):
-        # Other images are registered implicitly when they are dropped into
-        # the SR by a dom0 plugin or some other process
-        self.flags(cache_images='none', group='xenserver')
-
-        other_config = {}
-
-        def VDI_add_to_other_config(ref, key, value):
-            other_config[key] = value
-
-        # Stubbing on the session object and not class so we don't pollute
-        # other tests
-        self.session.VDI_add_to_other_config = VDI_add_to_other_config
-        self.session.VDI_get_other_config = lambda vdi: {}
-
-        vm_utils.create_image(self.context, self.session, self.fake_instance,
-                              'myvdi', 'image1', vm_utils.ImageType.DISK_VHD,
-                              'image_handler')
-
-        expected = {'nova_disk_type': 'root',
-                    'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
-
-        self.assertEqual(expected, other_config)
-
-    @mock.patch.object(os_xenapi.client.vm_management, 'receive_vhd')
-    @mock.patch.object(vm_utils, 'scan_default_sr')
-    @mock.patch.object(vm_utils, 'get_sr_path')
-    def test_import_migrated_vhds(self, mock_sr_path, mock_scan_sr,
-                                  mock_recv_vhd):
-        # Migrated images should preserve the `other_config`
-        other_config = {}
-
-        def VDI_add_to_other_config(ref, key, value):
-            other_config[key] = value
-
-        # Stubbing on the session object and not class so we don't pollute
-        # other tests
-        self.session.VDI_add_to_other_config = VDI_add_to_other_config
-        self.session.VDI_get_other_config = lambda vdi: {}
-
-        mock_sr_path.return_value = {'root': {'uuid': 'aaaa-bbbb-cccc-dddd'}}
-
-        vm_utils._import_migrated_vhds(self.session, self.fake_instance,
-                                       "disk_label", "root", "vdi_label")
-
-        expected = {'nova_disk_type': 'root',
-                    'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
-
-        self.assertEqual(expected, other_config)
-        mock_scan_sr.assert_called_once_with(self.session)
-        mock_recv_vhd.assert_called_with(
-            self.session, "disk_label",
-            {'root': {'uuid': 'aaaa-bbbb-cccc-dddd'}}, mock.ANY)
-        mock_sr_path.assert_called_once_with(self.session)
-
-
-class GenerateDiskTestCase(VMUtilsTestBase):
-
-    @mock.patch.object(vm_utils, 'vdi_attached')
-    @mock.patch('nova.privsep.fs.mkfs',
-                side_effect = test.TestingException())
-    @mock.patch.object(vm_utils, '_get_dom0_ref', return_value='dom0_ref')
-    @mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref')
-    @mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref')
-    @mock.patch.object(vm_utils, 'create_vbd')
-    def test_generate_disk_with_no_fs_given(self, mock_create_vbd,
-                                            mock_create_vdi, mock_findsr,
-                                            mock_dom0ref, mock_mkfs,
-                                            
mock_attached_here): - session = self.get_fake_session() - vdi_ref = mock.MagicMock() - mock_attached_here.return_value = vdi_ref - - instance = {'uuid': 'fake_uuid'} - vm_utils._generate_disk(session, instance, 'vm_ref', '2', - 'name', 'user', 10, None, None) - - mock_attached_here.assert_called_once_with(session, 'vdi_ref', - read_only=False, - dom0=True) - - mock_create_vbd.assert_called_with(session, 'vm_ref', 'vdi_ref', '2', - bootable=False) - - @mock.patch.object(vm_utils, 'vdi_attached') - @mock.patch('nova.privsep.fs.mkfs') - @mock.patch.object(vm_utils, '_get_dom0_ref', return_value='dom0_ref') - @mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref') - @mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref') - @mock.patch.object(vm_utils.utils, 'make_dev_path', - return_value='/dev/fake_devp1') - @mock.patch.object(vm_utils, 'create_vbd') - def test_generate_disk_swap(self, mock_create_vbd, mock_make_path, - mock_create_vdi, - mock_findsr, mock_dom0ref, mock_mkfs, - mock_attached_here): - session = self.get_fake_session() - vdi_dev = mock.MagicMock() - mock_attached_here.return_value = vdi_dev - vdi_dev.__enter__.return_value = 'fakedev' - instance = {'uuid': 'fake_uuid'} - - vm_utils._generate_disk(session, instance, 'vm_ref', '2', - 'name', 'user', 10, 'swap', - 'swap-1') - - mock_attached_here.assert_any_call(session, 'vdi_ref', - read_only=False, - dom0=True) - - # As swap is supported in dom0, mkfs will run there - session.call_plugin_serialized.assert_any_call( - 'partition_utils.py', 'mkfs', 'fakedev', '1', 'swap', 'swap-1') - - mock_create_vbd.assert_called_with(session, 'vm_ref', 'vdi_ref', '2', - bootable=False) - - @mock.patch.object(vm_utils, 'vdi_attached') - @mock.patch('nova.privsep.fs.mkfs') - @mock.patch.object(vm_utils, '_get_dom0_ref', return_value='dom0_ref') - @mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref') - @mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref') - @mock.patch.object(vm_utils.utils, 'make_dev_path', - return_value='/dev/fake_devp1') - @mock.patch.object(vm_utils, 'create_vbd') - def test_generate_disk_ephemeral(self, mock_create_vbd, mock_make_path, - mock_create_vdi, mock_findsr, - mock_dom0ref, mock_mkfs, - mock_attached_here): - session = self.get_fake_session() - vdi_ref = mock.MagicMock() - mock_attached_here.return_value = vdi_ref - instance = {'uuid': 'fake_uuid'} - - vm_utils._generate_disk(session, instance, 'vm_ref', '2', - 'name', 'ephemeral', 10, 'ext4', - 'ephemeral-1') - - mock_attached_here.assert_any_call(session, 'vdi_ref', - read_only=False, - dom0=True) - - # As ext4 is not supported in dom0, mkfs will run in domU - mock_attached_here.assert_any_call(session, 'vdi_ref', - read_only=False) - mock_mkfs.assert_called_with('ext4', '/dev/fake_devp1', - 'ephemeral-1') - - mock_create_vbd.assert_called_with(session, 'vm_ref', 'vdi_ref', '2', - bootable=False) - - @mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref') - @mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref') - @mock.patch.object(vm_utils, '_get_dom0_ref', - side_effect = test.TestingException()) - @mock.patch.object(vm_utils, 'safe_destroy_vdis') - def test_generate_disk_ensure_cleanup_called(self, mock_destroy_vdis, - mock_dom0ref, - mock_create_vdi, - mock_findsr): - session = self.get_fake_session() - instance = {'uuid': 'fake_uuid'} - - self.assertRaises(test.TestingException, vm_utils._generate_disk, - session, instance, None, '2', 'name', 'user', 10, - None, None) - - 
mock_destroy_vdis.assert_called_once_with(session, ['vdi_ref']) - - @mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref') - @mock.patch.object(vm_utils, 'create_vdi', return_value='vdi_ref') - @mock.patch.object(vm_utils, 'vdi_attached') - @mock.patch.object(vm_utils, '_get_dom0_ref', return_value='dom0_ref') - @mock.patch.object(vm_utils, 'create_vbd') - def test_generate_disk_ephemeral_no_vmref(self, mock_create_vbd, - mock_dom0_ref, - mock_attached_here, - mock_create_vdi, - mock_findsr): - session = self.get_fake_session() - vdi_ref = mock.MagicMock() - mock_attached_here.return_value = vdi_ref - instance = {'uuid': 'fake_uuid'} - - vdi_ref = vm_utils._generate_disk( - session, instance, - None, None, 'name', 'user', 10, None, None) - - mock_attached_here.assert_called_once_with(session, 'vdi_ref', - read_only=False, dom0=True) - self.assertFalse(mock_create_vbd.called) - - -@mock.patch.object(vm_utils, '_generate_disk') -class GenerateEphemeralTestCase(VMUtilsTestBase): - def setUp(self): - super(GenerateEphemeralTestCase, self).setUp() - self.session = "session" - self.instance = "instance" - self.vm_ref = "vm_ref" - self.name_label = "name" - self.ephemeral_name_label = "name ephemeral" - self.userdevice = 4 - self.fs_label = "ephemeral" - - def test_get_ephemeral_disk_sizes_simple(self, mock_generate_disk): - result = vm_utils.get_ephemeral_disk_sizes(20) - expected = [20] - self.assertEqual(expected, list(result)) - - def test_get_ephemeral_disk_sizes_three_disks_2000(self, - mock_generate_disk): - result = vm_utils.get_ephemeral_disk_sizes(4030) - expected = [2000, 2000, 30] - self.assertEqual(expected, list(result)) - - def test_get_ephemeral_disk_sizes_two_disks_1024(self, mock_generate_disk): - result = vm_utils.get_ephemeral_disk_sizes(2048) - expected = [1024, 1024] - self.assertEqual(expected, list(result)) - - def test_generate_ephemeral_adds_one_disk(self, mock_generate_disk): - mock_generate_disk.return_value = self.userdevice - - vm_utils.generate_ephemeral( - self.session, self.instance, self.vm_ref, - str(self.userdevice), self.name_label, 20) - - mock_generate_disk.assert_called_once_with( - self.session, self.instance, self.vm_ref, str(self.userdevice), - self.ephemeral_name_label, 'ephemeral', 20480, None, self.fs_label) - - def test_generate_ephemeral_adds_multiple_disks(self, mock_generate_disk): - mock_generate_disk.side_effect = [self.userdevice, - self.userdevice + 1, - self.userdevice + 2] - - vm_utils.generate_ephemeral( - self.session, self.instance, self.vm_ref, - str(self.userdevice), self.name_label, 4030) - - mock_generate_disk.assert_has_calls([ - mock.call(self.session, self.instance, self.vm_ref, - str(self.userdevice), self.ephemeral_name_label, - 'ephemeral', 2048000, None, self.fs_label), - mock.call(self.session, self.instance, self.vm_ref, - str(self.userdevice + 1), - self.ephemeral_name_label + " (1)", - 'ephemeral', 2048000, None, self.fs_label + "1"), - mock.call(self.session, self.instance, self.vm_ref, - str(self.userdevice + 2), - self.ephemeral_name_label + " (2)", - 'ephemeral', 30720, None, self.fs_label + "2")]) - - @mock.patch.object(vm_utils, 'safe_destroy_vdis') - def test_generate_ephemeral_cleans_up_on_error( - self, mock_safe_destroy_vdis, mock_generate_disk): - mock_generate_disk.side_effect = [self.userdevice, - self.userdevice + 1, - exception.NovaException] - - self.assertRaises( - exception.NovaException, vm_utils.generate_ephemeral, - self.session, self.instance, self.vm_ref, - str(self.userdevice), 
self.name_label, 4096) - - mock_safe_destroy_vdis.assert_called_once_with(self.session, [4, 5]) - mock_generate_disk.assert_has_calls([ - mock.call(self.session, self.instance, self.vm_ref, - str(self.userdevice), self.ephemeral_name_label, - 'ephemeral', 1048576, None, self.fs_label), - mock.call(self.session, self.instance, self.vm_ref, - str(self.userdevice + 1), - self.ephemeral_name_label + " (1)", - 'ephemeral', 1048576, None, self.fs_label + "1"), - mock.call(self.session, self.instance, self.vm_ref, - str(self.userdevice + 2), - "name ephemeral (2)", - 'ephemeral', 1048576, None, 'ephemeral2')]) - - -@mock.patch.object(vm_utils, '_write_partition') -@mock.patch.object(vm_utils.utils, 'temporary_chown') -@mock.patch.object(vm_utils.utils, 'make_dev_path', return_value='some_path') -class StreamDiskTestCase(VMUtilsTestBase): - - def setUp(self): - super(StreamDiskTestCase, self).setUp() - # NOTE(matelakat): This might hide the fail reason, as test runners - # are unhappy with a mocked out open. - self.image_service_func = mock.Mock() - - def test_non_ami(self, mock_make_dev_path, mock_temporary_chown, - mock_write_partition): - mock_temporary_chown.return_value.__enter__.return_value = None - - mock_open = mock.mock_open() - with mock.patch.object(six.moves.builtins, 'open', mock_open): - vm_utils._stream_disk("session", self.image_service_func, - vm_utils.ImageType.KERNEL, None, 'dev') - - mock_make_dev_path.assert_called_once_with('dev') - mock_temporary_chown.assert_called_once_with('some_path') - mock_write_partition.assert_not_called() - mock_open.assert_called_once_with('some_path', 'wb') - fake_file = mock_open() - fake_file.seek.assert_called_once_with(0) - self.image_service_func.assert_called_once_with(fake_file) - - def test_ami_disk(self, mock_make_dev_path, mock_temporary_chown, - mock_write_partition): - mock_temporary_chown.return_value.__enter__.return_value = None - - mock_open = mock.mock_open() - with mock.patch.object(six.moves.builtins, 'open', mock_open): - vm_utils._stream_disk("session", self.image_service_func, - vm_utils.ImageType.DISK, 100, 'dev') - - mock_write_partition.assert_called_once_with("session", 100, 'dev') - mock_make_dev_path.assert_called_once_with('dev') - mock_temporary_chown.assert_called_once_with('some_path') - mock_open.assert_called_once_with('some_path', 'wb') - fake_file = mock_open() - fake_file.seek.assert_called_once_with(vm_utils.MBR_SIZE_BYTES) - self.image_service_func.assert_called_once_with(fake_file) - - -@mock.patch('os_xenapi.client.session.XenAPISession.call_xenapi') -@mock.patch.object(vm_utils, 'safe_find_sr', return_value='sr_ref') -class VMUtilsSRPath(VMUtilsTestBase): - def setUp(self): - super(VMUtilsSRPath, self).setUp() - self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF)) - self.fixture.config(disable_process_locking=True, - group='oslo_concurrency') - self.flags(instance_name_template='%d') - self.flags(connection_url='http://localhost', - connection_password='test_pass', - group='xenserver') - stubs.stubout_session(self, fake.SessionBase) - driver = xenapi_conn.XenAPIDriver(False) - self.session = driver._session - self.session.is_local_connection = False - - def test_defined(self, mock_safe_find_sr, mock_call_xenapi): - self.session.host_ref = "host_ref" - mock_call_xenapi.return_value = {'pbd_ref': {'device_config': - {'path': 'sr_path'}}} - - self.assertEqual('sr_path', vm_utils.get_sr_path(self.session)) - mock_safe_find_sr.assert_called_once_with(self.session) - 
mock_call_xenapi.assert_called_once_with( - 'PBD.get_all_records_where', - 'field "host"="host_ref" and field "SR"="sr_ref"') - - def test_default(self, mock_safe_find_sr, mock_call_xenapi): - self.session.host_ref = "host_ref" - mock_call_xenapi.side_effect = [ - {'pbd_ref': {'device_config': {}}}, - {'uuid': 'sr_uuid', 'type': 'ext'}] - - self.assertEqual('/var/run/sr-mount/sr_uuid', - vm_utils.get_sr_path(self.session)) - mock_safe_find_sr.assert_called_once_with(self.session) - mock_call_xenapi.assert_has_calls([ - mock.call('PBD.get_all_records_where', - 'field "host"="host_ref" and field "SR"="sr_ref"'), - mock.call("SR.get_record", "sr_ref")]) - - -class CreateKernelRamdiskTestCase(VMUtilsTestBase): - def setUp(self): - super(CreateKernelRamdiskTestCase, self).setUp() - self.context = "context" - self.session = FakeSession() - self.instance = {"kernel_id": None, "ramdisk_id": None} - self.name_label = "name" - self.stub_out('os_xenapi.client.session.XenAPISession.call_xenapi', - lambda *a, **k: None) - - def test_create_kernel_and_ramdisk_no_create(self): - result = vm_utils.create_kernel_and_ramdisk(self.context, - self.session, self.instance, self.name_label) - self.assertEqual((None, None), result) - - @mock.patch.object(uuidutils, 'generate_uuid', - side_effect=['fake_uuid1', 'fake_uuid2']) - @mock.patch.object(os_xenapi.client.disk_management, - 'create_kernel_ramdisk') - def test_create_kernel_and_ramdisk_create_both_cached( - self, mock_ramdisk, mock_generate_uuid): - kernel_id = "kernel" - ramdisk_id = "ramdisk" - self.instance["kernel_id"] = kernel_id - self.instance["ramdisk_id"] = ramdisk_id - mock_ramdisk.side_effect = ["k", "r"] - - result = vm_utils.create_kernel_and_ramdisk(self.context, - self.session, self.instance, self.name_label) - - self.assertEqual(("k", "r"), result) - mock_generate_uuid.assert_has_calls([mock.call(), mock.call()]) - - @mock.patch.object(uuidutils, 'generate_uuid', return_value='fake_uuid1') - @mock.patch.object(vm_utils, '_fetch_disk_image', - return_value={"kernel": {"file": "k"}}) - @mock.patch.object(os_xenapi.client.disk_management, - 'create_kernel_ramdisk') - def test_create_kernel_and_ramdisk_create_kernel_not_cached( - self, mock_ramdisk, mock_fetch_disk_image, mock_generate_uuid): - kernel_id = "kernel" - self.instance["kernel_id"] = kernel_id - mock_ramdisk.return_value = "" - - result = vm_utils.create_kernel_and_ramdisk(self.context, - self.session, self.instance, self.name_label) - - self.assertEqual(("k", None), result) - mock_generate_uuid.assert_called_once_with() - mock_ramdisk.assert_called_once_with(self.session, kernel_id, - 'fake_uuid1') - mock_fetch_disk_image.assert_called_once_with( - self.context, self.session, self.instance, self.name_label, - kernel_id, 0) - - @mock.patch.object(uuidutils, 'generate_uuid') - @mock.patch.object(vm_utils, '_fetch_disk_image') - def _test_create_kernel_image(self, cache_images, mock_fetch_disk_image, - mock_generate_uuid): - kernel_id = "kernel" - self.instance["kernel_id"] = kernel_id - self.flags(cache_images=cache_images, group='xenserver') - - if cache_images == 'all': - mock_generate_uuid.return_value = 'fake_uuid1' - else: - mock_fetch_disk_image.return_value = { - "kernel": {"file": "new_image", "uuid": None}} - - result = vm_utils._create_kernel_image(self.context, - self.session, - self.instance, - self.name_label, - kernel_id, 0) - - if cache_images == 'all': - self.assertEqual(result, {"kernel": - {"file": "cached_image", "uuid": None}}) - 
mock_generate_uuid.assert_called_once_with() - mock_fetch_disk_image.assert_not_called() - else: - self.assertEqual(result, {"kernel": - {"file": "new_image", "uuid": None}}) - mock_fetch_disk_image.assert_called_once_with( - self.context, self.session, self.instance, self.name_label, - kernel_id, 0) - mock_generate_uuid.assert_not_called() - - @mock.patch.object(os_xenapi.client.disk_management, - 'create_kernel_ramdisk') - def test_create_kernel_image_cached_config(self, mock_ramdisk): - mock_ramdisk.return_value = "cached_image" - self._test_create_kernel_image('all') - mock_ramdisk.assert_called_once_with(self.session, "kernel", - "fake_uuid1") - - def test_create_kernel_image_uncached_config(self): - self._test_create_kernel_image('none') - - -class ScanSrTestCase(VMUtilsTestBase): - @mock.patch.object(vm_utils, "_scan_sr") - @mock.patch.object(vm_utils, "safe_find_sr") - def test_scan_default_sr(self, mock_safe_find_sr, mock_scan_sr): - mock_safe_find_sr.return_value = "sr_ref" - - self.assertEqual("sr_ref", vm_utils.scan_default_sr("fake_session")) - - mock_scan_sr.assert_called_once_with("fake_session", "sr_ref") - - def test_scan_sr_works(self): - session = mock.Mock() - vm_utils._scan_sr(session, "sr_ref") - session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref") - - def test_scan_sr_unknown_error_fails_once(self): - session = mock.Mock() - session.XenAPI.Failure = fake.Failure - session.call_xenapi.side_effect = test.TestingException - self.assertRaises(test.TestingException, - vm_utils._scan_sr, session, "sr_ref") - session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref") - - @mock.patch.object(greenthread, 'sleep') - def test_scan_sr_known_error_retries_then_throws(self, mock_sleep): - session = mock.Mock() - - class FakeException(Exception): - details = ['SR_BACKEND_FAILURE_40', "", "", ""] - - session.XenAPI.Failure = FakeException - session.call_xenapi.side_effect = FakeException - - self.assertRaises(FakeException, - vm_utils._scan_sr, session, "sr_ref") - - session.call_xenapi.assert_called_with('SR.scan', "sr_ref") - self.assertEqual(4, session.call_xenapi.call_count) - mock_sleep.assert_has_calls([mock.call(2), mock.call(4), mock.call(8)]) - - @mock.patch.object(greenthread, 'sleep') - def test_scan_sr_known_error_retries_then_succeeds(self, mock_sleep): - session = mock.Mock() - - class FakeException(Exception): - details = ['SR_BACKEND_FAILURE_40', "", "", ""] - - session.XenAPI.Failure = FakeException - - def fake_call_xenapi(*args): - fake_call_xenapi.count += 1 - if fake_call_xenapi.count != 2: - raise FakeException() - - fake_call_xenapi.count = 0 - session.call_xenapi.side_effect = fake_call_xenapi - - vm_utils._scan_sr(session, "sr_ref") - - session.call_xenapi.assert_called_with('SR.scan', "sr_ref") - self.assertEqual(2, session.call_xenapi.call_count) - mock_sleep.assert_called_once_with(2) - - -@mock.patch.object(flavors, 'extract_flavor', - return_value={ - 'memory_mb': 1024, - 'vcpus': 1, - 'vcpu_weight': 1.0, - }) -class CreateVmTestCase(VMUtilsTestBase): - def test_vss_provider(self, mock_extract): - self.flags(vcpu_pin_set="2,3") - session = self.get_fake_session() - instance = objects.Instance(uuid=uuids.nova_uuid, - os_type="windows", - system_metadata={}) - - with mock.patch.object(instance, 'get_flavor') as get: - get.return_value = objects.Flavor._from_db_object( - None, objects.Flavor(), test_flavor.fake_flavor) - vm_utils.create_vm(session, instance, "label", - "kernel", "ramdisk") - - vm_rec = { - 'VCPUs_params': {'cap': '0', 
'mask': '2,3', 'weight': '1'}, - 'PV_args': '', - 'memory_static_min': '0', - 'ha_restart_priority': '', - 'HVM_boot_policy': 'BIOS order', - 'PV_bootloader': '', 'tags': [], - 'VCPUs_max': '4', - 'memory_static_max': '1073741824', - 'actions_after_shutdown': 'destroy', - 'memory_dynamic_max': '1073741824', - 'user_version': '0', - 'xenstore_data': {'vm-data/allowvssprovider': 'false'}, - 'blocked_operations': {}, - 'is_a_template': False, - 'name_description': '', - 'memory_dynamic_min': '1073741824', - 'actions_after_crash': 'destroy', - 'memory_target': '1073741824', - 'PV_ramdisk': '', - 'PV_bootloader_args': '', - 'PCI_bus': '', - 'other_config': {'nova_uuid': uuids.nova_uuid}, - 'name_label': 'label', - 'actions_after_reboot': 'restart', - 'VCPUs_at_startup': '4', - 'HVM_boot_params': {'order': 'dc'}, - 'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true', - 'timeoffset': '0', 'viridian': 'true', - 'acpi': 'true'}, - 'PV_legacy_args': '', - 'PV_kernel': '', - 'affinity': '', - 'recommendations': '', - 'ha_always_run': False - } - session.call_xenapi.assert_called_once_with("VM.create", vm_rec) - - def test_invalid_cpu_mask_raises(self, mock_extract): - self.flags(vcpu_pin_set="asdf") - session = mock.Mock() - instance = objects.Instance(uuid=uuids.fake, system_metadata={}) - with mock.patch.object(instance, 'get_flavor') as get: - get.return_value = objects.Flavor._from_db_object( - None, objects.Flavor(), test_flavor.fake_flavor) - self.assertRaises(exception.Invalid, - vm_utils.create_vm, - session, instance, "label", - "kernel", "ramdisk") - - def test_destroy_vm(self, mock_extract): - session = mock.Mock() - instance = objects.Instance(uuid=uuids.fake) - - vm_utils.destroy_vm(session, instance, "vm_ref") - - session.VM.destroy.assert_called_once_with("vm_ref") - - def test_destroy_vm_silently_fails(self, mock_extract): - session = mock.Mock() - exc = test.TestingException() - session.XenAPI.Failure = test.TestingException - session.VM.destroy.side_effect = exc - instance = objects.Instance(uuid=uuids.fake) - - vm_utils.destroy_vm(session, instance, "vm_ref") - - session.VM.destroy.assert_called_once_with("vm_ref") - - -class DetermineVmModeTestCase(VMUtilsTestBase): - def _fake_object(self, updates): - return fake_instance.fake_instance_obj(None, **updates) - - def test_determine_vm_mode_returns_xen_mode(self): - instance = self._fake_object({"vm_mode": "xen"}) - self.assertEqual(obj_fields.VMMode.XEN, - vm_utils.determine_vm_mode(instance, None)) - - def test_determine_vm_mode_returns_hvm_mode(self): - instance = self._fake_object({"vm_mode": "hvm"}) - self.assertEqual(obj_fields.VMMode.HVM, - vm_utils.determine_vm_mode(instance, None)) - - def test_determine_vm_mode_returns_xen_for_linux(self): - instance = self._fake_object({"vm_mode": None, "os_type": "linux"}) - self.assertEqual(obj_fields.VMMode.XEN, - vm_utils.determine_vm_mode(instance, None)) - - def test_determine_vm_mode_returns_hvm_for_windows(self): - instance = self._fake_object({"vm_mode": None, "os_type": "windows"}) - self.assertEqual(obj_fields.VMMode.HVM, - vm_utils.determine_vm_mode(instance, None)) - - def test_determine_vm_mode_returns_hvm_by_default(self): - instance = self._fake_object({"vm_mode": None, "os_type": None}) - self.assertEqual(obj_fields.VMMode.HVM, - vm_utils.determine_vm_mode(instance, None)) - - def test_determine_vm_mode_returns_xen_for_VHD(self): - instance = self._fake_object({"vm_mode": None, "os_type": None}) - self.assertEqual(obj_fields.VMMode.XEN, - 
vm_utils.determine_vm_mode(instance, vm_utils.ImageType.DISK_VHD)) - - def test_determine_vm_mode_returns_xen_for_DISK(self): - instance = self._fake_object({"vm_mode": None, "os_type": None}) - self.assertEqual(obj_fields.VMMode.XEN, - vm_utils.determine_vm_mode(instance, vm_utils.ImageType.DISK)) - - -class CallXenAPIHelpersTestCase(VMUtilsTestBase): - def test_vm_get_vbd_refs(self): - session = mock.Mock() - session.call_xenapi.return_value = "foo" - self.assertEqual("foo", vm_utils._vm_get_vbd_refs(session, "vm_ref")) - session.call_xenapi.assert_called_once_with("VM.get_VBDs", "vm_ref") - - def test_vbd_get_rec(self): - session = mock.Mock() - session.call_xenapi.return_value = "foo" - self.assertEqual("foo", vm_utils._vbd_get_rec(session, "vbd_ref")) - session.call_xenapi.assert_called_once_with("VBD.get_record", - "vbd_ref") - - def test_vdi_get_rec(self): - session = mock.Mock() - session.call_xenapi.return_value = "foo" - self.assertEqual("foo", vm_utils._vdi_get_rec(session, "vdi_ref")) - session.call_xenapi.assert_called_once_with("VDI.get_record", - "vdi_ref") - - def test_vdi_snapshot(self): - session = mock.Mock() - session.call_xenapi.return_value = "foo" - self.assertEqual("foo", vm_utils._vdi_snapshot(session, "vdi_ref")) - session.call_xenapi.assert_called_once_with("VDI.snapshot", - "vdi_ref", {}) - - def test_vdi_get_virtual_size(self): - session = mock.Mock() - session.call_xenapi.return_value = "123" - self.assertEqual(123, vm_utils._vdi_get_virtual_size(session, "ref")) - session.call_xenapi.assert_called_once_with("VDI.get_virtual_size", - "ref") - - @mock.patch.object(vm_utils, '_get_resize_func_name') - def test_vdi_resize(self, mock_get_resize_func_name): - session = mock.Mock() - mock_get_resize_func_name.return_value = "VDI.fake" - vm_utils._vdi_resize(session, "ref", 123) - session.call_xenapi.assert_called_once_with("VDI.fake", "ref", "123") - - @mock.patch.object(vm_utils, '_vdi_resize') - @mock.patch.object(vm_utils, '_vdi_get_virtual_size') - def test_update_vdi_virtual_size_works(self, mock_get_size, mock_resize): - mock_get_size.return_value = (1024 ** 3) - 1 - instance = {"uuid": "a"} - - vm_utils.update_vdi_virtual_size("s", instance, "ref", 1) - - mock_get_size.assert_called_once_with("s", "ref") - mock_resize.assert_called_once_with("s", "ref", 1024 ** 3) - - @mock.patch.object(vm_utils, '_vdi_resize') - @mock.patch.object(vm_utils, '_vdi_get_virtual_size') - def test_update_vdi_virtual_size_skips_resize_down(self, mock_get_size, - mock_resize): - mock_get_size.return_value = 1024 ** 3 - instance = {"uuid": "a"} - - vm_utils.update_vdi_virtual_size("s", instance, "ref", 1) - - mock_get_size.assert_called_once_with("s", "ref") - self.assertFalse(mock_resize.called) - - @mock.patch.object(vm_utils, '_vdi_resize') - @mock.patch.object(vm_utils, '_vdi_get_virtual_size') - def test_update_vdi_virtual_size_raise_if_disk_big(self, mock_get_size, - mock_resize): - mock_get_size.return_value = 1024 ** 3 + 1 - instance = {"uuid": "a"} - - self.assertRaises(exception.ResizeError, - vm_utils.update_vdi_virtual_size, - "s", instance, "ref", 1) - - mock_get_size.assert_called_once_with("s", "ref") - self.assertFalse(mock_resize.called) - - -@mock.patch.object(vm_utils, '_vdi_get_rec') -@mock.patch.object(vm_utils, '_vbd_get_rec') -@mock.patch.object(vm_utils, '_vm_get_vbd_refs') -class GetVdiForVMTestCase(VMUtilsTestBase): - def test_get_vdi_for_vm_safely(self, vm_get_vbd_refs, - vbd_get_rec, vdi_get_rec): - session = "session" - - vm_get_vbd_refs.return_value = 
["a", "b"] - vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'} - vdi_get_rec.return_value = {} - - result = vm_utils.get_vdi_for_vm_safely(session, "vm_ref") - self.assertEqual(('vdi_ref', {}), result) - - vm_get_vbd_refs.assert_called_once_with(session, "vm_ref") - vbd_get_rec.assert_called_once_with(session, "a") - vdi_get_rec.assert_called_once_with(session, "vdi_ref") - - def test_get_vdi_for_vm_safely_fails(self, vm_get_vbd_refs, - vbd_get_rec, vdi_get_rec): - session = "session" - - vm_get_vbd_refs.return_value = ["a", "b"] - vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'} - - self.assertRaises(exception.NovaException, - vm_utils.get_vdi_for_vm_safely, - session, "vm_ref", userdevice='1') - - self.assertEqual([], vdi_get_rec.call_args_list) - self.assertEqual(2, len(vbd_get_rec.call_args_list)) - - -@mock.patch.object(vm_utils, '_vdi_get_uuid') -@mock.patch.object(vm_utils, '_vbd_get_rec') -@mock.patch.object(vm_utils, '_vm_get_vbd_refs') -class GetAllVdiForVMTestCase(VMUtilsTestBase): - def _setup_get_all_vdi_uuids_for_vm(self, vm_get_vbd_refs, - vbd_get_rec, vdi_get_uuid): - def fake_vbd_get_rec(session, vbd_ref): - return {'userdevice': vbd_ref, 'VDI': "vdi_ref_%s" % vbd_ref} - - def fake_vdi_get_uuid(session, vdi_ref): - return vdi_ref - - vm_get_vbd_refs.return_value = ["0", "2"] - vbd_get_rec.side_effect = fake_vbd_get_rec - vdi_get_uuid.side_effect = fake_vdi_get_uuid - - def test_get_all_vdi_uuids_for_vm_works(self, vm_get_vbd_refs, - vbd_get_rec, vdi_get_uuid): - self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs, - vbd_get_rec, vdi_get_uuid) - - result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref") - expected = ['vdi_ref_0', 'vdi_ref_2'] - self.assertEqual(expected, list(result)) - - def test_get_all_vdi_uuids_for_vm_finds_none(self, vm_get_vbd_refs, - vbd_get_rec, vdi_get_uuid): - self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs, - vbd_get_rec, vdi_get_uuid) - - result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref", - min_userdevice=1) - expected = ["vdi_ref_2"] - self.assertEqual(expected, list(result)) - - -class GetAllVdisTestCase(VMUtilsTestBase): - def test_get_all_vdis_in_sr(self): - - def fake_get_rec(record_type, ref): - if ref == "2": - return "vdi_rec_2" - - session = mock.Mock() - session.call_xenapi.return_value = ["1", "2"] - session.get_rec.side_effect = fake_get_rec - - sr_ref = "sr_ref" - actual = list(vm_utils._get_all_vdis_in_sr(session, sr_ref)) - self.assertEqual(actual, [('2', 'vdi_rec_2')]) - - session.call_xenapi.assert_called_once_with("SR.get_VDIs", sr_ref) - - -class SnapshotAttachedHereTestCase(VMUtilsTestBase): - @mock.patch.object(vm_utils, '_snapshot_attached_here_impl') - def test_snapshot_attached_here(self, mock_impl): - def fake_impl(session, instance, vm_ref, label, userdevice, - post_snapshot_callback): - self.assertEqual("session", session) - self.assertEqual("instance", instance) - self.assertEqual("vm_ref", vm_ref) - self.assertEqual("label", label) - self.assertEqual('0', userdevice) - self.assertIsNone(post_snapshot_callback) - yield "fake" - - mock_impl.side_effect = fake_impl - - with vm_utils.snapshot_attached_here("session", "instance", "vm_ref", - "label") as result: - self.assertEqual("fake", result) - - mock_impl.assert_called_once_with("session", "instance", "vm_ref", - "label", '0', None) - - @mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain') - @mock.patch.object(vm_utils, 'safe_destroy_vdis') - @mock.patch.object(vm_utils, '_walk_vdi_chain') - 
@mock.patch.object(vm_utils, '_wait_for_vhd_coalesce') - @mock.patch.object(vm_utils, '_vdi_get_uuid') - @mock.patch.object(vm_utils, '_vdi_snapshot') - @mock.patch.object(vm_utils, 'get_vdi_for_vm_safely') - def test_snapshot_attached_here_impl(self, mock_get_vdi_for_vm_safely, - mock_vdi_snapshot, mock_vdi_get_uuid, - mock_wait_for_vhd_coalesce, mock_walk_vdi_chain, - mock_safe_destroy_vdis, mock_delete_snapshots_in_vdi_chain): - session = "session" - instance = {"uuid": "uuid"} - mock_callback = mock.Mock() - - mock_get_vdi_for_vm_safely.return_value = ("vdi_ref", - {"SR": "sr_ref", - "uuid": "vdi_uuid"}) - mock_vdi_snapshot.return_value = "snap_ref" - mock_vdi_get_uuid.return_value = "snap_uuid" - mock_walk_vdi_chain.return_value = [{"uuid": "a"}, {"uuid": "b"}] - - try: - with vm_utils.snapshot_attached_here(session, instance, "vm_ref", - "label", '2', mock_callback) as result: - self.assertEqual(["a", "b"], result) - raise test.TestingException() - self.assertTrue(False) - except test.TestingException: - pass - - mock_get_vdi_for_vm_safely.assert_called_once_with(session, "vm_ref", - '2') - mock_vdi_snapshot.assert_called_once_with(session, "vdi_ref") - mock_wait_for_vhd_coalesce.assert_called_once_with(session, instance, - "sr_ref", "vdi_ref", ['a', 'b']) - mock_vdi_get_uuid.assert_called_once_with(session, "snap_ref") - mock_walk_vdi_chain.assert_has_calls([mock.call(session, "vdi_uuid"), - mock.call(session, "snap_uuid")]) - mock_callback.assert_called_once_with( - task_state="image_pending_upload") - mock_safe_destroy_vdis.assert_called_once_with(session, ["snap_ref"]) - mock_delete_snapshots_in_vdi_chain.assert_called_once_with(session, - instance, ['a', 'b'], "sr_ref") - - @mock.patch.object(greenthread, 'sleep') - def test_wait_for_vhd_coalesce_leaf_node(self, mock_sleep): - instance = {"uuid": "fake"} - vm_utils._wait_for_vhd_coalesce("session", instance, - "sr_ref", "vdi_ref", ["uuid"]) - self.assertFalse(mock_sleep.called) - - @mock.patch.object(vm_utils, '_count_children') - @mock.patch.object(greenthread, 'sleep') - def test_wait_for_vhd_coalesce_parent_snapshot(self, mock_sleep, - mock_count): - mock_count.return_value = 2 - instance = {"uuid": "fake"} - - vm_utils._wait_for_vhd_coalesce("session", instance, - "sr_ref", "vdi_ref", ["uuid1", "uuid2"]) - - self.assertFalse(mock_sleep.called) - self.assertTrue(mock_count.called) - - @mock.patch.object(greenthread, 'sleep') - @mock.patch.object(vm_utils, '_get_vhd_parent_uuid') - @mock.patch.object(vm_utils, '_count_children') - @mock.patch.object(vm_utils, '_scan_sr') - def test_wait_for_vhd_coalesce_raises(self, mock_scan_sr, - mock_count, mock_get_vhd_parent_uuid, mock_sleep): - mock_count.return_value = 1 - instance = {"uuid": "fake"} - - self.assertRaises(exception.NovaException, - vm_utils._wait_for_vhd_coalesce, "session", instance, - "sr_ref", "vdi_ref", ["uuid1", "uuid2"]) - - self.assertTrue(mock_count.called) - self.assertEqual(20, mock_sleep.call_count) - self.assertEqual(20, mock_scan_sr.call_count) - - @mock.patch.object(greenthread, 'sleep') - @mock.patch.object(vm_utils, '_get_vhd_parent_uuid') - @mock.patch.object(vm_utils, '_count_children') - @mock.patch.object(vm_utils, '_scan_sr') - def test_wait_for_vhd_coalesce_success(self, mock_scan_sr, - mock_count, mock_get_vhd_parent_uuid, mock_sleep): - mock_count.return_value = 1 - instance = {"uuid": "fake"} - mock_get_vhd_parent_uuid.side_effect = ["bad", "uuid2"] - - vm_utils._wait_for_vhd_coalesce("session", instance, - "sr_ref", "vdi_ref", ["uuid1", 
"uuid2"]) - - self.assertEqual(1, mock_sleep.call_count) - self.assertEqual(2, mock_scan_sr.call_count) - - @mock.patch.object(vm_utils, '_get_all_vdis_in_sr') - def test_count_children(self, mock_get_all_vdis_in_sr): - vdis = [('child1', {'sm_config': {'vhd-parent': 'parent1'}}), - ('child2', {'sm_config': {'vhd-parent': 'parent2'}}), - ('child3', {'sm_config': {'vhd-parent': 'parent1'}})] - mock_get_all_vdis_in_sr.return_value = vdis - self.assertEqual(2, vm_utils._count_children('session', - 'parent1', 'sr')) - - -class ImportMigratedDisksTestCase(VMUtilsTestBase): - @mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks') - @mock.patch.object(vm_utils, '_import_migrated_root_disk') - def test_import_all_migrated_disks(self, mock_root, mock_ephemeral): - session = "session" - instance = "instance" - mock_root.return_value = "root_vdi" - mock_ephemeral.return_value = ["a", "b"] - - result = vm_utils.import_all_migrated_disks(session, instance) - - expected = {'root': 'root_vdi', 'ephemerals': ["a", "b"]} - self.assertEqual(expected, result) - mock_root.assert_called_once_with(session, instance) - mock_ephemeral.assert_called_once_with(session, instance) - - @mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks') - @mock.patch.object(vm_utils, '_import_migrated_root_disk') - def test_import_all_migrated_disks_import_root_false(self, mock_root, - mock_ephemeral): - session = "session" - instance = "instance" - mock_root.return_value = "root_vdi" - mock_ephemeral.return_value = ["a", "b"] - - result = vm_utils.import_all_migrated_disks(session, instance, - import_root=False) - - expected = {'root': None, 'ephemerals': ["a", "b"]} - self.assertEqual(expected, result) - self.assertEqual(0, mock_root.call_count) - mock_ephemeral.assert_called_once_with(session, instance) - - @mock.patch.object(vm_utils, '_import_migrated_vhds') - def test_import_migrated_root_disk(self, mock_migrate): - mock_migrate.return_value = "foo" - instance = {"uuid": "uuid", "name": "name"} - - result = vm_utils._import_migrated_root_disk("s", instance) - - self.assertEqual("foo", result) - mock_migrate.assert_called_once_with("s", instance, "uuid", "root", - "name") - - @mock.patch.object(vm_utils, '_import_migrated_vhds') - def test_import_migrate_ephemeral_disks(self, mock_migrate): - mock_migrate.return_value = "foo" - instance = objects.Instance(id=1, uuid=uuids.fake) - instance.old_flavor = objects.Flavor(ephemeral_gb=4000) - - result = vm_utils._import_migrate_ephemeral_disks("s", instance) - - self.assertEqual({'4': 'foo', '5': 'foo'}, result) - inst_uuid = instance.uuid - inst_name = instance.name - expected_calls = [mock.call("s", instance, - "%s_ephemeral_1" % inst_uuid, - "ephemeral", - "%s ephemeral (1)" % inst_name), - mock.call("s", instance, - "%s_ephemeral_2" % inst_uuid, - "ephemeral", - "%s ephemeral (2)" % inst_name)] - self.assertEqual(expected_calls, mock_migrate.call_args_list) - - @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes') - def test_import_migrate_ephemeral_disks_use_old_flavor(self, - mock_get_sizes): - mock_get_sizes.return_value = [] - instance = objects.Instance(id=1, uuid=uuids.fake, ephemeral_gb=2000) - instance.old_flavor = objects.Flavor(ephemeral_gb=4000) - - vm_utils._import_migrate_ephemeral_disks("s", instance) - mock_get_sizes.assert_called_once_with(4000) - - @mock.patch.object(os_xenapi.client.vm_management, 'receive_vhd') - @mock.patch.object(vm_utils, '_set_vdi_info') - @mock.patch.object(vm_utils, 'scan_default_sr') - @mock.patch.object(vm_utils, 
'get_sr_path') - def test_import_migrated_vhds(self, mock_get_sr_path, mock_scan_sr, - mock_set_info, mock_recv_vhd): - session = mock.Mock() - instance = {"uuid": "uuid"} - mock_recv_vhd.return_value = {"root": {"uuid": "a"}} - session.call_xenapi.return_value = "vdi_ref" - mock_get_sr_path.return_value = "sr_path" - - result = vm_utils._import_migrated_vhds(session, instance, - 'chain_label', 'disk_type', 'vdi_label') - - expected = {'uuid': "a", 'ref': "vdi_ref"} - self.assertEqual(expected, result) - mock_get_sr_path.assert_called_once_with(session) - mock_recv_vhd.assert_called_once_with(session, 'chain_label', - 'sr_path', mock.ANY) - mock_scan_sr.assert_called_once_with(session) - session.call_xenapi.assert_called_once_with('VDI.get_by_uuid', 'a') - mock_set_info.assert_called_once_with(session, 'vdi_ref', 'disk_type', - 'vdi_label', 'disk_type', instance) - - def test_get_vhd_parent_uuid_rec_provided(self): - session = mock.Mock() - vdi_ref = 'vdi_ref' - vdi_rec = {'sm_config': {}} - self.assertIsNone(vm_utils._get_vhd_parent_uuid(session, - vdi_ref, - vdi_rec)) - self.assertFalse(session.call_xenapi.called) - - -class MigrateVHDTestCase(VMUtilsTestBase): - def _assert_transfer_called(self, session, label): - session.call_plugin_serialized.assert_called_once_with( - 'migration.py', 'transfer_vhd', instance_uuid=label, host="dest", - vdi_uuid="vdi_uuid", sr_path="sr_path", seq_num=2) - - @mock.patch.object(os_xenapi.client.vm_management, 'transfer_vhd') - def test_migrate_vhd_root(self, mock_trans_vhd): - session = mock.Mock() - instance = {"uuid": "a"} - - vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest", - "sr_path", 2) - - mock_trans_vhd.assert_called_once_with(session, "a", - "dest", "vdi_uuid", "sr_path", - 2) - - @mock.patch.object(os_xenapi.client.vm_management, 'transfer_vhd') - def test_migrate_vhd_ephemeral(self, mock_trans_vhd): - session = mock.Mock() - instance = {"uuid": "a"} - - vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest", - "sr_path", 2, 2) - - mock_trans_vhd.assert_called_once_with(session, "a_ephemeral_2", - "dest", "vdi_uuid", "sr_path", - 2) - - @mock.patch.object(os_xenapi.client.vm_management, 'transfer_vhd') - def test_migrate_vhd_converts_exceptions(self, mock_trans_vhd): - session = mock.Mock() - session.XenAPI.Failure = test.TestingException - mock_trans_vhd.side_effect = test.TestingException() - instance = {"uuid": "a"} - - self.assertRaises(exception.MigrationError, vm_utils.migrate_vhd, - session, instance, "vdi_uuid", "dest", "sr_path", 2) - mock_trans_vhd.assert_called_once_with(session, "a", - "dest", "vdi_uuid", "sr_path", - 2) - - -class StripBaseMirrorTestCase(VMUtilsTestBase): - def test_strip_base_mirror_from_vdi_works(self): - session = mock.Mock() - vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref") - session.call_xenapi.assert_called_once_with( - "VDI.remove_from_sm_config", "vdi_ref", "base_mirror") - - def test_strip_base_mirror_from_vdi_hides_error(self): - session = mock.Mock() - session.XenAPI.Failure = test.TestingException - session.call_xenapi.side_effect = test.TestingException() - - vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref") - - session.call_xenapi.assert_called_once_with( - "VDI.remove_from_sm_config", "vdi_ref", "base_mirror") - - @mock.patch.object(vm_utils, '_try_strip_base_mirror_from_vdi') - def test_strip_base_mirror_from_vdis(self, mock_strip): - def call_xenapi(method, arg): - if method == "VM.get_VBDs": - return ['VBD_ref_1', 'VBD_ref_2'] - if method == "VBD.get_VDI": - 
return 'VDI' + arg[3:] - return "Unexpected call_xenapi: %s.%s" % (method, arg) - - session = mock.Mock() - session.call_xenapi.side_effect = call_xenapi - - vm_utils.strip_base_mirror_from_vdis(session, "vm_ref") - - expected = [mock.call('VM.get_VBDs', "vm_ref"), - mock.call('VBD.get_VDI', "VBD_ref_1"), - mock.call('VBD.get_VDI', "VBD_ref_2")] - self.assertEqual(expected, session.call_xenapi.call_args_list) - - expected = [mock.call(session, "VDI_ref_1"), - mock.call(session, "VDI_ref_2")] - self.assertEqual(expected, mock_strip.call_args_list) - - -class DeviceIdTestCase(VMUtilsTestBase): - def test_device_id_is_none_if_not_specified_in_meta_data(self): - image_meta = objects.ImageMeta.from_dict({}) - session = mock.Mock() - session.product_version = (6, 1, 0) - self.assertIsNone(vm_utils.get_vm_device_id(session, image_meta)) - - def test_get_device_id_if_hypervisor_version_is_greater_than_6_1(self): - image_meta = objects.ImageMeta.from_dict( - {'properties': {'xenapi_device_id': '0002'}}) - session = mock.Mock() - session.product_version = (6, 2, 0) - self.assertEqual(2, - vm_utils.get_vm_device_id(session, image_meta)) - session.product_version = (6, 3, 1) - self.assertEqual(2, - vm_utils.get_vm_device_id(session, image_meta)) - - def test_raise_exception_if_device_id_not_supported_by_hyp_version(self): - image_meta = objects.ImageMeta.from_dict( - {'properties': {'xenapi_device_id': '0002'}}) - session = mock.Mock() - session.product_version = (6, 0) - exc = self.assertRaises(exception.NovaException, - vm_utils.get_vm_device_id, session, image_meta) - self.assertEqual("Device id 2 specified is not supported by " - "hypervisor version (6, 0)", exc.message) - session.product_version = ('6a') - exc = self.assertRaises(exception.NovaException, - vm_utils.get_vm_device_id, session, image_meta) - self.assertEqual("Device id 2 specified is not supported by " - "hypervisor version 6a", exc.message) - - -class CreateVmRecordTestCase(VMUtilsTestBase): - @mock.patch.object(flavors, 'extract_flavor') - def test_create_vm_record_linux(self, mock_extract_flavor): - instance = objects.Instance(uuid=uuids.nova_uuid, - os_type="linux") - self._test_create_vm_record(mock_extract_flavor, instance, False) - - @mock.patch.object(flavors, 'extract_flavor') - def test_create_vm_record_windows(self, mock_extract_flavor): - instance = objects.Instance(uuid=uuids.nova_uuid, - os_type="windows") - with mock.patch.object(instance, 'get_flavor') as get: - get.return_value = objects.Flavor._from_db_object( - None, objects.Flavor(), test_flavor.fake_flavor) - self._test_create_vm_record(mock_extract_flavor, instance, True) - - def _test_create_vm_record(self, mock_extract_flavor, instance, - is_viridian): - session = self.get_fake_session() - flavor = {"memory_mb": 1024, "vcpus": 1, "vcpu_weight": 2} - mock_extract_flavor.return_value = flavor - - with mock.patch.object(instance, 'get_flavor') as get: - get.return_value = objects.Flavor(memory_mb=1024, - vcpus=1, - vcpu_weight=2) - vm_utils.create_vm(session, instance, "name", "kernel", "ramdisk", - device_id=2) - - is_viridian_str = str(is_viridian).lower() - - expected_vm_rec = { - 'VCPUs_params': {'cap': '0', 'weight': '2'}, - 'PV_args': '', - 'memory_static_min': '0', - 'ha_restart_priority': '', - 'HVM_boot_policy': 'BIOS order', - 'PV_bootloader': '', - 'tags': [], - 'VCPUs_max': '1', - 'memory_static_max': '1073741824', - 'actions_after_shutdown': 'destroy', - 'memory_dynamic_max': '1073741824', - 'user_version': '0', - 'xenstore_data': 
{'vm-data/allowvssprovider': 'false'}, - 'blocked_operations': {}, - 'is_a_template': False, - 'name_description': '', - 'memory_dynamic_min': '1073741824', - 'actions_after_crash': 'destroy', - 'memory_target': '1073741824', - 'PV_ramdisk': '', - 'PV_bootloader_args': '', - 'PCI_bus': '', - 'other_config': {'nova_uuid': uuids.nova_uuid}, - 'name_label': 'name', - 'actions_after_reboot': 'restart', - 'VCPUs_at_startup': '1', - 'HVM_boot_params': {'order': 'dc'}, - 'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true', - 'timeoffset': '0', 'viridian': is_viridian_str, - 'acpi': 'true', 'device_id': '0002'}, - 'PV_legacy_args': '', - 'PV_kernel': '', - 'affinity': '', - 'recommendations': '', - 'ha_always_run': False} - - session.call_xenapi.assert_called_with('VM.create', expected_vm_rec) - - def test_list_vms(self): - self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF)) - self.fixture.config(disable_process_locking=True, - group='oslo_concurrency') - self.flags(instance_name_template='%d') - self.flags(connection_url='http://localhost', - connection_password='test_pass', - group='xenserver') - - fake.create_vm("foo1", "Halted") - vm_ref = fake.create_vm("foo2", "Running") - - stubs.stubout_session(self, fake.SessionBase) - driver = xenapi_conn.XenAPIDriver(False) - - result = list(vm_utils.list_vms(driver._session)) - - # Will have 3 VMs - but one is Dom0 and one is not running on the host - self.assertEqual(len(driver._session.call_xenapi('VM.get_all')), 3) - self.assertEqual(len(result), 1) - - result_keys = [key for (key, value) in result] - - self.assertIn(vm_ref, result_keys) - - -class ChildVHDsTestCase(test.NoDBTestCase): - all_vdis = [ - ("my-vdi-ref", - {"uuid": "my-uuid", "sm_config": {}, - "is_a_snapshot": False, "other_config": {}}), - ("non-parent", - {"uuid": "uuid-1", "sm_config": {}, - "is_a_snapshot": False, "other_config": {}}), - ("diff-parent", - {"uuid": "uuid-1", "sm_config": {"vhd-parent": "other-uuid"}, - "is_a_snapshot": False, "other_config": {}}), - ("child", - {"uuid": "uuid-child", "sm_config": {"vhd-parent": "my-uuid"}, - "is_a_snapshot": False, "other_config": {}}), - ("child-snap", - {"uuid": "uuid-child-snap", "sm_config": {"vhd-parent": "my-uuid"}, - "is_a_snapshot": True, "other_config": {}}), - ] - - @mock.patch.object(vm_utils, '_get_all_vdis_in_sr') - def test_child_vhds_defaults(self, mock_get_all): - mock_get_all.return_value = self.all_vdis - - result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"]) - - self.assertJsonEqual(['uuid-child', 'uuid-child-snap'], result) - - @mock.patch.object(vm_utils, '_get_all_vdis_in_sr') - def test_child_vhds_only_snapshots(self, mock_get_all): - mock_get_all.return_value = self.all_vdis - - result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"], - old_snapshots_only=True) - - self.assertEqual(['uuid-child-snap'], result) - - @mock.patch.object(vm_utils, '_get_all_vdis_in_sr') - def test_child_vhds_chain(self, mock_get_all): - mock_get_all.return_value = self.all_vdis - - result = vm_utils._child_vhds("session", "sr_ref", - ["my-uuid", "other-uuid"], old_snapshots_only=True) - - self.assertEqual(['uuid-child-snap'], result) - - def test_is_vdi_a_snapshot_works(self): - vdi_rec = {"is_a_snapshot": True, - "other_config": {}} - - self.assertTrue(vm_utils._is_vdi_a_snapshot(vdi_rec)) - - def test_is_vdi_a_snapshot_base_images_false(self): - vdi_rec = {"is_a_snapshot": True, - "other_config": {"image-id": "fake"}} - - self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec)) - - def 
test_is_vdi_a_snapshot_false_for_non_snapshot(self): - vdi_rec = {"is_a_snapshot": False, - "other_config": {}} - - self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec)) - - -class RemoveOldSnapshotsTestCase(test.NoDBTestCase): - - @mock.patch.object(vm_utils, 'get_vdi_for_vm_safely') - @mock.patch.object(vm_utils, '_walk_vdi_chain') - @mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain') - def test_remove_old_snapshots(self, mock_delete, mock_walk, mock_get): - instance = {"uuid": "fake"} - mock_get.return_value = ("ref", {"uuid": "vdi", "SR": "sr_ref"}) - mock_walk.return_value = [{"uuid": "uuid1"}, {"uuid": "uuid2"}] - - vm_utils.remove_old_snapshots("session", instance, "vm_ref") - - mock_delete.assert_called_once_with("session", instance, - ["uuid1", "uuid2"], "sr_ref") - mock_get.assert_called_once_with("session", "vm_ref") - mock_walk.assert_called_once_with("session", "vdi") - - @mock.patch.object(vm_utils, '_child_vhds') - def test_delete_snapshots_in_vdi_chain_no_chain(self, mock_child): - instance = {"uuid": "fake"} - - vm_utils._delete_snapshots_in_vdi_chain("session", instance, - ["uuid"], "sr") - - self.assertFalse(mock_child.called) - - @mock.patch.object(vm_utils, '_child_vhds') - def test_delete_snapshots_in_vdi_chain_no_snapshots(self, mock_child): - instance = {"uuid": "fake"} - mock_child.return_value = [] - - vm_utils._delete_snapshots_in_vdi_chain("session", instance, - ["uuid1", "uuid2"], "sr") - - mock_child.assert_called_once_with("session", "sr", ["uuid2"], - old_snapshots_only=True) - - @mock.patch.object(vm_utils, '_scan_sr') - @mock.patch.object(vm_utils, 'safe_destroy_vdis') - @mock.patch.object(vm_utils, '_child_vhds') - def test_delete_snapshots_in_vdi_chain_calls_destroy(self, mock_child, - mock_destroy, mock_scan): - instance = {"uuid": "fake"} - mock_child.return_value = ["suuid1", "suuid2"] - session = mock.Mock() - session.VDI.get_by_uuid.side_effect = ["ref1", "ref2"] - - vm_utils._delete_snapshots_in_vdi_chain(session, instance, - ["uuid1", "uuid2"], "sr") - - mock_child.assert_called_once_with(session, "sr", ["uuid2"], - old_snapshots_only=True) - session.VDI.get_by_uuid.assert_has_calls([ - mock.call("suuid1"), mock.call("suuid2")]) - mock_destroy.assert_called_once_with(session, ["ref1", "ref2"]) - mock_scan.assert_called_once_with(session, "sr") - - -class ResizeFunctionTestCase(test.NoDBTestCase): - def _call_get_resize_func_name(self, brand, version): - session = mock.Mock() - session.product_brand = brand - session.product_version = version - - return vm_utils._get_resize_func_name(session) - - def _test_is_resize(self, brand, version): - result = self._call_get_resize_func_name(brand, version) - self.assertEqual("VDI.resize", result) - - def _test_is_resize_online(self, brand, version): - result = self._call_get_resize_func_name(brand, version) - self.assertEqual("VDI.resize_online", result) - - def test_xenserver_5_5(self): - self._test_is_resize_online("XenServer", (5, 5, 0)) - - def test_xenserver_6_0(self): - self._test_is_resize("XenServer", (6, 0, 0)) - - def test_xcp_1_1(self): - self._test_is_resize_online("XCP", (1, 1, 0)) - - def test_xcp_1_2(self): - self._test_is_resize("XCP", (1, 2, 0)) - - def test_xcp_2_0(self): - self._test_is_resize("XCP", (2, 0, 0)) - - def test_random_brand(self): - self._test_is_resize("asfd", (1, 1, 0)) - - def test_default(self): - self._test_is_resize(None, None) - - def test_empty(self): - self._test_is_resize("", "") - - -class VMInfoTests(VMUtilsTestBase): - def setUp(self): - 
super(VMInfoTests, self).setUp() - self.session = mock.Mock() - - def test_get_power_state_valid(self): - # Save on test setup calls by having these simple tests in one method - self.session.call_xenapi.return_value = "Running" - self.assertEqual(vm_utils.get_power_state(self.session, "ref"), - power_state.RUNNING) - - self.session.call_xenapi.return_value = "Halted" - self.assertEqual(vm_utils.get_power_state(self.session, "ref"), - power_state.SHUTDOWN) - - self.session.call_xenapi.return_value = "Paused" - self.assertEqual(vm_utils.get_power_state(self.session, "ref"), - power_state.PAUSED) - - self.session.call_xenapi.return_value = "Suspended" - self.assertEqual(vm_utils.get_power_state(self.session, "ref"), - power_state.SUSPENDED) - - self.session.call_xenapi.return_value = "Crashed" - self.assertEqual(vm_utils.get_power_state(self.session, "ref"), - power_state.CRASHED) - - def test_get_power_state_invalid(self): - self.session.call_xenapi.return_value = "Invalid" - self.assertRaises(KeyError, - vm_utils.get_power_state, self.session, "ref") - - _XAPI_record = {'power_state': 'Running', - 'memory_static_max': str(10 << 10), - 'memory_dynamic_max': str(9 << 10), - 'VCPUs_max': '5'} - - def test_compile_info(self): - - def call_xenapi(method, *args): - if method.startswith('VM.get_') and args[0] == 'dummy': - return self._XAPI_record[method[7:]] - - self.session.call_xenapi.side_effect = call_xenapi - - info = vm_utils.compile_info(self.session, "dummy") - self.assertEqual(hardware.InstanceInfo(state=power_state.RUNNING), - info) diff --git a/nova/tests/unit/virt/xenapi/test_vmops.py b/nova/tests/unit/virt/xenapi/test_vmops.py deleted file mode 100644 index e1b6a6beae71..000000000000 --- a/nova/tests/unit/virt/xenapi/test_vmops.py +++ /dev/null @@ -1,2405 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import base64 -import uuid -import zlib - -try: - import xmlrpclib -except ImportError: - import six.moves.xmlrpc_client as xmlrpclib - -from eventlet import greenthread -import fixtures -import mock -from os_xenapi.client import host_xenstore -from oslo_utils.fixture import uuidsentinel as uuids -from oslo_utils import importutils -from oslo_utils import timeutils -import six - -from nova.compute import power_state -from nova.compute import task_states -from nova import context -from nova import exception -from nova import objects -from nova.objects import fields -from nova.pci import manager as pci_manager -from nova import test -from nova.tests.unit import fake_flavor -from nova.tests.unit import fake_instance -from nova.tests.unit.virt.xenapi import stubs -from nova.virt import fake -from nova.virt.xenapi import agent as xenapi_agent -from nova.virt.xenapi import fake as xenapi_fake -from nova.virt.xenapi.image import utils as image_utils -from nova.virt.xenapi import vm_utils -from nova.virt.xenapi import vmops -from nova.virt.xenapi import volume_utils -from nova.virt.xenapi import volumeops - - -class VMOpsTestBase(stubs.XenAPITestBaseNoDB): - def setUp(self): - super(VMOpsTestBase, self).setUp() - self._setup_mock_vmops() - self.vms = [] - - def _setup_mock_vmops(self, product_brand=None, product_version=None): - stubs.stubout_session(self, xenapi_fake.SessionBase) - self._session = xenapi_fake.SessionBase( - 'http://localhost', 'root', 'test_pass') - self.vmops = vmops.VMOps(self._session, fake.FakeVirtAPI()) - - def create_vm(self, name, state="Running"): - vm_ref = xenapi_fake.create_vm(name, state) - self.vms.append(vm_ref) - vm = xenapi_fake.get_record("VM", vm_ref) - return vm, vm_ref - - def tearDown(self): - super(VMOpsTestBase, self).tearDown() - for vm in self.vms: - xenapi_fake.destroy_vm(vm) - - -class VMOpsTestCase(VMOpsTestBase): - def setUp(self): - super(VMOpsTestCase, self).setUp() - self._setup_mock_vmops() - self.context = context.RequestContext('user', 'project') - self.instance = fake_instance.fake_instance_obj(self.context) - - def _setup_mock_vmops(self, product_brand=None, product_version=None): - self._session = self._get_mock_session(product_brand, product_version) - self._vmops = vmops.VMOps(self._session, fake.FakeVirtAPI()) - - def _get_mock_session(self, product_brand, product_version): - class Mock(object): - pass - - mock_session = Mock() - mock_session.product_brand = product_brand - mock_session.product_version = product_version - return mock_session - - def _test_finish_revert_migration_after_crash(self, backup_made, new_made, - vm_shutdown=True): - instance = {'name': 'foo', - 'task_state': task_states.RESIZE_MIGRATING} - context = 'fake_context' - - lookup_returns = [backup_made and 'foo' or None, - (not backup_made or new_made) and 'foo' or None] - - @mock.patch.object(vm_utils, 'lookup', - side_effect=lookup_returns) - @mock.patch.object(self._vmops, '_destroy') - @mock.patch.object(vm_utils, 'set_vm_name_label') - @mock.patch.object(self._vmops, '_attach_mapped_block_devices') - @mock.patch.object(self._vmops, '_start') - @mock.patch.object(vm_utils, 'is_vm_shutdown', - return_value=vm_shutdown) - def test(mock_is_vm, mock_start, mock_attach_bdm, mock_set_vm_name, - mock_destroy, mock_lookup): - self._vmops.finish_revert_migration(context, instance, []) - - mock_lookup.assert_has_calls([mock.call(self._session, 'foo-orig'), - mock.call(self._session, 'foo')]) - if backup_made: - if new_made: - mock_destroy.assert_called_once_with(instance, 
'foo') - mock_set_vm_name.assert_called_once_with(self._session, 'foo', - 'foo') - mock_attach_bdm.assert_called_once_with(instance, []) - - mock_is_vm.assert_called_once_with(self._session, 'foo') - if vm_shutdown: - mock_start.assert_called_once_with(instance, 'foo') - - test() - - def test_finish_revert_migration_after_crash(self): - self._test_finish_revert_migration_after_crash(True, True) - - def test_finish_revert_migration_after_crash_before_new(self): - self._test_finish_revert_migration_after_crash(True, False) - - def test_finish_revert_migration_after_crash_before_backup(self): - self._test_finish_revert_migration_after_crash(False, False) - - @mock.patch.object(vm_utils, 'lookup', return_value=None) - def test_get_vm_opaque_ref_raises_instance_not_found(self, mock_lookup): - instance = {"name": "dummy"} - - self.assertRaises(exception.InstanceNotFound, - self._vmops._get_vm_opaque_ref, instance) - mock_lookup.assert_called_once_with(self._session, instance['name'], - False) - - @mock.patch.object(vm_utils, 'destroy_vm') - @mock.patch.object(vm_utils, 'clean_shutdown_vm') - @mock.patch.object(vm_utils, 'hard_shutdown_vm') - def test_clean_shutdown_no_bdm_on_destroy(self, hard_shutdown_vm, - clean_shutdown_vm, destroy_vm): - vm_ref = 'vm_ref' - self._vmops._destroy(self.instance, vm_ref, destroy_disks=False) - hard_shutdown_vm.assert_called_once_with(self._vmops._session, - self.instance, vm_ref) - self.assertEqual(0, clean_shutdown_vm.call_count) - - @mock.patch.object(vm_utils, 'destroy_vm') - @mock.patch.object(vm_utils, 'clean_shutdown_vm') - @mock.patch.object(vm_utils, 'hard_shutdown_vm') - def test_clean_shutdown_with_bdm_on_destroy(self, hard_shutdown_vm, - clean_shutdown_vm, destroy_vm): - vm_ref = 'vm_ref' - block_device_info = {'block_device_mapping': ['fake']} - self._vmops._destroy(self.instance, vm_ref, destroy_disks=False, - block_device_info=block_device_info) - clean_shutdown_vm.assert_called_once_with(self._vmops._session, - self.instance, vm_ref) - self.assertEqual(0, hard_shutdown_vm.call_count) - - @mock.patch.object(vm_utils, 'destroy_vm') - @mock.patch.object(vm_utils, 'clean_shutdown_vm', return_value=False) - @mock.patch.object(vm_utils, 'hard_shutdown_vm') - def test_clean_shutdown_with_bdm_failed_on_destroy(self, hard_shutdown_vm, - clean_shutdown_vm, destroy_vm): - vm_ref = 'vm_ref' - block_device_info = {'block_device_mapping': ['fake']} - self._vmops._destroy(self.instance, vm_ref, destroy_disks=False, - block_device_info=block_device_info) - clean_shutdown_vm.assert_called_once_with(self._vmops._session, - self.instance, vm_ref) - hard_shutdown_vm.assert_called_once_with(self._vmops._session, - self.instance, vm_ref) - - @mock.patch.object(vm_utils, 'try_auto_configure_disk') - @mock.patch.object(vm_utils, 'create_vbd', - side_effect=test.TestingException) - def test_attach_disks_rescue_auto_disk_config_false(self, create_vbd, - try_auto_config): - ctxt = context.RequestContext('user', 'project') - instance = fake_instance.fake_instance_obj(ctxt) - image_meta = objects.ImageMeta.from_dict( - {'properties': {'auto_disk_config': 'false'}}) - vdis = {'root': {'ref': 'fake-ref'}} - self.assertRaises(test.TestingException, self._vmops._attach_disks, - ctxt, instance, image_meta=image_meta, vm_ref=None, - name_label=None, vdis=vdis, disk_image_type='fake', - network_info=[], rescue=True) - self.assertFalse(try_auto_config.called) - - @mock.patch.object(vm_utils, 'try_auto_configure_disk') - @mock.patch.object(vm_utils, 'create_vbd', - 
side_effect=test.TestingException) - def test_attach_disks_rescue_auto_disk_config_true(self, create_vbd, - try_auto_config): - ctxt = context.RequestContext('user', 'project') - instance = fake_instance.fake_instance_obj(ctxt) - image_meta = objects.ImageMeta.from_dict( - {'properties': {'auto_disk_config': 'true'}}) - vdis = {'root': {'ref': 'fake-ref'}} - self.assertRaises(test.TestingException, self._vmops._attach_disks, - ctxt, instance, image_meta=image_meta, vm_ref=None, - name_label=None, vdis=vdis, disk_image_type='fake', - network_info=[], rescue=True) - try_auto_config.assert_called_once_with(self._vmops._session, - 'fake-ref', instance.flavor.root_gb) - - @mock.patch.object(vm_utils, 'snapshot_attached_here') - @mock.patch.object(timeutils, 'delta_seconds') - @mock.patch.object(timeutils, 'utcnow') - @mock.patch.object(image_utils, 'get_image_handler') - def test_snapshot_using_image_handler(self, - mock_get_image_handler, - mock_utcnow, - mock_delta_seconds, - mock_snapshot_attached_here): - mock_utcnow.side_effect = ['fake_start', 'fake_end'] - self.flags(image_handler='direct_vhd', group='xenserver') - mock_get_image_handler.return_value = mock.Mock() - - class FakeVdiUuid(object): - def __enter__(self): - pass - - def __exit__(self, Type, value, traceback): - pass - - fake_vdi_uuid = FakeVdiUuid() - mock_snapshot_attached_here.return_value = fake_vdi_uuid - self._setup_mock_vmops() - vmops = self._vmops - with mock.patch.object(vmops, '_get_vm_opaque_ref', - return_value='fake_ref') as mock_get_opa_ref: - fake_instance = {'name': 'fake_name'} - - vmops.snapshot('fake_ctx', fake_instance, 'fake_image_id', - mock.Mock()) - - vmops.image_handler.upload_image.assert_called_once_with( - 'fake_ctx', vmops._session, fake_instance, - 'fake_image_id', None) - mock_get_opa_ref.assert_called_once_with(fake_instance) - mock_delta_seconds.assert_called_once_with('fake_start', - 'fake_end') - self.assertEqual(mock_utcnow.call_count, 2) - - @mock.patch.object(vm_utils, 'snapshot_attached_here') - @mock.patch.object(timeutils, 'delta_seconds') - @mock.patch.object(timeutils, 'utcnow') - @mock.patch.object(image_utils, 'get_image_handler') - @mock.patch.object(importutils, 'import_object') - def test_snapshot_using_upload_image_handler(self, - mock_import_object, - mock_get_image_handler, - mock_utcnow, - mock_delta_seconds, - mock_snapshot_attached_here): - mock_utcnow.side_effect = ['fake_start', 'fake_end'] - self.flags(image_upload_handler='image_upload_handler', - group='xenserver') - mock_get_image_handler.return_value = mock.Mock() - - class FakeVdiUuid(object): - def __enter__(self): - pass - - def __exit__(self, Type, value, traceback): - pass - - fake_vdi_uuid = FakeVdiUuid() - mock_snapshot_attached_here.return_value = fake_vdi_uuid - mock_import_object.return_value = mock.Mock() - self._setup_mock_vmops() - vmops = self._vmops - with mock.patch.object(vmops, '_get_vm_opaque_ref', - return_value='fake_ref') as mock_get_opa_ref: - fake_instance = {'name': 'fake_name'} - - vmops.snapshot('fake_ctx', fake_instance, 'fake_image_id', - mock.Mock()) - - vmops.image_upload_handler.upload_image.assert_called_once_with( - 'fake_ctx', vmops._session, fake_instance, - 'fake_image_id', None) - mock_get_opa_ref.assert_called_once_with(fake_instance) - mock_delta_seconds.assert_called_once_with('fake_start', - 'fake_end') - self.assertEqual(mock_utcnow.call_count, 2) - - -class InjectAutoDiskConfigTestCase(VMOpsTestBase): - def test_inject_auto_disk_config_when_present(self): - vm, vm_ref = 
self.create_vm("dummy") - instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": True} - self.vmops._inject_auto_disk_config(instance, vm_ref) - xenstore_data = vm['xenstore_data'] - self.assertEqual(xenstore_data['vm-data/auto-disk-config'], 'True') - - def test_inject_auto_disk_config_none_as_false(self): - vm, vm_ref = self.create_vm("dummy") - instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None} - self.vmops._inject_auto_disk_config(instance, vm_ref) - xenstore_data = vm['xenstore_data'] - self.assertEqual(xenstore_data['vm-data/auto-disk-config'], 'False') - - -class GetConsoleOutputTestCase(VMOpsTestBase): - def _mock_console_log(self, session, domid): - if domid == 0: - raise session.XenAPI.Failure('No console') - return base64.b64encode(zlib.compress(six.b('dom_id: %s' % domid))) - - @mock.patch.object(vmops.vm_management, 'get_console_log') - def test_get_console_output_works(self, mock_console_log): - ctxt = context.RequestContext('user', 'project') - mock_console_log.side_effect = self._mock_console_log - instance = fake_instance.fake_instance_obj(ctxt) - with mock.patch.object(self.vmops, '_get_last_dom_id', - return_value=42) as mock_last_dom: - self.assertEqual(b"dom_id: 42", - self.vmops.get_console_output(instance)) - mock_last_dom.assert_called_once_with(instance, check_rescue=True) - - @mock.patch.object(vmops.vm_management, 'get_console_log') - def test_get_console_output_not_available(self, mock_console_log): - mock_console_log.side_effect = self._mock_console_log - ctxt = context.RequestContext('user', 'project') - instance = fake_instance.fake_instance_obj(ctxt) - # dom_id=0 used to trigger exception in fake XenAPI - with mock.patch.object(self.vmops, '_get_last_dom_id', - return_value=0) as mock_last_dom: - self.assertRaises(exception.ConsoleNotAvailable, - self.vmops.get_console_output, instance) - mock_last_dom.assert_called_once_with(instance, check_rescue=True) - - def test_get_dom_id_works(self): - instance = {"name": "dummy"} - vm, vm_ref = self.create_vm("dummy") - self.assertEqual(vm["domid"], self.vmops._get_dom_id(instance)) - - def test_get_dom_id_works_with_rescue_vm(self): - instance = {"name": "dummy"} - vm, vm_ref = self.create_vm("dummy-rescue") - self.assertEqual(vm["domid"], - self.vmops._get_dom_id(instance, check_rescue=True)) - - def test_get_dom_id_raises_not_found(self): - instance = {"name": "dummy"} - self.create_vm("not-dummy") - self.assertRaises(exception.InstanceNotFound, - self.vmops._get_dom_id, instance) - - def test_get_dom_id_works_with_vmref(self): - vm, vm_ref = self.create_vm("dummy") - instance = {'name': 'dummy'} - self.assertEqual(vm["domid"], - self.vmops._get_dom_id(instance, vm_ref=vm_ref)) - - def test_get_dom_id_fails_if_shutdown(self): - vm, vm_ref = self.create_vm("dummy") - instance = {'name': 'dummy'} - self._session.VM.hard_shutdown(vm_ref) - self.assertRaises(exception.InstanceNotFound, - self.vmops._get_dom_id, instance, vm_ref=vm_ref) - - -class SpawnTestCase(VMOpsTestBase): - def _stub_out_common(self): - self.mock_ensure_instance_name_unique = self.useFixture( - fixtures.MockPatchObject( - self.vmops, '_ensure_instance_name_unique')).mock - self.mock_ensure_enough_free_mem = self.useFixture( - fixtures.MockPatchObject( - self.vmops, '_ensure_enough_free_mem')).mock - self.mock_update_instance_progress = self.useFixture( - fixtures.MockPatchObject( - self.vmops, '_update_instance_progress')).mock - self.mock_determine_disk_image_type = self.useFixture( - fixtures.MockPatchObject( - 
vm_utils, 'determine_disk_image_type')).mock - self.mock_get_vdis_for_instance = self.useFixture( - fixtures.MockPatchObject( - self.vmops, '_get_vdis_for_instance')).mock - self.mock_safe_destroy_vdis = self.useFixture( - fixtures.MockPatchObject( - vm_utils, 'safe_destroy_vdis')).mock - self.mock_safe_cleanup_from_vdis = self.useFixture( - fixtures.MockPatchObject( - self.vmops._volumeops, 'safe_cleanup_from_vdis')).mock - self.mock_resize_up_vdis = self.useFixture( - fixtures.MockPatchObject( - self.vmops, '_resize_up_vdis')).mock - self.mock_create_kernel_and_ramdisk = self.useFixture( - fixtures.MockPatchObject( - vm_utils, 'create_kernel_and_ramdisk')).mock - self.mock_destroy_kernel_ramdisk = self.useFixture( - fixtures.MockPatchObject( - vm_utils, 'destroy_kernel_ramdisk')).mock - self.mock_create_vm_record = self.useFixture( - fixtures.MockPatchObject( - self.vmops, '_create_vm_record')).mock - self.mock_destroy = self.useFixture( - fixtures.MockPatchObject(self.vmops, '_destroy')).mock - self.mock_attach_disks = self.useFixture( - fixtures.MockPatchObject(self.vmops, '_attach_disks')).mock - self.mock_save_device_metadata = self.useFixture( - fixtures.MockPatchObject( - self.vmops, '_save_device_metadata')).mock - self.mock_get_instance_pci_devs = self.useFixture( - fixtures.MockPatchObject( - pci_manager, 'get_instance_pci_devs')).mock - self.mock_set_other_config_pci = self.useFixture( - fixtures.MockPatchObject( - vm_utils, 'set_other_config_pci')).mock - self.mock_attach_orig_disks = self.useFixture( - fixtures.MockPatchObject( - self.vmops, '_attach_orig_disks')).mock - self.mock_inject_network_info = self.useFixture( - fixtures.MockPatchObject(self.vmops, 'inject_network_info')).mock - self.mock_inject_hostname = self.useFixture( - fixtures.MockPatchObject(self.vmops, '_inject_hostname')).mock - self.mock_inject_instance_metadata = self.useFixture( - fixtures.MockPatchObject( - self.vmops, '_inject_instance_metadata')).mock - self.mock_inject_auto_disk_config = self.useFixture( - fixtures.MockPatchObject( - self.vmops, '_inject_auto_disk_config')).mock - self.mock_file_inject_vm_settings = self.useFixture( - fixtures.MockPatchObject( - self.vmops, '_file_inject_vm_settings')).mock - self.mock_create_vifs = self.useFixture( - fixtures.MockPatchObject(self.vmops, '_create_vifs')).mock - self.mock_start = self.useFixture( - fixtures.MockPatchObject(self.vmops, '_start')).mock - self.mock_wait_for_instance_to_start = self.useFixture( - fixtures.MockPatchObject( - self.vmops, '_wait_for_instance_to_start')).mock - self.mock_configure_new_instance_w_agent = self.useFixture( - fixtures.MockPatchObject( - self.vmops, '_configure_new_instance_with_agent')).mock - self.mock_remove_hostname = self.useFixture( - fixtures.MockPatchObject(self.vmops, '_remove_hostname')).mock - self.mock_update_last_dom_id = self.useFixture( - fixtures.MockPatchObject(self.vmops, '_update_last_dom_id')).mock - self.mock_call_xenapi = self.useFixture( - fixtures.MockPatchObject(self.vmops._session, 'call_xenapi')).mock - self.mock_attach_vgpu = self.useFixture( - fixtures.MockPatchObject(self.vmops, '_attach_vgpu')).mock - - @staticmethod - def _new_instance(obj): - class _Instance(dict): - __getattr__ = dict.__getitem__ - __setattr__ = dict.__setitem__ - return _Instance(**obj) - - def _test_spawn(self, name_label_param=None, block_device_info_param=None, - rescue=False, include_root_vdi=True, throw_exception=None, - attach_pci_dev=False, neutron_exception=False, - network_info=None, vgpu_info=None): - 
self._stub_out_common() - - instance = self._new_instance({"name": "dummy", "uuid": "fake_uuid", - "device_metadata": None}) - - name_label = name_label_param - if name_label is None: - name_label = "dummy" - image_meta = objects.ImageMeta.from_dict({"id": uuids.image_id}) - context = "context" - session = self.vmops._session - injected_files = "fake_files" - admin_password = "password" - if network_info is None: - network_info = [] - steps = 9 - if rescue: - steps += 1 - - block_device_info = block_device_info_param - if block_device_info and not block_device_info['root_device_name']: - block_device_info = dict(block_device_info_param) - block_device_info['root_device_name'] = \ - self.vmops.default_root_dev - - di_type = "di_type" - self.mock_determine_disk_image_type.return_value = di_type - - expected_update_instance_progress_calls = [] - step = 1 - expected_update_instance_progress_calls.append( - mock.call(context, instance, step, steps)) - - vdis = {"other": {"ref": "fake_ref_2", "osvol": True}} - if include_root_vdi: - vdis["root"] = {"ref": "fake_ref"} - self.mock_get_vdis_for_instance.return_value = vdis - step += 1 - expected_update_instance_progress_calls.append( - mock.call(context, instance, step, steps)) - - kernel_file = "kernel" - ramdisk_file = "ramdisk" - self.mock_create_kernel_and_ramdisk.return_value = (kernel_file, - ramdisk_file) - step += 1 - expected_update_instance_progress_calls.append( - mock.call(context, instance, step, steps)) - - vm_ref = "fake_vm_ref" - self.mock_create_vm_record.return_value = vm_ref - step += 1 - expected_update_instance_progress_calls.append( - mock.call(context, instance, step, steps)) - - if attach_pci_dev: - fake_dev = { - 'created_at': None, - 'updated_at': None, - 'deleted_at': None, - 'deleted': None, - 'id': 1, - 'compute_node_id': 1, - 'address': '00:00.0', - 'vendor_id': '1234', - 'product_id': 'abcd', - 'dev_type': fields.PciDeviceType.STANDARD, - 'status': 'available', - 'dev_id': 'devid', - 'label': 'label', - 'instance_uuid': None, - 'extra_info': '{}', - } - self.mock_get_instance_pci_devs.return_value = [fake_dev] - else: - self.mock_get_instance_pci_devs.return_value = [] - - step += 1 - expected_update_instance_progress_calls.append( - mock.call(context, instance, step, steps)) - - if neutron_exception: - events = [('network-vif-plugged', 1)] - self.stub_out('nova.virt.xenapi.vmops.VMOps.' - '_neutron_failed_callback', - lambda event_name, instance: None) - self.stub_out('nova.virt.xenapi.vmops.VMOps.' 
- 'wait_for_instance_event', - lambda instance, event_names, - deadline, error_callback: None) - mock_wait_for_instance_event = self.useFixture( - fixtures.MockPatchObject( - self.vmops._virtapi, 'wait_for_instance_event', - side_effect=( - exception.VirtualInterfaceCreateException))).mock - else: - step += 1 - expected_update_instance_progress_calls.append( - mock.call(context, instance, step, steps)) - - if rescue: - step += 1 - expected_update_instance_progress_calls.append( - mock.call(context, instance, step, steps)) - start_pause = True - step += 1 - expected_update_instance_progress_calls.append( - mock.call(context, instance, step, steps)) - step += 1 - expected_update_instance_progress_calls.append( - mock.call(context, instance, step, steps)) - step += 1 - expected_update_instance_progress_calls.append( - mock.call(context, instance, step, steps)) - - if throw_exception: - self.mock_update_instance_progress.side_effect = [ - None, None, None, None, None, None, None, None, - throw_exception] - - self.vmops.spawn(context, instance, image_meta, injected_files, - admin_password, network_info, block_device_info_param, - vgpu_info, name_label_param, rescue) - - self.mock_ensure_instance_name_unique.assert_called_once_with( - name_label) - self.mock_ensure_enough_free_mem.assert_called_once_with(instance) - self.mock_update_instance_progress.assert_has_calls( - expected_update_instance_progress_calls) - self.mock_determine_disk_image_type.assert_called_once_with(image_meta) - self.mock_get_vdis_for_instance.assert_called_once_with( - context, instance, name_label, image_meta, di_type, - block_device_info) - self.mock_resize_up_vdis.assert_called_once_with(instance, vdis) - self.mock_create_kernel_and_ramdisk.assert_called_once_with( - context, session, instance, name_label) - self.mock_create_vm_record.assert_called_once_with( - context, instance, name_label, di_type, kernel_file, ramdisk_file, - image_meta, rescue) - self.mock_attach_disks.assert_called_once_with( - context, instance, image_meta, vm_ref, name_label, vdis, di_type, - network_info, rescue, admin_password, injected_files) - self.mock_save_device_metadata.assert_called_once_with( - context, instance, block_device_info) - self.mock_get_instance_pci_devs.assert_called_once_with(instance) - self.mock_inject_network_info.assert_called_once_with( - instance, network_info, vm_ref) - self.mock_inject_hostname.assert_called_once_with(instance, vm_ref, - rescue) - self.mock_inject_instance_metadata.assert_called_once_with(instance, - vm_ref) - self.mock_inject_auto_disk_config.assert_called_once_with(instance, - vm_ref) - self.mock_file_inject_vm_settings.assert_called_once_with( - instance, vm_ref, vdis, network_info) - self.mock_start.assert_called_once_with(instance, vm_ref, - start_pause=start_pause) - self.mock_attach_vgpu.assert_called_once_with(vm_ref, vgpu_info, - instance) - - if throw_exception or neutron_exception: - self.mock_safe_destroy_vdis.assert_called_once_with( - self.vmops._session, ["fake_ref"]) - self.mock_safe_cleanup_from_vdis.assert_called_once_with( - ["fake_ref_2"]) - self.mock_destroy_kernel_ramdisk.assert_called_once_with( - self.vmops._session, instance, kernel_file, ramdisk_file) - self.mock_destroy.assert_called_once_with( - instance, vm_ref, network_info=network_info) - - if attach_pci_dev: - self.mock_set_other_config_pci.assert_called_once_with( - self.vmops._session, vm_ref, "0/0000:00:00.0") - - if neutron_exception: - mock_wait_for_instance_event.assert_called_once_with( - instance, events, 
deadline=300, - error_callback=self.vmops._neutron_failed_callback) - else: - self.mock_create_vifs.assert_called_once_with(instance, vm_ref, - network_info) - self.mock_wait_for_instance_to_start.assert_called_once_with( - instance, vm_ref) - self.mock_configure_new_instance_w_agent.assert_called_once_with( - instance, vm_ref, injected_files, admin_password) - self.mock_remove_hostname.assert_called_once_with( - instance, vm_ref) - self.mock_update_last_dom_id.assert_called_once_with(vm_ref) - self.mock_call_xenapi.assert_called_once_with('VM.unpause', vm_ref) - - if rescue: - self.mock_attach_orig_disks.assert_called_once_with(instance, - vm_ref) - - def test_spawn(self): - self._test_spawn() - - def test_spawn_with_alternate_options(self): - self._test_spawn(include_root_vdi=False, rescue=True, - name_label_param="bob", - block_device_info_param={"root_device_name": ""}) - - def test_spawn_with_pci_available_on_the_host(self): - self._test_spawn(attach_pci_dev=True) - - def test_spawn_with_vgpu(self): - vgpu_info = {'grp_uuid': uuids.gpu_group_1, - 'vgpu_type_uuid': uuids.vgpu_type_1} - self._test_spawn(vgpu_info=vgpu_info) - - def test_spawn_performs_rollback_and_throws_exception(self): - self.assertRaises(test.TestingException, self._test_spawn, - throw_exception=test.TestingException()) - - @mock.patch.object(vmops.VMOps, '_get_neutron_events', - return_value=[('network-vif-plugged', 1)]) - def test_spawn_with_neutron(self, mock_get_neutron_events): - network_info = [{'id': 1, 'active': True}] - self.stub_out('nova.virt.xenapi.vmops.VMOps._neutron_failed_callback', - lambda event_name, instance: None) - - self._test_spawn(network_info=network_info) - - mock_get_neutron_events.assert_called_once_with( - network_info, True, True, False) - - @staticmethod - def _dev_mock(obj): - dev = mock.MagicMock(**obj) - dev.__contains__.side_effect = ( - lambda attr: getattr(dev, attr, None) is not None) - return dev - - @mock.patch.object(objects, 'XenDeviceBus') - @mock.patch.object(objects, 'IDEDeviceBus') - @mock.patch.object(objects, 'DiskMetadata') - def test_prepare_disk_metadata(self, mock_DiskMetadata, - mock_IDEDeviceBus, mock_XenDeviceBus): - mock_IDEDeviceBus.side_effect = \ - lambda **kw: \ - self._dev_mock({"address": kw.get("address"), "bus": "ide"}) - mock_XenDeviceBus.side_effect = \ - lambda **kw: \ - self._dev_mock({"address": kw.get("address"), "bus": "xen"}) - mock_DiskMetadata.side_effect = \ - lambda **kw: self._dev_mock(dict(**kw)) - - bdm = self._dev_mock({"device_name": "/dev/xvda", "tag": "disk_a"}) - disk_metadata = self.vmops._prepare_disk_metadata(bdm) - - self.assertEqual(disk_metadata[0].tags, ["disk_a"]) - self.assertEqual(disk_metadata[0].bus.bus, "ide") - self.assertEqual(disk_metadata[0].bus.address, "0:0") - self.assertEqual(disk_metadata[1].tags, ["disk_a"]) - self.assertEqual(disk_metadata[1].bus.bus, "xen") - self.assertEqual(disk_metadata[1].bus.address, "000000") - self.assertEqual(disk_metadata[2].tags, ["disk_a"]) - self.assertEqual(disk_metadata[2].bus.bus, "xen") - self.assertEqual(disk_metadata[2].bus.address, "51712") - self.assertEqual(disk_metadata[3].tags, ["disk_a"]) - self.assertEqual(disk_metadata[3].bus.bus, "xen") - self.assertEqual(disk_metadata[3].bus.address, "768") - - bdm = self._dev_mock({"device_name": "/dev/xvdc", "tag": "disk_c"}) - disk_metadata = self.vmops._prepare_disk_metadata(bdm) - - self.assertEqual(disk_metadata[0].tags, ["disk_c"]) - self.assertEqual(disk_metadata[0].bus.bus, "ide") - 
self.assertEqual(disk_metadata[0].bus.address, "1:0") - self.assertEqual(disk_metadata[1].tags, ["disk_c"]) - self.assertEqual(disk_metadata[1].bus.bus, "xen") - self.assertEqual(disk_metadata[1].bus.address, "000200") - self.assertEqual(disk_metadata[2].tags, ["disk_c"]) - self.assertEqual(disk_metadata[2].bus.bus, "xen") - self.assertEqual(disk_metadata[2].bus.address, "51744") - self.assertEqual(disk_metadata[3].tags, ["disk_c"]) - self.assertEqual(disk_metadata[3].bus.bus, "xen") - self.assertEqual(disk_metadata[3].bus.address, "5632") - - bdm = self._dev_mock({"device_name": "/dev/xvde", "tag": "disk_e"}) - disk_metadata = self.vmops._prepare_disk_metadata(bdm) - - self.assertEqual(disk_metadata[0].tags, ["disk_e"]) - self.assertEqual(disk_metadata[0].bus.bus, "xen") - self.assertEqual(disk_metadata[0].bus.address, "000400") - self.assertEqual(disk_metadata[1].tags, ["disk_e"]) - self.assertEqual(disk_metadata[1].bus.bus, "xen") - self.assertEqual(disk_metadata[1].bus.address, "51776") - - @mock.patch.object(objects.VirtualInterfaceList, 'get_by_instance_uuid') - @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') - @mock.patch.object(objects, 'NetworkInterfaceMetadata') - @mock.patch.object(objects, 'InstanceDeviceMetadata') - @mock.patch.object(objects, 'PCIDeviceBus') - @mock.patch.object(vmops.VMOps, '_prepare_disk_metadata') - def test_save_device_metadata(self, mock_prepare_disk_metadata, - mock_PCIDeviceBus, mock_InstanceDeviceMetadata, - mock_NetworkInterfaceMetadata, mock_get_bdms, mock_get_vifs): - context = {} - instance = {"uuid": "fake_uuid"} - vif = self._dev_mock({"address": "fake_address", "tag": "vif_tag"}) - bdm = self._dev_mock({"device_name": "/dev/xvdx", "tag": "bdm_tag"}) - block_device_info = {'block_device_mapping': [bdm]} - - mock_get_vifs.return_value = [vif] - mock_get_bdms.return_value = [bdm] - mock_InstanceDeviceMetadata.side_effect = \ - lambda **kw: {"devices": kw.get("devices")} - mock_NetworkInterfaceMetadata.return_value = mock.sentinel.vif_metadata - mock_prepare_disk_metadata.return_value = [mock.sentinel.bdm_metadata] - - dev_meta = self.vmops._save_device_metadata(context, instance, - block_device_info) - - mock_get_vifs.assert_called_once_with(context, instance["uuid"]) - - mock_NetworkInterfaceMetadata.assert_called_once_with(mac=vif.address, - bus=mock_PCIDeviceBus.return_value, - tags=[vif.tag]) - mock_prepare_disk_metadata.assert_called_once_with(bdm) - self.assertEqual(dev_meta["devices"], - [mock.sentinel.vif_metadata, mock.sentinel.bdm_metadata]) - - @mock.patch.object(objects.VirtualInterfaceList, 'get_by_instance_uuid') - @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid') - @mock.patch.object(vmops.VMOps, '_prepare_disk_metadata') - def test_save_device_metadata_no_vifs_no_bdms( - self, mock_prepare_disk_metadata, mock_get_bdms, mock_get_vifs): - """Tests that we don't save any device metadata when there are no - VIFs or BDMs. 
- """ - ctxt = context.RequestContext('fake-user', 'fake-project') - instance = objects.Instance(uuid=uuids.instance_uuid) - block_device_info = {'block_device_mapping': []} - - mock_get_vifs.return_value = objects.VirtualInterfaceList() - - dev_meta = self.vmops._save_device_metadata( - ctxt, instance, block_device_info) - self.assertIsNone(dev_meta) - - mock_get_vifs.assert_called_once_with(ctxt, uuids.instance_uuid) - mock_get_bdms.assert_not_called() - mock_prepare_disk_metadata.assert_not_called() - - @mock.patch.object(vmops.VMOps, '_get_neutron_events') - def test_spawn_with_neutron_exception(self, mock_get_neutron_events): - mock_get_neutron_events.return_value = [('network-vif-plugged', 1)] - self.assertRaises(exception.VirtualInterfaceCreateException, - self._test_spawn, neutron_exception=True) - mock_get_neutron_events.assert_called_once_with( - [], True, True, False) - - @mock.patch.object(vmops.VMOps, '_attach_mapped_block_devices') - @mock.patch.object(vm_utils, 'import_all_migrated_disks') - @mock.patch.object(volumeops.VolumeOps, 'connect_volume') - def _test_finish_migration(self, mock_connect_volume, - mock_import_all_migrated_disks, - mock_attach_mapped_block_devices, - power_on=True, resize_instance=True, - throw_exception=None, booted_from_volume=False, - vgpu_info=None): - self._stub_out_common() - - context = "context" - migration = {} - name_label = "dummy" - instance = self._new_instance({"name": name_label, "uuid": "fake_uuid", - "root_device_name": "/dev/xvda", "device_metadata": None}) - disk_info = "disk_info" - network_info = "net_info" - image_meta = objects.ImageMeta.from_dict({"id": uuids.image_id}) - block_device_info = {} - import_root = True - - expected_call_xenapi = [] - if booted_from_volume: - block_device_info = {'block_device_mapping': [ - {'mount_device': '/dev/xvda', - 'connection_info': {'data': 'fake-data'}}]} - import_root = False - mock_connect_volume.return_value = ('sr', 'vol-vdi-uuid') - expected_call_xenapi.append(mock.call('VDI.get_by_uuid', - 'vol-vdi-uuid')) - self.mock_call_xenapi.return_value = 'vol-vdi-ref' - session = self.vmops._session - - di_type = "di_type" - self.mock_determine_disk_image_type.return_value = di_type - - root_vdi = {"ref": "fake_ref"} - ephemeral_vdi = {"ref": "fake_ref_e"} - vdis = {"root": root_vdi, "ephemerals": {4: ephemeral_vdi}} - mock_import_all_migrated_disks.return_value = vdis - - kernel_file = "kernel" - ramdisk_file = "ramdisk" - self.mock_create_kernel_and_ramdisk.return_value = (kernel_file, - ramdisk_file) - - vm_ref = "fake_vm_ref" - rescue = False - self.mock_create_vm_record.return_value = vm_ref - self.mock_get_instance_pci_devs.return_value = [] - - if power_on: - expected_call_xenapi.append(mock.call('VM.unpause', vm_ref)) - - if throw_exception: - self.mock_update_instance_progress.side_effect = throw_exception - - self.vmops.finish_migration(context, migration, instance, disk_info, - network_info, image_meta, resize_instance, - block_device_info, power_on) - - self.mock_ensure_instance_name_unique.assert_called_once_with( - name_label) - self.mock_ensure_enough_free_mem.assert_called_once_with(instance) - self.mock_update_instance_progress.assert_called_once_with( - context, instance, step=5, total_steps=5) - self.mock_determine_disk_image_type.assert_called_once_with(image_meta) - self.mock_create_kernel_and_ramdisk.assert_called_once_with( - context, session, instance, name_label) - self.mock_create_vm_record.assert_called_once_with( - context, instance, name_label, di_type, 
kernel_file, ramdisk_file, - image_meta, rescue) - self.mock_attach_disks.assert_called_once_with( - context, instance, image_meta, vm_ref, name_label, vdis, di_type, - network_info, False, None, None) - self.mock_save_device_metadata.assert_called_once_with( - context, instance, block_device_info) - self.mock_get_instance_pci_devs.assert_called_once_with(instance) - self.mock_inject_network_info.assert_called_once_with( - instance, network_info, vm_ref) - self.mock_inject_instance_metadata.assert_called_once_with(instance, - vm_ref) - self.mock_inject_auto_disk_config.assert_called_once_with(instance, - vm_ref) - self.mock_file_inject_vm_settings.assert_called_once_with( - instance, vm_ref, vdis, network_info) - self.mock_create_vifs.assert_called_once_with( - instance, vm_ref, network_info) - self.mock_attach_vgpu.assert_called_once_with(vm_ref, vgpu_info, - instance) - mock_import_all_migrated_disks.assert_called_once_with( - self.vmops._session, instance, import_root=import_root) - mock_attach_mapped_block_devices.assert_called_once_with( - instance, block_device_info) - - if resize_instance: - self.mock_resize_up_vdis.assert_called_once_with( - instance, vdis) - - if throw_exception: - self.mock_safe_destroy_vdis.assert_called_once_with( - self.vmops._session, ["fake_ref_e", "fake_ref"]) - self.mock_destroy_kernel_ramdisk.assert_called_once_with( - self.vmops._session, instance, kernel_file, ramdisk_file) - self.mock_destroy.assert_called_once_with( - instance, vm_ref, network_info=network_info) - - if power_on: - self.mock_start.assert_called_once_with(instance, vm_ref, - start_pause=True) - self.mock_wait_for_instance_to_start.assert_called_once_with( - instance, vm_ref) - self.mock_update_last_dom_id.assert_called_once_with(vm_ref) - - if expected_call_xenapi: - self.mock_call_xenapi.assert_has_calls(expected_call_xenapi) - else: - self.mock_call_xenapi.assert_not_called() - - if booted_from_volume: - mock_connect_volume.assert_called_once_with({'data': 'fake-data'}) - - def test_finish_migration(self): - self._test_finish_migration() - - def test_finish_migration_no_power_on(self): - self._test_finish_migration(power_on=False, resize_instance=False) - - def test_finish_migration_booted_from_volume(self): - self._test_finish_migration(booted_from_volume=True) - - def test_finish_migrate_performs_rollback_on_error(self): - self.assertRaises(test.TestingException, self._test_finish_migration, - power_on=False, resize_instance=False, - throw_exception=test.TestingException()) - - @mock.patch.object(xenapi_fake.SessionBase, 'call_xenapi') - def test_remove_hostname(self, mock_call_xenapi): - vm, vm_ref = self.create_vm("dummy") - instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None} - - self.vmops._remove_hostname(instance, vm_ref) - - mock_call_xenapi.assert_called_once_with( - "VM.remove_from_xenstore_data", vm_ref, "vm-data/hostname") - - @mock.patch.object(vmops.VMOps, '_remove_hostname') - @mock.patch.object(vmops.VMOps, '_inject_hostname') - @mock.patch.object(vmops.VMOps, '_get_agent') - @mock.patch.object(vmops.VMOps, 'agent_enabled', return_value=True) - def test_reset_network(self, mock_agent_enabled, mock_get_agent, - mock_inject_hostname, mock_remove_hostname): - vm, vm_ref = self.create_vm("dummy") - instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None} - agent = mock.Mock() - mock_get_agent.return_value = agent - - self.vmops.reset_network(instance) - - agent.resetnetwork.assert_called_once_with() - 
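# The reset path is also expected to consult agent_enabled, fetch the agent
- # for this VM, and re-inject then remove the hostname record around the
- # reset:
- 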
mock_agent_enabled.assert_called_once_with(instance) - mock_get_agent.assert_called_once_with(instance, vm_ref) - mock_inject_hostname.assert_called_once_with(instance, vm_ref, False) - mock_remove_hostname.assert_called_once_with(instance, vm_ref) - - @mock.patch.object(vmops.VMOps, '_add_to_param_xenstore') - def test_inject_hostname(self, mock_add_to_param_xenstore): - instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"} - vm_ref = "vm_ref" - - self.vmops._inject_hostname(instance, vm_ref, rescue=False) - - mock_add_to_param_xenstore.assert_called_once_with( - vm_ref, 'vm-data/hostname', 'dummy') - - @mock.patch.object(vmops.VMOps, '_add_to_param_xenstore') - def test_inject_hostname_with_rescue_prefix( - self, mock_add_to_param_xenstore): - instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"} - vm_ref = "vm_ref" - - self.vmops._inject_hostname(instance, vm_ref, rescue=True) - - mock_add_to_param_xenstore.assert_called_once_with( - vm_ref, 'vm-data/hostname', 'RESCUE-dummy') - - @mock.patch.object(vmops.VMOps, '_add_to_param_xenstore') - def test_inject_hostname_with_windows_name_truncation( - self, mock_add_to_param_xenstore): - instance = {"hostname": "dummydummydummydummydummy", - "os_type": "windows", "uuid": "uuid"} - vm_ref = "vm_ref" - - self.vmops._inject_hostname(instance, vm_ref, rescue=True) - - mock_add_to_param_xenstore.assert_called_once_with( - vm_ref, 'vm-data/hostname', 'RESCUE-dummydum') - - @mock.patch.object(greenthread, 'sleep') - @mock.patch.object(vm_utils, 'get_power_state', - side_effect=[power_state.SHUTDOWN, - power_state.RUNNING]) - def test_wait_for_instance_to_start(self, mock_get_power_state, - mock_sleep): - instance = {"uuid": "uuid"} - vm_ref = "vm_ref" - - self.vmops._wait_for_instance_to_start(instance, vm_ref) - - mock_get_power_state.assert_has_calls( - [mock.call(self._session, vm_ref), - mock.call(self._session, vm_ref)]) - mock_sleep.assert_called_once_with(0.5) - - @mock.patch.object(vm_utils, 'lookup', return_value='ref') - @mock.patch.object(vm_utils, 'create_vbd') - def test_attach_orig_disks(self, mock_create_vbd, mock_lookup): - instance = {"name": "dummy"} - vm_ref = "vm_ref" - vbd_refs = {vmops.DEVICE_ROOT: "vdi_ref"} - - with mock.patch.object(self.vmops, '_find_vdi_refs', - return_value=vbd_refs) as mock_find_vdi: - self.vmops._attach_orig_disks(instance, vm_ref) - mock_lookup.assert_called_once_with(self.vmops._session, 'dummy') - mock_find_vdi.assert_called_once_with('ref', exclude_volumes=True) - mock_create_vbd.assert_called_once_with( - self.vmops._session, vm_ref, 'vdi_ref', vmops.DEVICE_RESCUE, - bootable=False) - - @mock.patch.object(xenapi_agent.XenAPIBasedAgent, 'update_if_needed') - @mock.patch.object(xenapi_agent.XenAPIBasedAgent, 'resetnetwork') - @mock.patch.object(xenapi_agent.XenAPIBasedAgent, 'get_version', - return_value='1.2.3') - @mock.patch.object(vmops.VMOps, '_get_agent') - @mock.patch.object(xenapi_agent, 'should_use_agent', - return_value=True) - def test_agent_update_setup(self, mock_should_use_agent, mock_get_agent, - mock_get_version, mock_resetnetwork, - mock_update_if_needed): - # agent updates need to occur after networking is configured - instance = {'name': 'betelgeuse', - 'uuid': '1-2-3-4-5-6'} - vm_ref = 'vm_ref' - agent = xenapi_agent.XenAPIBasedAgent(self.vmops._session, - self.vmops._virtapi, instance, vm_ref) - mock_get_agent.return_value = agent - - self.vmops._configure_new_instance_with_agent(instance, vm_ref, - None, None) - - 
mock_should_use_agent.assert_called_once_with(instance) - mock_get_agent.assert_called_once_with(instance, vm_ref) - mock_get_version.assert_called_once_with() - mock_resetnetwork.assert_called_once_with() - mock_update_if_needed.assert_called_once_with('1.2.3') - - def test_get_neutron_event(self): - network_info = [{"active": False, "id": 1}, - {"active": True, "id": 2}, - {"active": False, "id": 3}, - {"id": 4}] - power_on = True - first_boot = True - rescue = False - events = self.vmops._get_neutron_events(network_info, - power_on, first_boot, rescue) - self.assertEqual("network-vif-plugged", events[0][0]) - self.assertEqual(1, events[0][1]) - self.assertEqual("network-vif-plugged", events[1][0]) - self.assertEqual(3, events[1][1]) - - def test_get_neutron_event_power_off(self): - network_info = [{"active": False, "id": 1}, - {"active": True, "id": 2}, - {"active": False, "id": 3}, - {"id": 4}] - power_on = False - first_boot = True - rescue = False - events = self.vmops._get_neutron_events(network_info, - power_on, first_boot, rescue) - self.assertEqual([], events) - - def test_get_neutron_event_not_first_boot(self): - network_info = [{"active": False, "id": 1}, - {"active": True, "id": 2}, - {"active": False, "id": 3}, - {"id": 4}] - power_on = True - first_boot = False - rescue = False - events = self.vmops._get_neutron_events(network_info, - power_on, first_boot, rescue) - self.assertEqual([], events) - - def test_get_neutron_event_rescue(self): - network_info = [{"active": False, "id": 1}, - {"active": True, "id": 2}, - {"active": False, "id": 3}, - {"id": 4}] - power_on = True - first_boot = True - rescue = True - events = self.vmops._get_neutron_events(network_info, - power_on, first_boot, rescue) - self.assertEqual([], events) - - -class DestroyTestCase(VMOpsTestBase): - def setUp(self): - super(DestroyTestCase, self).setUp() - self.context = context.RequestContext(user_id=None, project_id=None) - self.instance = fake_instance.fake_instance_obj(self.context) - - @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None]) - @mock.patch.object(vm_utils, 'hard_shutdown_vm') - @mock.patch.object(volume_utils, 'find_sr_by_uuid') - @mock.patch.object(volume_utils, 'forget_sr') - def test_no_vm_no_bdm(self, forget_sr, find_sr_by_uuid, hard_shutdown_vm, - lookup): - self.vmops.destroy(self.instance, 'network_info', - {'block_device_mapping': []}) - self.assertEqual(0, find_sr_by_uuid.call_count) - self.assertEqual(0, forget_sr.call_count) - self.assertEqual(0, hard_shutdown_vm.call_count) - - @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None]) - @mock.patch.object(vm_utils, 'hard_shutdown_vm') - @mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value=None) - @mock.patch.object(volume_utils, 'forget_sr') - def test_no_vm_orphaned_volume_no_sr(self, forget_sr, find_sr_by_uuid, - hard_shutdown_vm, lookup): - self.vmops.destroy(self.instance, 'network_info', - {'block_device_mapping': [{'connection_info': - {'data': {'volume_id': 'fake-uuid'}}}]}) - find_sr_by_uuid.assert_called_once_with(self.vmops._session, - 'FA15E-D15C-fake-uuid') - self.assertEqual(0, forget_sr.call_count) - self.assertEqual(0, hard_shutdown_vm.call_count) - - @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None]) - @mock.patch.object(vm_utils, 'hard_shutdown_vm') - @mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value='sr_ref') - @mock.patch.object(volume_utils, 'forget_sr') - def test_no_vm_orphaned_volume_old_sr(self, forget_sr, find_sr_by_uuid, - hard_shutdown_vm, 
lookup): - self.vmops.destroy(self.instance, 'network_info', - {'block_device_mapping': [{'connection_info': - {'data': {'volume_id': 'fake-uuid'}}}]}) - find_sr_by_uuid.assert_called_once_with(self.vmops._session, - 'FA15E-D15C-fake-uuid') - forget_sr.assert_called_once_with(self.vmops._session, 'sr_ref') - self.assertEqual(0, hard_shutdown_vm.call_count) - - @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None]) - @mock.patch.object(vm_utils, 'hard_shutdown_vm') - @mock.patch.object(volume_utils, 'find_sr_by_uuid', - side_effect=[None, 'sr_ref']) - @mock.patch.object(volume_utils, 'forget_sr') - @mock.patch.object(uuid, 'uuid5', return_value='fake-uuid') - def test_no_vm_orphaned_volume(self, uuid5, forget_sr, - find_sr_by_uuid, hard_shutdown_vm, lookup): - fake_data = {'volume_id': 'fake-uuid', - 'target_portal': 'host:port', - 'target_iqn': 'iqn'} - self.vmops.destroy(self.instance, 'network_info', - {'block_device_mapping': [{'connection_info': - {'data': fake_data}}]}) - call1 = mock.call(self.vmops._session, 'FA15E-D15C-fake-uuid') - call2 = mock.call(self.vmops._session, 'fake-uuid') - uuid5.assert_called_once_with(volume_utils.SR_NAMESPACE, - 'host/port/iqn') - find_sr_by_uuid.assert_has_calls([call1, call2]) - forget_sr.assert_called_once_with(self.vmops._session, 'sr_ref') - self.assertEqual(0, hard_shutdown_vm.call_count) - - -@mock.patch.object(vmops.VMOps, '_update_instance_progress') -@mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref') -@mock.patch.object(vm_utils, 'get_sr_path') -@mock.patch.object(vmops.VMOps, '_detach_block_devices_from_orig_vm') -@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_down') -@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_up') -class MigrateDiskAndPowerOffTestCase(VMOpsTestBase): - def setUp(self): - super(MigrateDiskAndPowerOffTestCase, self).setUp() - self.context = context.RequestContext('user', 'project') - - def test_migrate_disk_and_power_off_works_down(self, - migrate_up, migrate_down, *mocks): - instance = objects.Instance( - flavor=objects.Flavor(root_gb=2, ephemeral_gb=0), - uuid=uuids.instance) - flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=1, - ephemeral_gb=0) - - self.vmops.migrate_disk_and_power_off(None, instance, None, - flavor, None) - - self.assertFalse(migrate_up.called) - self.assertTrue(migrate_down.called) - - def test_migrate_disk_and_power_off_works_up(self, - migrate_up, migrate_down, *mocks): - instance = objects.Instance( - flavor=objects.Flavor(root_gb=1, - ephemeral_gb=1), - uuid=uuids.instance) - flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=2, - ephemeral_gb=2) - - self.vmops.migrate_disk_and_power_off(None, instance, None, - flavor, None) - - self.assertFalse(migrate_down.called) - self.assertTrue(migrate_up.called) - - def test_migrate_disk_and_power_off_resize_down_ephemeral_fails(self, - migrate_up, migrate_down, *mocks): - instance = objects.Instance(flavor=objects.Flavor(ephemeral_gb=2)) - flavor = fake_flavor.fake_flavor_obj(self.context, ephemeral_gb=1) - - self.assertRaises(exception.ResizeError, - self.vmops.migrate_disk_and_power_off, - None, instance, None, flavor, None) - - -@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely') -@mock.patch.object(vm_utils, 'migrate_vhd') -@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown') -@mock.patch.object(vm_utils, 'get_all_vdi_uuids_for_vm') -@mock.patch.object(vmops.VMOps, '_update_instance_progress') -@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label') -class 
MigrateDiskResizingUpTestCase(VMOpsTestBase): - def _fake_snapshot_attached_here(self, session, instance, vm_ref, label, - userdevice, post_snapshot_callback): - self.assertIsInstance(instance, dict) - if userdevice == '0': - self.assertEqual("vm_ref", vm_ref) - self.assertEqual("fake-snapshot", label) - yield ["leaf", "parent", "grandp"] - else: - leaf = userdevice + "-leaf" - parent = userdevice + "-parent" - yield [leaf, parent] - - @mock.patch.object(volume_utils, 'is_booted_from_volume', - return_value=False) - def test_migrate_disk_resizing_up_works_no_ephemeral(self, - mock_is_booted_from_volume, mock_apply_orig, mock_update_progress, - mock_get_all_vdi_uuids, mock_shutdown, mock_migrate_vhd, - mock_get_vdi_for_vm): - context = "ctxt" - instance = {"name": "fake", "uuid": "uuid"} - dest = "dest" - vm_ref = "vm_ref" - sr_path = "sr_path" - - mock_get_all_vdi_uuids.return_value = None - mock_get_vdi_for_vm.return_value = ({}, {"uuid": "root"}) - - with mock.patch.object(vm_utils, '_snapshot_attached_here_impl', - self._fake_snapshot_attached_here): - self.vmops._migrate_disk_resizing_up(context, instance, dest, - vm_ref, sr_path) - - mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session, - vm_ref, min_userdevice=4) - mock_apply_orig.assert_called_once_with(instance, vm_ref) - mock_shutdown.assert_called_once_with(instance, vm_ref) - - m_vhd_expected = [mock.call(self.vmops._session, instance, "parent", - dest, sr_path, 1), - mock.call(self.vmops._session, instance, "grandp", - dest, sr_path, 2), - mock.call(self.vmops._session, instance, "root", - dest, sr_path, 0)] - self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list) - - prog_expected = [ - mock.call(context, instance, 1, 5), - mock.call(context, instance, 2, 5), - mock.call(context, instance, 3, 5), - mock.call(context, instance, 4, 5) - # 5/5: step to be executed by finish migration. 
- ]
- self.assertEqual(prog_expected, mock_update_progress.call_args_list)
-
- @mock.patch.object(volume_utils, 'is_booted_from_volume',
- return_value=False)
- def test_migrate_disk_resizing_up_ephemerals_no_volume(self,
- mock_is_booted_from_volume, mock_apply_orig, mock_update_progress,
- mock_get_all_vdi_uuids, mock_shutdown, mock_migrate_vhd,
- mock_get_vdi_for_vm):
- context = "ctxt"
- instance = {"name": "fake", "uuid": "uuid"}
- dest = "dest"
- vm_ref = "vm_ref"
- sr_path = "sr_path"
-
- mock_get_all_vdi_uuids.return_value = ["vdi-eph1", "vdi-eph2"]
- mock_get_vdi_for_vm.side_effect = [({}, {"uuid": "root"}),
- ({}, {"uuid": "4-root"}),
- ({}, {"uuid": "5-root"})]
-
- with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
- self._fake_snapshot_attached_here):
- self.vmops._migrate_disk_resizing_up(context, instance, dest,
- vm_ref, sr_path)
-
- mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
- vm_ref, min_userdevice=4)
- mock_apply_orig.assert_called_once_with(instance, vm_ref)
- mock_shutdown.assert_called_once_with(instance, vm_ref)
-
- m_vhd_expected = [mock.call(self.vmops._session, instance,
- "parent", dest, sr_path, 1),
- mock.call(self.vmops._session, instance,
- "grandp", dest, sr_path, 2),
- mock.call(self.vmops._session, instance,
- "4-parent", dest, sr_path, 1, 1),
- mock.call(self.vmops._session, instance,
- "5-parent", dest, sr_path, 1, 2),
- mock.call(self.vmops._session, instance,
- "root", dest, sr_path, 0),
- mock.call(self.vmops._session, instance,
- "4-root", dest, sr_path, 0, 1),
- mock.call(self.vmops._session, instance,
- "5-root", dest, sr_path, 0, 2)]
- self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
-
- prog_expected = [
- mock.call(context, instance, 1, 5),
- mock.call(context, instance, 2, 5),
- mock.call(context, instance, 3, 5),
- mock.call(context, instance, 4, 5)
- # 5/5: step to be executed by finish migration.
- ]
- self.assertEqual(prog_expected, mock_update_progress.call_args_list)
-
- @mock.patch.object(volume_utils, 'is_booted_from_volume')
- def test_migrate_disk_resizing_up_ephemerals_mixed_volumes(self,
- mock_is_booted_from_volume, mock_apply_orig, mock_update_progress,
- mock_get_all_vdi_uuids, mock_shutdown, mock_migrate_vhd,
- mock_get_vdi_for_vm):
- context = "ctxt"
- instance = {"name": "fake", "uuid": "uuid"}
- dest = "dest"
- vm_ref = "vm_ref"
- sr_path = "sr_path"
-
- mock_get_all_vdi_uuids.return_value = ["vdi-eph1", "vdi-eph2"]
- mock_get_vdi_for_vm.side_effect = [({}, {"uuid": "4-root"}),
- ({}, {"uuid": "5-root"})]
- # Here we mock the is_booted_from_volume call to emulate the
- # 4-root and 4-parent VDIs being volume based, while 5-root
- # and 5-parent are local ephemeral drives that should be migrated.
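- # Each is_booted_from_volume call consumes one entry of the side_effect
- # list below, so this True/False pattern decides which VDI chains the
- # expectations further down treat as volume-backed versus local.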
- mock_is_booted_from_volume.side_effect = [True, False, True, False] - - with mock.patch.object(vm_utils, '_snapshot_attached_here_impl', - self._fake_snapshot_attached_here): - self.vmops._migrate_disk_resizing_up(context, instance, dest, - vm_ref, sr_path) - - mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session, - vm_ref, min_userdevice=4) - mock_apply_orig.assert_called_once_with(instance, vm_ref) - mock_shutdown.assert_called_once_with(instance, vm_ref) - m_vhd_expected = [mock.call(self.vmops._session, instance, - "4-parent", dest, sr_path, 1, 1), - mock.call(self.vmops._session, instance, - "4-root", dest, sr_path, 0, 1)] - - self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list) - - prog_expected = [ - mock.call(context, instance, 1, 5), - mock.call(context, instance, 2, 5), - mock.call(context, instance, 3, 5), - mock.call(context, instance, 4, 5) - # 5/5: step to be executed by finish migration. - ] - self.assertEqual(prog_expected, mock_update_progress.call_args_list) - - @mock.patch.object(vmops.VMOps, '_restore_orig_vm_and_cleanup_orphan') - @mock.patch.object(volume_utils, 'is_booted_from_volume', - return_value=False) - def test_migrate_disk_resizing_up_rollback(self, - mock_is_booted_from_volume, - mock_restore, - mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids, - mock_shutdown, mock_migrate_vhd, mock_get_vdi_for_vm): - context = "ctxt" - instance = {"name": "fake", "uuid": "fake"} - dest = "dest" - vm_ref = "vm_ref" - sr_path = "sr_path" - - mock_migrate_vhd.side_effect = test.TestingException - mock_restore.side_effect = test.TestingException - - with mock.patch.object(vm_utils, '_snapshot_attached_here_impl', - self._fake_snapshot_attached_here): - self.assertRaises(exception.InstanceFaultRollback, - self.vmops._migrate_disk_resizing_up, - context, instance, dest, vm_ref, sr_path) - - mock_apply_orig.assert_called_once_with(instance, vm_ref) - mock_restore.assert_called_once_with(instance) - mock_migrate_vhd.assert_called_once_with(self.vmops._session, - instance, "parent", dest, sr_path, 1) - - -class CreateVMRecordTestCase(VMOpsTestBase): - @mock.patch.object(vm_utils, 'determine_vm_mode') - @mock.patch.object(vm_utils, 'get_vm_device_id') - @mock.patch.object(vm_utils, 'create_vm') - def test_create_vm_record_with_vm_device_id(self, mock_create_vm, - mock_get_vm_device_id, mock_determine_vm_mode): - - context = "context" - instance = objects.Instance(vm_mode="vm_mode", uuid=uuids.instance) - name_label = "dummy" - disk_image_type = "vhd" - kernel_file = "kernel" - ramdisk_file = "ram" - device_id = "0002" - image_properties = {"xenapi_device_id": device_id} - image_meta = objects.ImageMeta.from_dict( - {"properties": image_properties}) - rescue = False - session = "session" - self.vmops._session = session - mock_get_vm_device_id.return_value = device_id - mock_determine_vm_mode.return_value = "vm_mode" - - self.vmops._create_vm_record(context, instance, name_label, - disk_image_type, kernel_file, ramdisk_file, image_meta, rescue) - - mock_get_vm_device_id.assert_called_with(session, image_meta) - mock_create_vm.assert_called_with(session, instance, name_label, - kernel_file, ramdisk_file, False, device_id) - - -class BootableTestCase(VMOpsTestBase): - - def setUp(self): - super(BootableTestCase, self).setUp() - - self.instance = {"name": "test", "uuid": "fake"} - vm_rec, self.vm_ref = self.create_vm('test') - - # sanity check bootlock is initially disabled: - self.assertEqual({}, vm_rec['blocked_operations']) - - def 
_get_blocked(self): - vm_rec = self._session.call_xenapi("VM.get_record", self.vm_ref) - return vm_rec['blocked_operations'] - - def test_acquire_bootlock(self): - self.vmops._acquire_bootlock(self.vm_ref) - blocked = self._get_blocked() - self.assertIn('start', blocked) - - def test_release_bootlock(self): - self.vmops._acquire_bootlock(self.vm_ref) - self.vmops._release_bootlock(self.vm_ref) - blocked = self._get_blocked() - self.assertNotIn('start', blocked) - - def test_set_bootable(self): - self.vmops.set_bootable(self.instance, True) - blocked = self._get_blocked() - self.assertNotIn('start', blocked) - - def test_set_not_bootable(self): - self.vmops.set_bootable(self.instance, False) - blocked = self._get_blocked() - self.assertIn('start', blocked) - - -@mock.patch.object(vm_utils, 'update_vdi_virtual_size', autospec=True) -class ResizeVdisTestCase(VMOpsTestBase): - def test_dont_resize_root_volumes_osvol_false(self, mock_resize): - instance = fake_instance.fake_instance_obj( - None, flavor=objects.Flavor(root_gb=20)) - vdis = {'root': {'osvol': False, 'ref': 'vdi_ref'}} - self.vmops._resize_up_vdis(instance, vdis) - self.assertTrue(mock_resize.called) - - def test_dont_resize_root_volumes_osvol_true(self, mock_resize): - instance = fake_instance.fake_instance_obj( - None, flavor=objects.Flavor(root_gb=20)) - vdis = {'root': {'osvol': True}} - self.vmops._resize_up_vdis(instance, vdis) - self.assertFalse(mock_resize.called) - - def test_dont_resize_root_volumes_no_osvol(self, mock_resize): - instance = fake_instance.fake_instance_obj( - None, flavor=objects.Flavor(root_gb=20)) - vdis = {'root': {}} - self.vmops._resize_up_vdis(instance, vdis) - self.assertFalse(mock_resize.called) - - @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes') - def test_ensure_ephemeral_resize_with_root_volume(self, mock_sizes, - mock_resize): - mock_sizes.return_value = [2000, 1000] - instance = fake_instance.fake_instance_obj( - None, flavor=objects.Flavor(root_gb=20, ephemeral_gb=20)) - ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}} - vdis = {'root': {'osvol': True, 'ref': 'vdi_ref'}, - 'ephemerals': ephemerals} - with mock.patch.object(vm_utils, 'generate_single_ephemeral', - autospec=True) as g: - self.vmops._resize_up_vdis(instance, vdis) - self.assertEqual([mock.call(self.vmops._session, instance, 4, - 2000), - mock.call(self.vmops._session, instance, 5, - 1000)], - mock_resize.call_args_list) - self.assertFalse(g.called) - - def test_resize_up_vdis_root(self, mock_resize): - instance = objects.Instance(flavor=objects.Flavor(root_gb=20, - ephemeral_gb=0)) - self.vmops._resize_up_vdis(instance, {"root": {"ref": "vdi_ref"}}) - mock_resize.assert_called_once_with(self.vmops._session, instance, - "vdi_ref", 20) - - def test_resize_up_vdis_zero_disks(self, mock_resize): - instance = objects.Instance(flavor=objects.Flavor(root_gb=0, - ephemeral_gb=0)) - self.vmops._resize_up_vdis(instance, {"root": {}}) - self.assertFalse(mock_resize.called) - - def test_resize_up_vdis_no_vdis_like_initial_spawn(self, mock_resize): - instance = objects.Instance(flavor=objects.Flavor(root_gb=0, - ephemeral_gb=3000)) - vdis = {} - - self.vmops._resize_up_vdis(instance, vdis) - - self.assertFalse(mock_resize.called) - - @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes') - def test_resize_up_vdis_ephemeral(self, mock_sizes, mock_resize): - mock_sizes.return_value = [2000, 1000] - instance = objects.Instance(flavor=objects.Flavor(root_gb=0, - ephemeral_gb=3000)) - ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}} - 
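# get_ephemeral_disk_sizes splits the flavor's ephemeral_gb into per-disk
- # sizes (mocked to [2000, 1000] here), and each ephemeral VDI ref present
- # is then resized up to its share:
- 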
vdis = {"ephemerals": ephemerals} - - self.vmops._resize_up_vdis(instance, vdis) - - mock_sizes.assert_called_once_with(3000) - expected = [mock.call(self.vmops._session, instance, 4, 2000), - mock.call(self.vmops._session, instance, 5, 1000)] - self.assertEqual(expected, mock_resize.call_args_list) - - @mock.patch.object(vm_utils, 'generate_single_ephemeral') - @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes') - def test_resize_up_vdis_ephemeral_with_generate(self, mock_sizes, - mock_generate, - mock_resize): - mock_sizes.return_value = [2000, 1000] - instance = objects.Instance(uuid=uuids.instance, - flavor=objects.Flavor(root_gb=0, - ephemeral_gb=3000)) - ephemerals = {"4": {"ref": 4}} - vdis = {"ephemerals": ephemerals} - - self.vmops._resize_up_vdis(instance, vdis) - - mock_sizes.assert_called_once_with(3000) - mock_resize.assert_called_once_with(self.vmops._session, instance, - 4, 2000) - mock_generate.assert_called_once_with(self.vmops._session, instance, - None, 5, 1000) - - -@mock.patch.object(vm_utils, 'remove_old_snapshots') -class CleanupFailedSnapshotTestCase(VMOpsTestBase): - def test_post_interrupted_snapshot_cleanup(self, mock_remove): - self.vmops._get_vm_opaque_ref = mock.Mock() - self.vmops._get_vm_opaque_ref.return_value = "vm_ref" - - self.vmops.post_interrupted_snapshot_cleanup("context", "instance") - - mock_remove.assert_called_once_with(self.vmops._session, - "instance", "vm_ref") - - -class XenstoreCallsTestCase(VMOpsTestBase): - """Test cases for Read/Write/Delete/Update xenstore calls - from vmops. - """ - - @mock.patch.object(vmops.VMOps, '_get_dom_id') - @mock.patch.object(host_xenstore, 'read_record') - def test_read_from_xenstore(self, mock_read_record, mock_dom_id): - mock_read_record.return_value = "fake_xapi_return" - mock_dom_id.return_value = "fake_dom_id" - fake_instance = {"name": "fake_instance"} - path = "attr/PVAddons/MajorVersion" - self.assertEqual("fake_xapi_return", - self.vmops._read_from_xenstore(fake_instance, path, - vm_ref="vm_ref")) - mock_dom_id.assert_called_once_with(fake_instance, "vm_ref") - - @mock.patch.object(vmops.VMOps, '_get_dom_id') - @mock.patch.object(host_xenstore, 'read_record') - def test_read_from_xenstore_ignore_missing_path(self, mock_read_record, - mock_dom_id): - mock_read_record.return_value = "fake_xapi_return" - mock_dom_id.return_value = "fake_dom_id" - fake_instance = {"name": "fake_instance"} - path = "attr/PVAddons/MajorVersion" - self.vmops._read_from_xenstore(fake_instance, path, vm_ref="vm_ref") - mock_read_record.assert_called_once_with( - self._session, "fake_dom_id", path, ignore_missing_path=True) - - @mock.patch.object(vmops.VMOps, '_get_dom_id') - @mock.patch.object(host_xenstore, 'read_record') - def test_read_from_xenstore_missing_path(self, mock_read_record, - mock_dom_id): - mock_read_record.return_value = "fake_xapi_return" - mock_dom_id.return_value = "fake_dom_id" - fake_instance = {"name": "fake_instance"} - path = "attr/PVAddons/MajorVersion" - self.vmops._read_from_xenstore(fake_instance, path, vm_ref="vm_ref", - ignore_missing_path=False) - mock_read_record.assert_called_once_with(self._session, "fake_dom_id", - path, - ignore_missing_path=False) - - -class LiveMigrateTestCase(VMOpsTestBase): - - @mock.patch.object(vmops.VMOps, '_get_network_ref') - @mock.patch.object(vm_utils, 'host_in_this_pool') - def _test_check_can_live_migrate_destination_shared_storage( - self, - shared, - mock_is_same_pool, - mock_net_ref): - fake_instance = objects.Instance(host="fake_host") - 
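# block_migration=None leaves the choice to the driver: a shared SR inside
- # the pool allows pool-level migration (block_migration False); anything
- # else falls back to block migration, as asserted below.
- 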
block_migration = None
- disk_over_commit = False
- ctxt = 'ctxt'
- mock_net_ref.return_value = 'fake_net_ref'
- if shared:
- mock_is_same_pool.return_value = True
- else:
- mock_is_same_pool.return_value = False
-
- with mock.patch.object(self._session, 'get_rec') as fake_sr_rec, \
- mock.patch.object(self._session, 'host.get_by_name_label') \
- as fake_get_ref:
- fake_get_ref.return_value = ['fake_host_ref']
- fake_sr_rec.return_value = {'shared': shared}
- migrate_data_ret = self.vmops.check_can_live_migrate_destination(
- ctxt, fake_instance, block_migration, disk_over_commit)
-
- if shared:
- self.assertFalse(migrate_data_ret.block_migration)
- else:
- self.assertTrue(migrate_data_ret.block_migration)
- self.assertEqual({'': 'fake_net_ref'},
- migrate_data_ret.vif_uuid_map)
-
- def test_check_can_live_migrate_destination_shared_storage(self):
- self._test_check_can_live_migrate_destination_shared_storage(True)
-
- def test_check_can_live_migrate_destination_shared_storage_false(self):
- self._test_check_can_live_migrate_destination_shared_storage(False)
-
- @mock.patch.object(vmops.VMOps, '_get_network_ref')
- def test_check_can_live_migrate_destination_block_migration(
- self,
- mock_net_ref):
- fake_instance = objects.Instance(host="fake_host")
- block_migration = None
- disk_over_commit = False
- ctxt = 'ctxt'
- mock_net_ref.return_value = 'fake_net_ref'
-
- with mock.patch.object(self._session, 'host.get_by_name_label') \
- as fake_get_ref:
- fake_get_ref.return_value = ['fake_host_ref']
- migrate_data_ret = self.vmops.check_can_live_migrate_destination(
- ctxt, fake_instance, block_migration, disk_over_commit)
-
- self.assertTrue(migrate_data_ret.block_migration)
- self.assertEqual(vm_utils.safe_find_sr(self._session),
- migrate_data_ret.destination_sr_ref)
- self.assertEqual({'value': 'fake_migrate_data'},
- migrate_data_ret.migrate_send_data)
- self.assertEqual({'': 'fake_net_ref'},
- migrate_data_ret.vif_uuid_map)
-
- @mock.patch.object(vmops.VMOps, '_migrate_receive')
- @mock.patch.object(vm_utils, 'safe_find_sr')
- @mock.patch.object(vmops.VMOps, '_get_network_ref')
- def test_no_hosts_found_with_the_name_label(self,
- mock_get_network_ref,
- mock_safe_find_sr,
- mock_migrate_receive):
- # Can't find the dest host in the current pool, so do a block live migrate
- fake_instance = objects.Instance(host="fake_host")
- mock_migrate_receive.return_value = {'fake_key': 'fake_data'}
- mock_safe_find_sr.return_value = 'fake_destination_sr_ref'
- mock_get_network_ref.return_value = 'fake_net_ref'
- block_migration = None
- disk_over_commit = False
- ctxt = 'ctxt'
- with mock.patch.object(self._session, 'host.get_by_name_label') \
- as fake_get_ref:
- fake_get_ref.return_value = []
- migrate_data_ret = self.vmops.check_can_live_migrate_destination(
- ctxt, fake_instance, block_migration, disk_over_commit)
- self.assertTrue(migrate_data_ret.block_migration)
- self.assertEqual(migrate_data_ret.vif_uuid_map,
- {'': 'fake_net_ref'})
-
- def test_multiple_hosts_found_with_same_name(self):
- # More than one host found with the dest host name, raise exception
- fake_instance = objects.Instance(host="fake_host")
- block_migration = None
- disk_over_commit = False
- ctxt = 'ctxt'
- with mock.patch.object(self._session, 'host.get_by_name_label') \
- as fake_get_ref:
- fake_get_ref.return_value = ['fake_host_ref1', 'fake_host_ref2']
- self.assertRaises(exception.MigrationPreCheckError,
- self.vmops.check_can_live_migrate_destination,
- ctxt, fake_instance, block_migration,
- disk_over_commit)
-
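- # Taken together, the surrounding cases pin down a small decision table
- # for the destination-side pre-check. A rough sketch (not the driver
- # source; names here are illustrative only):
- #
- #     def dest_precheck(hosts, in_this_pool, sr_shared, block_migration):
- #         if len(hosts) > 1:
- #             raise MigrationPreCheckError('ambiguous host name label')
- #         if not hosts or not in_this_pool:
- #             if block_migration is False:
- #                 raise MigrationPreCheckError('dest outside the pool')
- #             return True          # fall back to block live migration
- #         return not sr_shared     # shared SR -> pool-level migration
- 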
@mock.patch.object(vm_utils, 'host_in_this_pool') - def test_request_pool_migrate_to_outer_pool_host(self, mock_is_same_pool): - # Caller asks for no block live migrate while the dest host is not in - # the same pool with the src host, raise exception - fake_instance = objects.Instance(host="fake_host") - block_migration = False - disk_over_commit = False - ctxt = 'ctxt' - mock_is_same_pool.return_value = False - with mock.patch.object(self._session, 'host.get_by_name_label') \ - as fake_get_ref: - fake_get_ref.return_value = ['fake_host_ref1'] - self.assertRaises(exception.MigrationPreCheckError, - self.vmops.check_can_live_migrate_destination, - ctxt, fake_instance, block_migration, - disk_over_commit) - - @mock.patch.object(vmops.VMOps, 'create_interim_networks') - @mock.patch.object(vmops.VMOps, 'connect_block_device_volumes') - def test_pre_live_migration(self, mock_connect, mock_create): - migrate_data = objects.XenapiLiveMigrateData() - migrate_data.block_migration = True - sr_uuid_map = {"sr_uuid": "sr_ref"} - vif_uuid_map = {"neutron_vif_uuid": "dest_network_ref"} - mock_connect.return_value = {"sr_uuid": "sr_ref"} - mock_create.return_value = {"neutron_vif_uuid": "dest_network_ref"} - result = self.vmops.pre_live_migration( - None, None, "bdi", "fake_network_info", None, migrate_data) - - self.assertTrue(result.block_migration) - self.assertEqual(result.sr_uuid_map, sr_uuid_map) - self.assertEqual(result.vif_uuid_map, vif_uuid_map) - mock_connect.assert_called_once_with("bdi") - mock_create.assert_called_once_with("fake_network_info") - - @mock.patch.object(vmops.VMOps, '_delete_networks_and_bridges') - def test_post_live_migration_at_source(self, mock_delete): - self.vmops.post_live_migration_at_source('fake_context', - 'fake_instance', - 'fake_network_info') - mock_delete.assert_called_once_with('fake_instance', - 'fake_network_info') - - -class LiveMigrateFakeVersionTestCase(VMOpsTestBase): - @mock.patch.object(vmops.VMOps, '_pv_device_reported') - @mock.patch.object(vmops.VMOps, '_pv_driver_version_reported') - @mock.patch.object(vmops.VMOps, '_write_fake_pv_version') - def test_ensure_pv_driver_info_for_live_migration( - self, - mock_write_fake_pv_version, - mock_pv_driver_version_reported, - mock_pv_device_reported): - - mock_pv_device_reported.return_value = True - mock_pv_driver_version_reported.return_value = False - fake_instance = {"name": "fake_instance"} - self.vmops._ensure_pv_driver_info_for_live_migration(fake_instance, - "vm_rec") - - mock_write_fake_pv_version.assert_called_once_with(fake_instance, - "vm_rec") - - @mock.patch.object(vmops.VMOps, '_read_from_xenstore') - def test_pv_driver_version_reported_None(self, fake_read_from_xenstore): - fake_read_from_xenstore.return_value = '"None"' - fake_instance = {"name": "fake_instance"} - self.assertFalse(self.vmops._pv_driver_version_reported(fake_instance, - "vm_ref")) - - @mock.patch.object(vmops.VMOps, '_read_from_xenstore') - def test_pv_driver_version_reported(self, fake_read_from_xenstore): - fake_read_from_xenstore.return_value = '6.2.0' - fake_instance = {"name": "fake_instance"} - self.assertTrue(self.vmops._pv_driver_version_reported(fake_instance, - "vm_ref")) - - @mock.patch.object(vmops.VMOps, '_read_from_xenstore') - def test_pv_device_reported(self, fake_read_from_xenstore): - with mock.patch.object(self._session.VM, 'get_record') as fake_vm_rec: - fake_vm_rec.return_value = {'VIFs': 'fake-vif-object'} - with mock.patch.object(self._session, 'call_xenapi') as fake_call: - fake_call.return_value = 
{'device': '0'} - fake_read_from_xenstore.return_value = '4' - fake_instance = {"name": "fake_instance"} - self.assertTrue(self.vmops._pv_device_reported(fake_instance, - "vm_ref")) - - @mock.patch.object(vmops.VMOps, '_read_from_xenstore') - def test_pv_device_not_reported(self, fake_read_from_xenstore): - with mock.patch.object(self._session.VM, 'get_record') as fake_vm_rec: - fake_vm_rec.return_value = {'VIFs': 'fake-vif-object'} - with mock.patch.object(self._session, 'call_xenapi') as fake_call: - fake_call.return_value = {'device': '0'} - fake_read_from_xenstore.return_value = '0' - fake_instance = {"name": "fake_instance"} - self.assertFalse(self.vmops._pv_device_reported(fake_instance, - "vm_ref")) - - @mock.patch.object(vmops.VMOps, '_read_from_xenstore') - def test_pv_device_None_reported(self, fake_read_from_xenstore): - with mock.patch.object(self._session.VM, 'get_record') as fake_vm_rec: - fake_vm_rec.return_value = {'VIFs': 'fake-vif-object'} - with mock.patch.object(self._session, 'call_xenapi') as fake_call: - fake_call.return_value = {'device': '0'} - fake_read_from_xenstore.return_value = '"None"' - fake_instance = {"name": "fake_instance"} - self.assertFalse(self.vmops._pv_device_reported(fake_instance, - "vm_ref")) - - @mock.patch.object(vmops.VMOps, '_write_to_xenstore') - def test_write_fake_pv_version(self, fake_write_to_xenstore): - fake_write_to_xenstore.return_value = 'fake_return' - fake_instance = {"name": "fake_instance"} - with mock.patch.object(self._session, 'product_version') as version: - version.return_value = ('6', '2', '0') - self.assertIsNone(self.vmops._write_fake_pv_version(fake_instance, - "vm_ref")) - - -class LiveMigrateHelperTestCase(VMOpsTestBase): - def test_connect_block_device_volumes_none(self): - self.assertEqual({}, self.vmops.connect_block_device_volumes(None)) - - @mock.patch.object(volumeops.VolumeOps, "connect_volume") - def test_connect_block_device_volumes_calls_connect(self, mock_connect): - with mock.patch.object(self.vmops._session, - "call_xenapi") as mock_session: - mock_connect.return_value = ("sr_uuid", None) - mock_session.return_value = "sr_ref" - bdm = {"connection_info": "c_info"} - bdi = {"block_device_mapping": [bdm]} - result = self.vmops.connect_block_device_volumes(bdi) - - self.assertEqual({'sr_uuid': 'sr_ref'}, result) - - mock_connect.assert_called_once_with("c_info") - mock_session.assert_called_once_with("SR.get_by_uuid", - "sr_uuid") - - @mock.patch.object(volumeops.VolumeOps, "connect_volume") - @mock.patch.object(volume_utils, 'forget_sr') - def test_connect_block_device_volumes_calls_forget_sr(self, mock_forget, - mock_connect): - bdms = [{'connection_info': 'info1'}, - {'connection_info': 'info2'}] - - def fake_connect(connection_info): - expected = bdms[mock_connect.call_count - 1]['connection_info'] - self.assertEqual(expected, connection_info) - - if mock_connect.call_count == 2: - raise exception.VolumeDriverNotFound(driver_type='123') - - return ('sr_uuid_1', None) - - def fake_call_xenapi(method, uuid): - self.assertEqual('sr_uuid_1', uuid) - return 'sr_ref_1' - - mock_connect.side_effect = fake_connect - - with mock.patch.object(self.vmops._session, "call_xenapi", - side_effect=fake_call_xenapi): - self.assertRaises(exception.VolumeDriverNotFound, - self.vmops.connect_block_device_volumes, - {'block_device_mapping': bdms}) - mock_forget.assert_called_once_with(self.vmops._session, - 'sr_ref_1') - - def _call_live_migrate_command_with_migrate_send_data(self, migrate_data): - command_name = 
'test_command' - vm_ref = "vm_ref" - - def side_effect(method, *args): - if method == "SR.get_by_uuid": - return "sr_ref_new" - xmlrpclib.dumps(args, method, allow_none=1) - - with mock.patch.object(self.vmops, - "_generate_vdi_map") as mock_gen_vdi_map, \ - mock.patch.object(self.vmops._session, - 'call_xenapi') as mock_call_xenapi, \ - mock.patch.object(vm_utils, 'host_in_this_pool' - ) as mock_host_in_this_pool, \ - mock.patch.object(self.vmops, - "_generate_vif_network_map") as mock_vif_map: - mock_call_xenapi.side_effect = side_effect - mock_gen_vdi_map.side_effect = [ - {"vdi": "sr_ref"}, {"vdi": "sr_ref_2"}] - mock_vif_map.return_value = {"vif_ref1": "dest_net_ref"} - mock_host_in_this_pool.return_value = False - - self.vmops._call_live_migrate_command(command_name, - vm_ref, migrate_data) - - expect_vif_map = {} - if 'vif_uuid_map' in migrate_data: - expect_vif_map.update({"vif_ref1": "dest_net_ref"}) - expected_vdi_map = {'vdi': 'sr_ref'} - if 'sr_uuid_map' in migrate_data: - expected_vdi_map = {'vdi': 'sr_ref_2'} - self.assertEqual(mock_call_xenapi.call_args_list[-1], - mock.call(command_name, vm_ref, - migrate_data.migrate_send_data, True, - expected_vdi_map, expect_vif_map, {})) - - self.assertEqual(mock_gen_vdi_map.call_args_list[0], - mock.call(migrate_data.destination_sr_ref, vm_ref)) - if 'sr_uuid_map' in migrate_data: - self.assertEqual(mock_gen_vdi_map.call_args_list[1], - mock.call(migrate_data.sr_uuid_map["sr_uuid2"], vm_ref, - "sr_ref_new")) - - def test_call_live_migrate_command_with_full_data(self): - migrate_data = objects.XenapiLiveMigrateData() - migrate_data.migrate_send_data = {"foo": "bar"} - migrate_data.destination_sr_ref = "sr_ref" - migrate_data.sr_uuid_map = {"sr_uuid2": "sr_ref_3"} - migrate_data.vif_uuid_map = {"vif_id": "dest_net_ref"} - self._call_live_migrate_command_with_migrate_send_data(migrate_data) - - def test_call_live_migrate_command_with_no_sr_uuid_map(self): - migrate_data = objects.XenapiLiveMigrateData() - migrate_data.migrate_send_data = {"foo": "baz"} - migrate_data.destination_sr_ref = "sr_ref" - self._call_live_migrate_command_with_migrate_send_data(migrate_data) - - def test_call_live_migrate_command_with_no_migrate_send_data(self): - migrate_data = objects.XenapiLiveMigrateData() - self.assertRaises(exception.InvalidParameterValue, - self._call_live_migrate_command_with_migrate_send_data, - migrate_data) - - @mock.patch.object(vmops.VMOps, '_call_live_migrate_command') - def test_check_can_live_migrate_source_with_xcp2(self, mock_call_migrate): - ctxt = 'ctxt' - fake_instance = {"name": "fake_instance"} - fake_dest_check_data = objects.XenapiLiveMigrateData() - fake_dest_check_data.block_migration = True - mock_call_migrate.side_effect = \ - xenapi_fake.xenapi_session.XenAPI.Failure(['VDI_NOT_IN_MAP']) - - with mock.patch.object(self.vmops, - '_get_iscsi_srs') as mock_iscsi_srs, \ - mock.patch.object(self.vmops, - '_get_vm_opaque_ref') as mock_vm, \ - mock.patch.object(self.vmops, - '_get_host_software_versions') as mock_host_sw: - mock_iscsi_srs.return_value = [] - mock_vm.return_value = 'vm_ref' - mock_host_sw.return_value = {'platform_name': 'XCP', - 'platform_version': '2.1.0'} - fake_returned_data = self.vmops.check_can_live_migrate_source( - ctxt, fake_instance, fake_dest_check_data) - - self.assertEqual(fake_returned_data, fake_dest_check_data) - - @mock.patch.object(vmops.VMOps, '_call_live_migrate_command') - def test_check_can_live_migrate_source_with_xcp2_vif_raise(self, - mock_call_migrate): - ctxt = 'ctxt' - 
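# VIF_NOT_IN_MAP, unlike the VDI_NOT_IN_MAP case above, is not tolerated
- # on XCP >= 2.1, so the source pre-check is expected to surface it as
- # MigrationPreCheckError:
- 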
fake_instance = {"name": "fake_instance"} - fake_dest_check_data = objects.XenapiLiveMigrateData() - fake_dest_check_data.block_migration = True - mock_call_migrate.side_effect = \ - xenapi_fake.xenapi_session.XenAPI.Failure(['VIF_NOT_IN_MAP']) - - with mock.patch.object(self.vmops, - '_get_iscsi_srs') as mock_iscsi_srs, \ - mock.patch.object(self.vmops, - '_get_vm_opaque_ref') as mock_vm, \ - mock.patch.object(self.vmops, - '_get_host_software_versions') as mock_host_sw: - mock_iscsi_srs.return_value = [] - mock_vm.return_value = 'vm_ref' - mock_host_sw.return_value = {'platform_name': 'XCP', - 'platform_version': '2.1.0'} - self.assertRaises(exception.MigrationPreCheckError, - self.vmops.check_can_live_migrate_source, ctxt, - fake_instance, fake_dest_check_data) - - @mock.patch.object(vmops.VMOps, '_call_live_migrate_command') - def test_check_can_live_migrate_source_with_xcp2_sw_raise(self, - mock_call_migrate): - ctxt = 'ctxt' - fake_instance = {"name": "fake_instance"} - fake_dest_check_data = objects.XenapiLiveMigrateData() - fake_dest_check_data.block_migration = True - mock_call_migrate.side_effect = \ - xenapi_fake.xenapi_session.XenAPI.Failure(['VDI_NOT_IN_MAP']) - - with mock.patch.object(self.vmops, - '_get_iscsi_srs') as mock_iscsi_srs, \ - mock.patch.object(self.vmops, - '_get_vm_opaque_ref') as mock_vm, \ - mock.patch.object(self.vmops, - '_get_host_software_versions') as mock_host_sw: - mock_iscsi_srs.return_value = [] - mock_vm.return_value = 'vm_ref' - mock_host_sw.return_value = {'platform_name': 'XCP', - 'platform_version': '1.1.0'} - self.assertRaises(exception.MigrationPreCheckError, - self.vmops.check_can_live_migrate_source, ctxt, - fake_instance, fake_dest_check_data) - - def test_generate_vif_network_map(self): - with mock.patch.object(self._session.VIF, - 'get_other_config') as mock_other_config, \ - mock.patch.object(self._session.VM, - 'get_VIFs') as mock_get_vif: - mock_other_config.side_effect = [{'neutron-port-id': 'vif_id_a'}, - {'neutron-port-id': 'vif_id_b'}] - mock_get_vif.return_value = ['vif_ref1', 'vif_ref2'] - vif_uuid_map = {'vif_id_b': 'dest_net_ref2', - 'vif_id_a': 'dest_net_ref1'} - vif_map = self.vmops._generate_vif_network_map('vm_ref', - vif_uuid_map) - expected = {'vif_ref1': 'dest_net_ref1', - 'vif_ref2': 'dest_net_ref2'} - self.assertEqual(vif_map, expected) - - def test_generate_vif_network_map_default_net(self): - with mock.patch.object(self._session.VIF, - 'get_other_config') as mock_other_config, \ - mock.patch.object(self._session.VM, - 'get_VIFs') as mock_get_vif: - mock_other_config.side_effect = [{'neutron-port-id': 'vif_id_a'}, - {'neutron-port-id': 'vif_id_b'}] - mock_get_vif.return_value = ['vif_ref1'] - vif_uuid_map = {'': 'default_net_ref'} - vif_map = self.vmops._generate_vif_network_map('vm_ref', - vif_uuid_map) - expected = {'vif_ref1': 'default_net_ref'} - self.assertEqual(vif_map, expected) - - def test_generate_vif_network_map_exception(self): - with mock.patch.object(self._session.VIF, - 'get_other_config') as mock_other_config, \ - mock.patch.object(self._session.VM, - 'get_VIFs') as mock_get_vif: - mock_other_config.side_effect = [{'neutron-port-id': 'vif_id_a'}, - {'neutron-port-id': 'vif_id_b'}] - mock_get_vif.return_value = ['vif_ref1', 'vif_ref2'] - vif_uuid_map = {'vif_id_c': 'dest_net_ref2', - 'vif_id_d': 'dest_net_ref1'} - self.assertRaises(exception.MigrationError, - self.vmops._generate_vif_network_map, - 'vm_ref', vif_uuid_map) - - def test_generate_vif_network_map_exception_no_iface(self): - with 
mock.patch.object(self._session.VIF, - 'get_other_config') as mock_other_config, \ - mock.patch.object(self._session.VM, - 'get_VIFs') as mock_get_vif: - mock_other_config.return_value = {} - mock_get_vif.return_value = ['vif_ref1'] - vif_uuid_map = {} - self.assertRaises(exception.MigrationError, - self.vmops._generate_vif_network_map, - 'vm_ref', vif_uuid_map) - - def test_delete_networks_and_bridges(self): - self.vmops.vif_driver = mock.Mock() - network_info = [{'id': 'fake_vif'}] - self.vmops._delete_networks_and_bridges('fake_instance', network_info) - self.vmops.vif_driver.delete_network_and_bridge.\ - assert_called_once_with('fake_instance', 'fake_vif') - - def test_create_interim_networks(self): - class FakeVifDriver(object): - def create_vif_interim_network(self, vif): - if vif['id'] == "vif_1": - return "network_ref_1" - if vif['id'] == "vif_2": - return "network_ref_2" - - network_info = [{'id': "vif_1"}, {'id': 'vif_2'}] - self.vmops.vif_driver = FakeVifDriver() - vif_map = self.vmops.create_interim_networks(network_info) - self.assertEqual(vif_map, {'vif_1': 'network_ref_1', - 'vif_2': 'network_ref_2'}) - - -class RollbackLiveMigrateDestinationTestCase(VMOpsTestBase): - @mock.patch.object(vmops.VMOps, '_delete_networks_and_bridges') - @mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value='sr_ref') - @mock.patch.object(volume_utils, 'forget_sr') - def test_rollback_dest_calls_sr_forget(self, forget_sr, sr_ref, - delete_networks_bridges): - block_device_info = {'block_device_mapping': [{'connection_info': - {'data': {'volume_id': 'fake-uuid', - 'target_iqn': 'fake-iqn', - 'target_portal': 'fake-portal'}}}]} - network_info = [{'id': 'vif1'}] - self.vmops.rollback_live_migration_at_destination('instance', - network_info, - block_device_info) - forget_sr.assert_called_once_with(self.vmops._session, 'sr_ref') - delete_networks_bridges.assert_called_once_with( - 'instance', [{'id': 'vif1'}]) - - @mock.patch.object(vmops.VMOps, '_delete_networks_and_bridges') - @mock.patch.object(volume_utils, 'forget_sr') - @mock.patch.object(volume_utils, 'find_sr_by_uuid', - side_effect=test.TestingException) - def test_rollback_dest_handles_exception(self, find_sr_ref, forget_sr, - delete_networks_bridges): - block_device_info = {'block_device_mapping': [{'connection_info': - {'data': {'volume_id': 'fake-uuid', - 'target_iqn': 'fake-iqn', - 'target_portal': 'fake-portal'}}}]} - network_info = [{'id': 'vif1'}] - self.vmops.rollback_live_migration_at_destination('instance', - network_info, - block_device_info) - self.assertFalse(forget_sr.called) - delete_networks_bridges.assert_called_once_with( - 'instance', [{'id': 'vif1'}]) - - -@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown') -@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label') -@mock.patch.object(vmops.VMOps, '_update_instance_progress') -@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely') -@mock.patch.object(vm_utils, 'resize_disk') -@mock.patch.object(vm_utils, 'migrate_vhd') -@mock.patch.object(vm_utils, 'destroy_vdi') -class MigrateDiskResizingDownTestCase(VMOpsTestBase): - def test_migrate_disk_resizing_down_works_no_ephemeral( - self, - mock_destroy_vdi, - mock_migrate_vhd, - mock_resize_disk, - mock_get_vdi_for_vm_safely, - mock_update_instance_progress, - mock_apply_orig_vm_name_label, - mock_resize_ensure_vm_is_shutdown): - - context = "ctx" - instance = {"name": "fake", "uuid": "uuid"} - dest = "dest" - vm_ref = "vm_ref" - sr_path = "sr_path" - instance_type = dict(root_gb=1) - old_vdi_ref = 
"old_ref" - new_vdi_ref = "new_ref" - new_vdi_uuid = "new_uuid" - - mock_get_vdi_for_vm_safely.return_value = (old_vdi_ref, None) - mock_resize_disk.return_value = (new_vdi_ref, new_vdi_uuid) - - self.vmops._migrate_disk_resizing_down(context, instance, dest, - instance_type, vm_ref, sr_path) - - mock_get_vdi_for_vm_safely.assert_called_once_with( - self.vmops._session, - vm_ref) - mock_resize_ensure_vm_is_shutdown.assert_called_once_with( - instance, vm_ref) - mock_apply_orig_vm_name_label.assert_called_once_with( - instance, vm_ref) - mock_resize_disk.assert_called_once_with( - self.vmops._session, - instance, - old_vdi_ref, - instance_type) - mock_migrate_vhd.assert_called_once_with( - self.vmops._session, - instance, - new_vdi_uuid, - dest, - sr_path, 0) - mock_destroy_vdi.assert_called_once_with( - self.vmops._session, - new_vdi_ref) - - prog_expected = [ - mock.call(context, instance, 1, 5), - mock.call(context, instance, 2, 5), - mock.call(context, instance, 3, 5), - mock.call(context, instance, 4, 5) - # 5/5: step to be executed by finish migration. - ] - self.assertEqual(prog_expected, - mock_update_instance_progress.call_args_list) - - -class GetVdisForInstanceTestCase(VMOpsTestBase): - """Tests get_vdis_for_instance utility method.""" - def setUp(self): - super(GetVdisForInstanceTestCase, self).setUp() - self.context = context.get_admin_context() - self.context.auth_token = 'auth_token' - self.session = mock.Mock() - self.vmops._session = self.session - self.instance = fake_instance.fake_instance_obj(self.context) - self.name_label = 'name' - self.image = 'fake_image_id' - - @mock.patch.object(volumeops.VolumeOps, "connect_volume", - return_value=("sr", "vdi_uuid")) - def test_vdis_for_instance_bdi_password_scrubbed(self, get_uuid_mock): - # setup fake data - data = {'name_label': self.name_label, - 'sr_uuid': 'fake', - 'auth_password': 'scrubme'} - bdm = [{'mount_device': '/dev/vda', - 'connection_info': {'data': data}}] - bdi = {'root_device_name': 'vda', - 'block_device_mapping': bdm} - - # Tests that the parameters to the to_xml method are sanitized for - # passwords when logged. 
- def fake_debug(*args, **kwargs): - if 'auth_password' in args[0]: - self.assertNotIn('scrubme', args[0]) - fake_debug.matched = True - - fake_debug.matched = False - - with mock.patch.object(vmops.LOG, 'debug', - side_effect=fake_debug) as debug_mock: - vdis = self.vmops._get_vdis_for_instance(self.context, - self.instance, self.name_label, self.image, - image_type=4, block_device_info=bdi) - self.assertEqual(1, len(vdis)) - get_uuid_mock.assert_called_once_with({"data": data}) - # we don't care what the log message is, we just want to make sure - # our stub method is called which asserts the password is scrubbed - self.assertTrue(debug_mock.called) - self.assertTrue(fake_debug.matched) - - -class AttachInterfaceTestCase(VMOpsTestBase): - """Test VIF hot plug/unplug""" - def setUp(self): - super(AttachInterfaceTestCase, self).setUp() - self.vmops.vif_driver = mock.Mock() - self.fake_vif = {'id': '12345'} - self.fake_instance = mock.Mock() - self.fake_instance.uuid = '6478' - - @mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref') - def test_attach_interface(self, mock_get_vm_opaque_ref): - mock_get_vm_opaque_ref.return_value = 'fake_vm_ref' - with mock.patch.object(self._session.VM, 'get_allowed_VIF_devices')\ - as fake_devices: - fake_devices.return_value = [2, 3, 4] - self.vmops.attach_interface(self.fake_instance, self.fake_vif) - fake_devices.assert_called_once_with('fake_vm_ref') - mock_get_vm_opaque_ref.assert_called_once_with(self.fake_instance) - self.vmops.vif_driver.plug.assert_called_once_with( - self.fake_instance, self.fake_vif, vm_ref='fake_vm_ref', - device=2) - - @mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref') - def test_attach_interface_no_devices(self, mock_get_vm_opaque_ref): - mock_get_vm_opaque_ref.return_value = 'fake_vm_ref' - with mock.patch.object(self._session.VM, 'get_allowed_VIF_devices')\ - as fake_devices: - fake_devices.return_value = [] - self.assertRaises(exception.InterfaceAttachFailed, - self.vmops.attach_interface, - self.fake_instance, self.fake_vif) - - @mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref') - def test_attach_interface_plug_failed(self, mock_get_vm_opaque_ref): - mock_get_vm_opaque_ref.return_value = 'fake_vm_ref' - with mock.patch.object(self._session.VM, 'get_allowed_VIF_devices')\ - as fake_devices: - fake_devices.return_value = [2, 3, 4] - self.vmops.vif_driver.plug.side_effect =\ - exception.VirtualInterfacePlugException('Failed to plug VIF') - self.assertRaises(exception.VirtualInterfacePlugException, - self.vmops.attach_interface, - self.fake_instance, self.fake_vif) - self.vmops.vif_driver.plug.assert_called_once_with( - self.fake_instance, self.fake_vif, vm_ref='fake_vm_ref', - device=2) - self.vmops.vif_driver.unplug.assert_called_once_with( - self.fake_instance, self.fake_vif, 'fake_vm_ref') - - @mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref') - def test_attach_interface_reraise_exception(self, mock_get_vm_opaque_ref): - mock_get_vm_opaque_ref.return_value = 'fake_vm_ref' - with mock.patch.object(self._session.VM, 'get_allowed_VIF_devices')\ - as fake_devices: - fake_devices.return_value = [2, 3, 4] - self.vmops.vif_driver.plug.side_effect =\ - exception.VirtualInterfacePlugException('Failed to plug VIF') - self.vmops.vif_driver.unplug.side_effect =\ - exception.VirtualInterfaceUnplugException( - 'Failed to unplug VIF') - ex = self.assertRaises(exception.VirtualInterfacePlugException, - self.vmops.attach_interface, - self.fake_instance, self.fake_vif) - self.assertEqual('Failed to plug VIF', six.text_type(ex)) - 
self.vmops.vif_driver.plug.assert_called_once_with( - self.fake_instance, self.fake_vif, vm_ref='fake_vm_ref', - device=2) - self.vmops.vif_driver.unplug.assert_called_once_with( - self.fake_instance, self.fake_vif, 'fake_vm_ref') - - @mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref') - def test_detach_interface(self, mock_get_vm_opaque_ref): - mock_get_vm_opaque_ref.return_value = 'fake_vm_ref' - self.vmops.detach_interface(self.fake_instance, self.fake_vif) - mock_get_vm_opaque_ref.assert_called_once_with(self.fake_instance) - self.vmops.vif_driver.unplug.assert_called_once_with( - self.fake_instance, self.fake_vif, 'fake_vm_ref') - - @mock.patch('nova.virt.xenapi.vmops.LOG.exception') - @mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref') - def test_detach_interface_exception(self, mock_get_vm_opaque_ref, - mock_log_exception): - mock_get_vm_opaque_ref.return_value = 'fake_vm_ref' - self.vmops.vif_driver.unplug.side_effect =\ - exception.VirtualInterfaceUnplugException('Failed to unplug VIF') - - self.assertRaises(exception.VirtualInterfaceUnplugException, - self.vmops.detach_interface, - self.fake_instance, self.fake_vif) - mock_log_exception.assert_called() - - @mock.patch('nova.virt.xenapi.vmops.LOG.exception', - new_callable=mock.NonCallableMock) - @mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref', - side_effect=exception.InstanceNotFound( - instance_id='fake_vm_ref')) - def test_detach_interface_instance_not_found( - self, mock_get_vm_opaque_ref, mock_log_exception): - self.assertRaises(exception.InstanceNotFound, - self.vmops.detach_interface, - self.fake_instance, self.fake_vif) diff --git a/nova/tests/unit/virt/xenapi/test_volume_utils.py b/nova/tests/unit/virt/xenapi/test_volume_utils.py deleted file mode 100644 index 1fcdad570634..000000000000 --- a/nova/tests/unit/virt/xenapi/test_volume_utils.py +++ /dev/null @@ -1,416 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
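The attach_interface tests that close test_vmops.py above pin down a rollback contract: when plugging a VIF fails, the driver unplugs it again and re-raises the original plug failure, even if the unplug itself also fails. A generic sketch of that contract (the helper and callables are illustrative, not nova's API):

    def attach_with_rollback(plug, unplug):
        try:
            plug()
        except Exception:
            # Rollback is best effort; the plug failure is what
            # callers see, so a failed unplug is swallowed here.
            try:
                unplug()
            except Exception:
                pass
            raise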
- -from eventlet import greenthread -import mock -import six - -from nova import exception -from nova import test -from nova.tests.unit.virt.xenapi import stubs -from nova.virt.xenapi import volume_utils - - -class SROps(stubs.XenAPITestBaseNoDB): - def test_find_sr_valid_uuid(self): - self.session = mock.Mock() - self.session.call_xenapi.return_value = 'sr_ref' - self.assertEqual(volume_utils.find_sr_by_uuid(self.session, - 'sr_uuid'), - 'sr_ref') - - def test_find_sr_invalid_uuid(self): - class UUIDException(Exception): - details = ["UUID_INVALID", "", "", ""] - - self.session = mock.Mock() - self.session.XenAPI.Failure = UUIDException - self.session.call_xenapi.side_effect = UUIDException - self.assertIsNone( - volume_utils.find_sr_by_uuid(self.session, 'sr_uuid')) - - def test_find_sr_from_vdi(self): - vdi_ref = 'fake-ref' - - def fake_call_xenapi(method, *args): - self.assertEqual(method, 'VDI.get_SR') - self.assertEqual(args[0], vdi_ref) - return args[0] - - session = mock.Mock() - session.call_xenapi.side_effect = fake_call_xenapi - self.assertEqual(volume_utils.find_sr_from_vdi(session, vdi_ref), - vdi_ref) - - def test_find_sr_from_vdi_exception(self): - vdi_ref = 'fake-ref' - - class FakeException(Exception): - pass - - session = mock.Mock() - session.XenAPI.Failure = FakeException - session.call_xenapi.side_effect = FakeException - self.assertRaises(exception.StorageError, - volume_utils.find_sr_from_vdi, session, vdi_ref) - - -class ISCSIParametersTestCase(stubs.XenAPITestBaseNoDB): - def test_target_host(self): - self.assertEqual(volume_utils._get_target_host('host:port'), - 'host') - - self.assertEqual(volume_utils._get_target_host('host'), - 'host') - - # There is no default value - self.assertIsNone(volume_utils._get_target_host(':port')) - - self.assertIsNone(volume_utils._get_target_host(None)) - - def test_target_port(self): - self.assertEqual(volume_utils._get_target_port('host:port'), 'port') - self.assertEqual(volume_utils._get_target_port('host'), 3260) - - -class IntroduceTestCase(stubs.XenAPITestBaseNoDB): - - @mock.patch.object(volume_utils, '_get_vdi_ref') - @mock.patch.object(greenthread, 'sleep') - def test_introduce_vdi_retry(self, mock_sleep, mock_get_vdi_ref): - def fake_get_vdi_ref(session, sr_ref, vdi_uuid, target_lun): - fake_get_vdi_ref.call_count += 1 - if fake_get_vdi_ref.call_count == 2: - return 'vdi_ref' - - def fake_call_xenapi(method, *args): - if method == 'SR.scan': - return - elif method == 'VDI.get_record': - return {'managed': 'true'} - - session = mock.Mock() - session.call_xenapi.side_effect = fake_call_xenapi - - mock_get_vdi_ref.side_effect = fake_get_vdi_ref - fake_get_vdi_ref.call_count = 0 - - self.assertEqual(volume_utils.introduce_vdi(session, 'sr_ref'), - 'vdi_ref') - mock_sleep.assert_called_once_with(20) - - @mock.patch.object(volume_utils, '_get_vdi_ref') - @mock.patch.object(greenthread, 'sleep') - def test_introduce_vdi_exception(self, mock_sleep, mock_get_vdi_ref): - def fake_call_xenapi(method, *args): - if method == 'SR.scan': - return - elif method == 'VDI.get_record': - return {'managed': 'true'} - - session = mock.Mock() - session.call_xenapi.side_effect = fake_call_xenapi - mock_get_vdi_ref.return_value = None - - self.assertRaises(exception.StorageError, - volume_utils.introduce_vdi, session, 'sr_ref') - mock_sleep.assert_called_once_with(20) - - -class ParseVolumeInfoTestCase(stubs.XenAPITestBaseNoDB): - def test_mountpoint_to_number(self): - cases = { - 'sda': 0, - 'sdp': 15, - 'hda': 0, - 'hdp': 15, - 'vda': 0, - 
'xvda': 0, - '0': 0, - '10': 10, - 'vdq': -1, - 'sdq': -1, - 'hdq': -1, - 'xvdq': -1, - } - - for (input, expected) in cases.items(): - actual = volume_utils._mountpoint_to_number(input) - self.assertEqual(actual, expected, - '%s yielded %s, not %s' % (input, actual, expected)) - - @classmethod - def _make_connection_info(cls): - target_iqn = 'iqn.2010-10.org.openstack:volume-00000001' - return {'driver_volume_type': 'iscsi', - 'data': {'volume_id': 1, - 'target_iqn': target_iqn, - 'target_portal': '127.0.0.1:3260,fake', - 'target_lun': None, - 'auth_method': 'CHAP', - 'auth_username': 'username', - 'auth_password': 'verybadpass'}} - - def test_parse_volume_info_parsing_auth_details(self): - conn_info = self._make_connection_info() - result = volume_utils._parse_volume_info(conn_info['data']) - - self.assertEqual('username', result['chapuser']) - self.assertEqual('verybadpass', result['chappassword']) - - def test_parse_volume_info_missing_details(self): - # Tests that a StorageError is raised if volume_id, target_portal, or - # target_iqn is missing from connection_data. Also ensures that the - # auth_password value is not present in the StorageError message. - for data_key_to_null in ('volume_id', 'target_portal', 'target_iqn'): - conn_info = self._make_connection_info() - conn_info['data'][data_key_to_null] = None - ex = self.assertRaises(exception.StorageError, - volume_utils._parse_volume_info, - conn_info['data']) - self.assertNotIn('verybadpass', six.text_type(ex)) - - def test_get_device_number_raise_exception_on_wrong_mountpoint(self): - self.assertRaises( - exception.StorageError, - volume_utils.get_device_number, - 'dev/sd') - - -class FindVBDTestCase(stubs.XenAPITestBaseNoDB): - def test_find_vbd_by_number_works(self): - session = mock.Mock() - session.VM.get_VBDs.return_value = ["a", "b"] - session.VBD.get_userdevice.return_value = "1" - - result = volume_utils.find_vbd_by_number(session, "vm_ref", 1) - - self.assertEqual("a", result) - session.VM.get_VBDs.assert_called_once_with("vm_ref") - session.VBD.get_userdevice.assert_called_once_with("a") - - def test_find_vbd_by_number_no_matches(self): - session = mock.Mock() - session.VM.get_VBDs.return_value = ["a", "b"] - session.VBD.get_userdevice.return_value = "3" - - result = volume_utils.find_vbd_by_number(session, "vm_ref", 1) - - self.assertIsNone(result) - session.VM.get_VBDs.assert_called_once_with("vm_ref") - expected = [mock.call("a"), mock.call("b")] - self.assertEqual(expected, - session.VBD.get_userdevice.call_args_list) - - def test_find_vbd_by_number_no_vbds(self): - session = mock.Mock() - session.VM.get_VBDs.return_value = [] - - result = volume_utils.find_vbd_by_number(session, "vm_ref", 1) - - self.assertIsNone(result) - session.VM.get_VBDs.assert_called_once_with("vm_ref") - self.assertFalse(session.VBD.get_userdevice.called) - - def test_find_vbd_by_number_ignores_exception(self): - session = mock.Mock() - session.XenAPI.Failure = test.TestingException - session.VM.get_VBDs.return_value = ["a"] - session.VBD.get_userdevice.side_effect = test.TestingException - - result = volume_utils.find_vbd_by_number(session, "vm_ref", 1) - - self.assertIsNone(result) - session.VM.get_VBDs.assert_called_once_with("vm_ref") - session.VBD.get_userdevice.assert_called_once_with("a") - - -class IntroduceSRTestCase(stubs.XenAPITestBaseNoDB): - @mock.patch.object(volume_utils, '_create_pbd') - def test_backend_kind(self, create_pbd): - session = mock.Mock() - session.product_version = (6, 5, 0) - session.call_xenapi.return_value = 
'sr_ref' - params = {'sr_type': 'iscsi'} - sr_uuid = 'sr_uuid' - label = 'label' - expected_params = {'backend-kind': 'vbd'} - - volume_utils.introduce_sr(session, sr_uuid, label, params) - session.call_xenapi.assert_any_call('SR.introduce', sr_uuid, - label, '', 'iscsi', - '', False, expected_params) - - @mock.patch.object(volume_utils, '_create_pbd') - def test_backend_kind_upstream_fix(self, create_pbd): - session = mock.Mock() - session.product_version = (7, 0, 0) - session.call_xenapi.return_value = 'sr_ref' - params = {'sr_type': 'iscsi'} - sr_uuid = 'sr_uuid' - label = 'label' - expected_params = {} - - volume_utils.introduce_sr(session, sr_uuid, label, params) - session.call_xenapi.assert_any_call('SR.introduce', sr_uuid, - label, '', 'iscsi', - '', False, expected_params) - - -class BootedFromVolumeTestCase(stubs.XenAPITestBaseNoDB): - def test_booted_from_volume(self): - session = mock.Mock() - session.VM.get_VBDs.return_value = ['vbd_ref'] - session.VBD.get_userdevice.return_value = '0' - session.VBD.get_other_config.return_value = {'osvol': True} - booted_from_volume = volume_utils.is_booted_from_volume(session, - 'vm_ref') - self.assertTrue(booted_from_volume) - - def test_not_booted_from_volume(self): - session = mock.Mock() - session.VM.get_VBDs.return_value = ['vbd_ref'] - session.VBD.get_userdevice.return_value = '0' - session.VBD.get_other_config.return_value = {} - booted_from_volume = volume_utils.is_booted_from_volume(session, - 'vm_ref') - self.assertFalse(booted_from_volume) - - -class MultipleVolumesTestCase(stubs.XenAPITestBaseNoDB): - def test_sr_info_two_luns(self): - data1 = {'target_portal': 'host:port', - 'target_iqn': 'iqn', - 'volume_id': 'vol_id_1', - 'target_lun': 1} - data2 = {'target_portal': 'host:port', - 'target_iqn': 'iqn', - 'volume_id': 'vol_id_2', - 'target_lun': 2} - (sr_uuid1, label1, params1) = volume_utils.parse_sr_info(data1) - (sr_uuid2, label2, params2) = volume_utils.parse_sr_info(data2) - - self.assertEqual(sr_uuid1, sr_uuid2) - self.assertEqual(label1, label2) - - @mock.patch.object(volume_utils, 'forget_sr') - def test_purge_sr_no_VBDs(self, mock_forget): - - def _call_xenapi(func, *args): - if func == 'SR.get_VDIs': - return ['VDI1', 'VDI2'] - if func == 'VDI.get_VBDs': - return [] - - self.session = mock.Mock() - self.session.call_xenapi = _call_xenapi - - volume_utils.purge_sr(self.session, 'SR') - - mock_forget.assert_called_once_with(self.session, 'SR') - - @mock.patch.object(volume_utils, 'forget_sr') - def test_purge_sr_in_use(self, mock_forget): - - def _call_xenapi(func, *args): - if func == 'SR.get_VDIs': - return ['VDI1', 'VDI2'] - if func == 'VDI.get_VBDs': - if args[0] == 'VDI1': - return ['VBD1'] - if args[0] == 'VDI2': - return ['VBD2'] - - self.session = mock.Mock() - self.session.call_xenapi = _call_xenapi - - volume_utils.purge_sr(self.session, 'SR') - - self.assertEqual([], mock_forget.mock_calls) - - -class TestStreamToVDI(stubs.XenAPITestBaseNoDB): - - @mock.patch.object(volume_utils, '_stream_to_vdi') - @mock.patch.object(volume_utils, '_get_vdi_import_path', - return_value='vdi_import_path') - def test_creates_task_conn(self, mock_import_path, mock_stream): - session = self.get_fake_session() - session.custom_task = mock.MagicMock() - session.custom_task.return_value.__enter__.return_value = 'task' - session.http_connection = mock.MagicMock() - session.http_connection.return_value.__enter__.return_value = 'conn' - - instance = {'name': 'instance-name'} - - volume_utils.stream_to_vdi(session, instance, 'vhd', 
'file_obj', 100, - 'vdi_ref') - - session.custom_task.assert_called_with('VDI_IMPORT_for_instance-name') - mock_stream.assert_called_with('conn', 'vdi_import_path', 100, - 'file_obj') - - self.assertTrue(session.http_connection.return_value.__exit__.called) - self.assertTrue(session.custom_task.return_value.__exit__.called) - - def test_stream_to_vdi_tiny(self): - mock_file = mock.Mock() - mock_file.read.side_effect = ['a'] - mock_conn = mock.Mock() - resp = mock.Mock() - resp.status = '200' - resp.reason = 'OK' - mock_conn.getresponse.return_value = resp - - volume_utils._stream_to_vdi(mock_conn, '/path', 1, mock_file) - args, kwargs = mock_conn.request.call_args - self.assertEqual(kwargs['headers']['Content-Length'], '1') - mock_file.read.assert_called_once_with(1) - mock_conn.send.assert_called_once_with('a') - - def test_stream_to_vdi_chunk_multiple(self): - mock_file = mock.Mock() - mock_file.read.side_effect = ['aaaaa', 'bbbbb'] - mock_conn = mock.Mock() - resp = mock.Mock() - resp.status = '200' - resp.reason = 'OK' - mock_conn.getresponse.return_value = resp - - tot_size = 2 * 16 * 1024 - volume_utils._stream_to_vdi(mock_conn, '/path', tot_size, mock_file) - args, kwargs = mock_conn.request.call_args - self.assertEqual(kwargs['headers']['Content-Length'], str(tot_size)) - mock_file.read.assert_has_calls([mock.call(16 * 1024), - mock.call(16 * 1024)]) - mock_conn.send.assert_has_calls([mock.call('aaaaa'), - mock.call('bbbbb')]) - - def test_stream_to_vdi_chunk_remaining(self): - mock_file = mock.Mock() - mock_file.read.side_effect = ['aaaaa', 'bb'] - mock_conn = mock.Mock() - resp = mock.Mock() - resp.status = '200' - resp.reason = 'OK' - mock_conn.getresponse.return_value = resp - - tot_size = 16 * 1024 + 1024 - volume_utils._stream_to_vdi(mock_conn, '/path', tot_size, mock_file) - args, kwargs = mock_conn.request.call_args - self.assertEqual(kwargs['headers']['Content-Length'], str(tot_size)) - mock_file.read.assert_has_calls([mock.call(16 * 1024), - mock.call(1024)]) - mock_conn.send.assert_has_calls([mock.call('aaaaa'), mock.call('bb')]) diff --git a/nova/tests/unit/virt/xenapi/test_volumeops.py b/nova/tests/unit/virt/xenapi/test_volumeops.py deleted file mode 100644 index eac534338ebb..000000000000 --- a/nova/tests/unit/virt/xenapi/test_volumeops.py +++ /dev/null @@ -1,547 +0,0 @@ -# Copyright (c) 2012 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
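The _stream_to_vdi cases at the end of test_volume_utils.py above fix the upload behaviour precisely: one request announcing the full Content-Length up front, then the body pushed in 16 KiB reads with a single short read for the remainder. A sketch of a sender satisfying those expectations (the function name is hypothetical, and `conn` is assumed to expose the request()/send() pair that the mocked connection provides):

    CHUNK_SIZE = 16 * 1024

    def stream_in_chunks(conn, path, total_size, file_obj):
        # Headers first; the body follows as raw sends.
        conn.request('PUT', path,
                     headers={'Content-Length': str(total_size)})
        remaining = total_size
        while remaining > 0:
            # Full 16 KiB chunks, then one short read for the tail.
            size = min(CHUNK_SIZE, remaining)
            conn.send(file_obj.read(size))
            remaining -= size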
- -import mock - -from nova import exception -from nova import test -from nova.tests.unit.virt.xenapi import stubs -from nova.virt.xenapi import vm_utils -from nova.virt.xenapi import volume_utils -from nova.virt.xenapi import volumeops - - -class VolumeOpsTestBase(stubs.XenAPITestBaseNoDB): - def setUp(self): - super(VolumeOpsTestBase, self).setUp() - self._setup_mock_volumeops() - - def _setup_mock_volumeops(self): - self.session = stubs.FakeSessionForVolumeTests('fake_uri') - self.ops = volumeops.VolumeOps(self.session) - - -class VolumeDetachTestCase(VolumeOpsTestBase): - @mock.patch.object(volumeops.vm_utils, 'lookup', return_value='vmref') - @mock.patch.object(volumeops.volume_utils, 'find_vbd_by_number', - return_value='vbdref') - @mock.patch.object(volumeops.vm_utils, 'is_vm_shutdown', - return_value=False) - @mock.patch.object(volumeops.vm_utils, 'unplug_vbd') - @mock.patch.object(volumeops.vm_utils, 'destroy_vbd') - @mock.patch.object(volumeops.volume_utils, 'get_device_number', - return_value='devnumber') - @mock.patch.object(volumeops.volume_utils, 'find_sr_from_vbd', - return_value='srref') - @mock.patch.object(volumeops.volume_utils, 'purge_sr') - def test_detach_volume_call(self, mock_purge, mock_find_sr, - mock_get_device_num, mock_destroy_vbd, - mock_unplug_vbd, mock_is_vm, mock_find_vbd, - mock_lookup): - - ops = volumeops.VolumeOps('session') - - ops.detach_volume( - dict(driver_volume_type='iscsi', data='conn_data'), - 'instance_1', 'mountpoint') - - mock_lookup.assert_called_once_with('session', 'instance_1') - mock_get_device_num.assert_called_once_with('mountpoint') - mock_find_vbd.assert_called_once_with('session', 'vmref', 'devnumber') - mock_is_vm.assert_called_once_with('session', 'vmref') - mock_unplug_vbd.assert_called_once_with('session', 'vbdref', 'vmref') - mock_destroy_vbd.assert_called_once_with('session', 'vbdref') - mock_find_sr.assert_called_once_with('session', 'vbdref') - mock_purge.assert_called_once_with('session', 'srref') - - @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs") - @mock.patch.object(volume_utils, "find_vbd_by_number") - @mock.patch.object(vm_utils, "vm_ref_or_raise") - def test_detach_volume(self, mock_vm, mock_vbd, mock_detach): - mock_vm.return_value = "vm_ref" - mock_vbd.return_value = "vbd_ref" - - self.ops.detach_volume({}, "name", "/dev/xvdd") - - mock_vm.assert_called_once_with(self.session, "name") - mock_vbd.assert_called_once_with(self.session, "vm_ref", 3) - mock_detach.assert_called_once_with("vm_ref", ["vbd_ref"]) - - @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs") - @mock.patch.object(volume_utils, "find_vbd_by_number") - @mock.patch.object(vm_utils, "vm_ref_or_raise") - def test_detach_volume_skips_error_skip_attach(self, mock_vm, mock_vbd, - mock_detach): - mock_vm.return_value = "vm_ref" - mock_vbd.return_value = None - - self.ops.detach_volume({}, "name", "/dev/xvdd") - - self.assertFalse(mock_detach.called) - - @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs") - @mock.patch.object(volume_utils, "find_vbd_by_number") - @mock.patch.object(vm_utils, "vm_ref_or_raise") - def test_detach_volume_raises(self, mock_vm, mock_vbd, - mock_detach): - mock_vm.return_value = "vm_ref" - mock_vbd.side_effect = test.TestingException - - self.assertRaises(test.TestingException, - self.ops.detach_volume, {}, "name", "/dev/xvdd") - self.assertFalse(mock_detach.called) - - @mock.patch.object(volume_utils, "purge_sr") - @mock.patch.object(vm_utils, "destroy_vbd") - 
@mock.patch.object(volume_utils, "find_sr_from_vbd") - @mock.patch.object(vm_utils, "unplug_vbd") - @mock.patch.object(vm_utils, "is_vm_shutdown") - def test_detach_vbds_and_srs_not_shutdown(self, mock_shutdown, mock_unplug, - mock_find_sr, mock_destroy, mock_purge): - mock_shutdown.return_value = False - mock_find_sr.return_value = "sr_ref" - - self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref"]) - - mock_shutdown.assert_called_once_with(self.session, "vm_ref") - mock_find_sr.assert_called_once_with(self.session, "vbd_ref") - mock_unplug.assert_called_once_with(self.session, "vbd_ref", "vm_ref") - mock_destroy.assert_called_once_with(self.session, "vbd_ref") - mock_purge.assert_called_once_with(self.session, "sr_ref") - - @mock.patch.object(volume_utils, "purge_sr") - @mock.patch.object(vm_utils, "destroy_vbd") - @mock.patch.object(volume_utils, "find_sr_from_vbd") - @mock.patch.object(vm_utils, "unplug_vbd") - @mock.patch.object(vm_utils, "is_vm_shutdown") - def test_detach_vbds_and_srs_is_shutdown(self, mock_shutdown, mock_unplug, - mock_find_sr, mock_destroy, mock_purge): - mock_shutdown.return_value = True - mock_find_sr.return_value = "sr_ref" - - self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref_1", "vbd_ref_2"]) - - expected = [mock.call(self.session, "vbd_ref_1"), - mock.call(self.session, "vbd_ref_2")] - self.assertEqual(expected, mock_destroy.call_args_list) - mock_purge.assert_called_with(self.session, "sr_ref") - self.assertFalse(mock_unplug.called) - - @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs") - @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs") - def test_detach_all_no_volumes(self, mock_get_all, mock_detach): - mock_get_all.return_value = [] - - self.ops.detach_all("vm_ref") - - mock_get_all.assert_called_once_with("vm_ref") - self.assertFalse(mock_detach.called) - - @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs") - @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs") - def test_detach_all_volumes(self, mock_get_all, mock_detach): - mock_get_all.return_value = ["1"] - - self.ops.detach_all("vm_ref") - - mock_get_all.assert_called_once_with("vm_ref") - mock_detach.assert_called_once_with("vm_ref", ["1"]) - - def test_get_all_volume_vbd_refs_no_vbds(self): - with mock.patch.object(self.session.VM, "get_VBDs") as mock_get: - with mock.patch.object(self.session.VBD, - "get_other_config") as mock_conf: - mock_get.return_value = [] - - result = self.ops._get_all_volume_vbd_refs("vm_ref") - - self.assertEqual([], list(result)) - mock_get.assert_called_once_with("vm_ref") - self.assertFalse(mock_conf.called) - - def test_get_all_volume_vbd_refs_no_volumes(self): - with mock.patch.object(self.session.VM, "get_VBDs") as mock_get: - with mock.patch.object(self.session.VBD, - "get_other_config") as mock_conf: - mock_get.return_value = ["1"] - mock_conf.return_value = {} - - result = self.ops._get_all_volume_vbd_refs("vm_ref") - - self.assertEqual([], list(result)) - mock_get.assert_called_once_with("vm_ref") - mock_conf.assert_called_once_with("1") - - def test_get_all_volume_vbd_refs_with_volumes(self): - with mock.patch.object(self.session.VM, "get_VBDs") as mock_get: - with mock.patch.object(self.session.VBD, - "get_other_config") as mock_conf: - mock_get.return_value = ["1", "2"] - mock_conf.return_value = {"osvol": True} - - result = self.ops._get_all_volume_vbd_refs("vm_ref") - - self.assertEqual(["1", "2"], list(result)) - mock_get.assert_called_once_with("vm_ref") - - -class 
AttachVolumeTestCase(VolumeOpsTestBase): - @mock.patch.object(volumeops.VolumeOps, "_attach_volume") - @mock.patch.object(vm_utils, "vm_ref_or_raise") - def test_attach_volume_default_hotplug(self, mock_get_vm, mock_attach): - mock_get_vm.return_value = "vm_ref" - - self.ops.attach_volume({}, "instance_name", "/dev/xvda") - - mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", - '/dev/xvda', True) - - @mock.patch.object(volumeops.VolumeOps, "_attach_volume") - @mock.patch.object(vm_utils, "vm_ref_or_raise") - def test_attach_volume_hotplug(self, mock_get_vm, mock_attach): - mock_get_vm.return_value = "vm_ref" - - self.ops.attach_volume({}, "instance_name", "/dev/xvda", False) - - mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", - '/dev/xvda', False) - - @mock.patch.object(volumeops.VolumeOps, "_attach_volume") - def test_attach_volume_default_hotplug_connect_volume(self, mock_attach): - self.ops.connect_volume({}) - mock_attach.assert_called_once_with({}) - - @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type") - @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider") - @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume") - @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm") - def test_attach_volume_with_defaults(self, mock_attach, mock_hypervisor, - mock_provider, mock_driver): - connection_info = {"data": {}} - with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi: - mock_provider.return_value = ("sr_ref", "sr_uuid") - mock_vdi.return_value = "vdi_uuid" - - result = self.ops._attach_volume(connection_info) - - self.assertEqual(result, ("sr_uuid", "vdi_uuid")) - - mock_driver.assert_called_once_with(connection_info) - mock_provider.assert_called_once_with({}, None) - mock_hypervisor.assert_called_once_with("sr_ref", {}) - self.assertFalse(mock_attach.called) - - @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type") - @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider") - @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume") - @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm") - def test_attach_volume_with_hot_attach(self, mock_attach, mock_hypervisor, - mock_provider, mock_driver): - connection_info = {"data": {}} - with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi: - mock_provider.return_value = ("sr_ref", "sr_uuid") - mock_hypervisor.return_value = "vdi_ref" - mock_vdi.return_value = "vdi_uuid" - - result = self.ops._attach_volume(connection_info, "vm_ref", - "name", 2, True) - - self.assertEqual(result, ("sr_uuid", "vdi_uuid")) - - mock_driver.assert_called_once_with(connection_info) - mock_provider.assert_called_once_with({}, "name") - mock_hypervisor.assert_called_once_with("sr_ref", {}) - mock_attach.assert_called_once_with("vdi_ref", "vm_ref", "name", 2, - True) - - @mock.patch.object(volume_utils, "forget_sr") - @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type") - @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider") - @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume") - @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm") - def test_attach_volume_cleanup(self, mock_attach, mock_hypervisor, - mock_provider, mock_driver, mock_forget): - connection_info = {"data": {}} - mock_provider.return_value = ("sr_ref", "sr_uuid") - mock_hypervisor.side_effect = test.TestingException - - 
self.assertRaises(test.TestingException, - self.ops._attach_volume, connection_info) - - mock_driver.assert_called_once_with(connection_info) - mock_provider.assert_called_once_with({}, None) - mock_hypervisor.assert_called_once_with("sr_ref", {}) - mock_forget.assert_called_once_with(self.session, "sr_ref") - self.assertFalse(mock_attach.called) - - def test_check_is_supported_driver_type_pass_iscsi(self): - conn_info = {"driver_volume_type": "iscsi"} - self.ops._check_is_supported_driver_type(conn_info) - - def test_check_is_supported_driver_type_pass_xensm(self): - conn_info = {"driver_volume_type": "xensm"} - self.ops._check_is_supported_driver_type(conn_info) - - def test_check_is_supported_driver_type_pass_bad(self): - conn_info = {"driver_volume_type": "bad"} - self.assertRaises(exception.VolumeDriverNotFound, - self.ops._check_is_supported_driver_type, conn_info) - - @mock.patch.object(volume_utils, "introduce_sr") - @mock.patch.object(volume_utils, "find_sr_by_uuid") - @mock.patch.object(volume_utils, "parse_sr_info") - def test_connect_to_volume_provider_new_sr(self, mock_parse, mock_find_sr, - mock_introduce_sr): - mock_parse.return_value = ("uuid", "label", "params") - mock_find_sr.return_value = None - mock_introduce_sr.return_value = "sr_ref" - - ref, uuid = self.ops._connect_to_volume_provider({}, "name") - - self.assertEqual("sr_ref", ref) - self.assertEqual("uuid", uuid) - mock_parse.assert_called_once_with({}, "Disk-for:name") - mock_find_sr.assert_called_once_with(self.session, "uuid") - mock_introduce_sr.assert_called_once_with(self.session, "uuid", - "label", "params") - - @mock.patch.object(volume_utils, "introduce_sr") - @mock.patch.object(volume_utils, "find_sr_by_uuid") - @mock.patch.object(volume_utils, "parse_sr_info") - def test_connect_to_volume_provider_old_sr(self, mock_parse, mock_find_sr, - mock_introduce_sr): - mock_parse.return_value = ("uuid", "label", "params") - mock_find_sr.return_value = "sr_ref" - - ref, uuid = self.ops._connect_to_volume_provider({}, "name") - - self.assertEqual("sr_ref", ref) - self.assertEqual("uuid", uuid) - mock_parse.assert_called_once_with({}, "Disk-for:name") - mock_find_sr.assert_called_once_with(self.session, "uuid") - self.assertFalse(mock_introduce_sr.called) - - @mock.patch.object(volume_utils, "introduce_vdi") - def test_connect_hypervisor_to_volume_regular(self, mock_intro): - mock_intro.return_value = "vdi" - - result = self.ops._connect_hypervisor_to_volume("sr", {}) - - self.assertEqual("vdi", result) - mock_intro.assert_called_once_with(self.session, "sr") - - @mock.patch.object(volume_utils, "introduce_vdi") - def test_connect_hypervisor_to_volume_vdi(self, mock_intro): - mock_intro.return_value = "vdi" - - conn = {"vdi_uuid": "id"} - result = self.ops._connect_hypervisor_to_volume("sr", conn) - - self.assertEqual("vdi", result) - mock_intro.assert_called_once_with(self.session, "sr", - vdi_uuid="id") - - @mock.patch.object(volume_utils, "introduce_vdi") - def test_connect_hypervisor_to_volume_lun(self, mock_intro): - mock_intro.return_value = "vdi" - - conn = {"target_lun": "lun"} - result = self.ops._connect_hypervisor_to_volume("sr", conn) - - self.assertEqual("vdi", result) - mock_intro.assert_called_once_with(self.session, "sr", - target_lun="lun") - - @mock.patch.object(volume_utils, "introduce_vdi") - @mock.patch.object(volumeops.LOG, 'debug') - def test_connect_hypervisor_to_volume_mask_password(self, mock_debug, - mock_intro): - # Tests that the connection_data is scrubbed before logging. 
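# The loop below leans on how mock records calls: each entry in
# call_args_list unpacks as an (args, kwargs) pair, so call[0] is the
# tuple of positional arguments. A standalone illustration:
#
#     m = mock.Mock()
#     m('positional', key='value')
#     args, kwargs = m.call_args_list[0]
#     assert args == ('positional',)
#     assert kwargs == {'key': 'value'}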
- data = {'auth_password': 'verybadpass'} - self.ops._connect_hypervisor_to_volume("sr", data) - self.assertTrue(mock_debug.called, 'LOG.debug was not called') - password_logged = False - for call in mock_debug.call_args_list: - # The call object is a tuple of (args, kwargs) - if 'verybadpass' in call[0]: - password_logged = True - break - self.assertFalse(password_logged, 'connection_data was not scrubbed') - - @mock.patch.object(vm_utils, "is_vm_shutdown") - @mock.patch.object(vm_utils, "create_vbd") - def test_attach_volume_to_vm_plug(self, mock_vbd, mock_shutdown): - mock_vbd.return_value = "vbd" - mock_shutdown.return_value = False - - with mock.patch.object(self.session.VBD, "plug") as mock_plug: - self.ops._attach_volume_to_vm("vdi", "vm", "name", '/dev/2', True) - mock_plug.assert_called_once_with("vbd", "vm") - - mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2, - bootable=False, osvol=True) - mock_shutdown.assert_called_once_with(self.session, "vm") - - @mock.patch.object(vm_utils, "is_vm_shutdown") - @mock.patch.object(vm_utils, "create_vbd") - def test_attach_volume_to_vm_no_plug(self, mock_vbd, mock_shutdown): - mock_vbd.return_value = "vbd" - mock_shutdown.return_value = True - - with mock.patch.object(self.session.VBD, "plug") as mock_plug: - self.ops._attach_volume_to_vm("vdi", "vm", "name", '/dev/2', True) - self.assertFalse(mock_plug.called) - - mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2, - bootable=False, osvol=True) - mock_shutdown.assert_called_once_with(self.session, "vm") - - @mock.patch.object(vm_utils, "is_vm_shutdown") - @mock.patch.object(vm_utils, "create_vbd") - def test_attach_volume_to_vm_no_hotplug(self, mock_vbd, mock_shutdown): - mock_vbd.return_value = "vbd" - - with mock.patch.object(self.session.VBD, "plug") as mock_plug: - self.ops._attach_volume_to_vm("vdi", "vm", "name", '/dev/2', False) - self.assertFalse(mock_plug.called) - - mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2, - bootable=False, osvol=True) - self.assertFalse(mock_shutdown.called) - - -class FindBadVolumeTestCase(VolumeOpsTestBase): - @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs") - def test_find_bad_volumes_no_vbds(self, mock_get_all): - mock_get_all.return_value = [] - - result = self.ops.find_bad_volumes("vm_ref") - - mock_get_all.assert_called_once_with("vm_ref") - self.assertEqual([], result) - - @mock.patch.object(volume_utils, "find_sr_from_vbd") - @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs") - def test_find_bad_volumes_no_bad_vbds(self, mock_get_all, mock_find_sr): - mock_get_all.return_value = ["1", "2"] - mock_find_sr.return_value = "sr_ref" - - with mock.patch.object(self.session.SR, "scan") as mock_scan: - result = self.ops.find_bad_volumes("vm_ref") - - mock_get_all.assert_called_once_with("vm_ref") - expected_find = [mock.call(self.session, "1"), - mock.call(self.session, "2")] - self.assertEqual(expected_find, mock_find_sr.call_args_list) - expected_scan = [mock.call("sr_ref"), mock.call("sr_ref")] - self.assertEqual(expected_scan, mock_scan.call_args_list) - self.assertEqual([], result) - - @mock.patch.object(volume_utils, "find_sr_from_vbd") - @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs") - def test_find_bad_volumes_bad_vbds(self, mock_get_all, mock_find_sr): - mock_get_all.return_value = ["vbd_ref"] - mock_find_sr.return_value = "sr_ref" - - class FakeException(Exception): - details = ['SR_BACKEND_FAILURE_40', "", "", ""] - - session = mock.Mock() - 
session.XenAPI.Failure = FakeException - self.ops._session = session - - with mock.patch.object(session.SR, "scan") as mock_scan: - with mock.patch.object(session.VBD, - "get_device") as mock_get: - mock_scan.side_effect = FakeException - mock_get.return_value = "xvdb" - - result = self.ops.find_bad_volumes("vm_ref") - - mock_get_all.assert_called_once_with("vm_ref") - mock_scan.assert_called_once_with("sr_ref") - mock_get.assert_called_once_with("vbd_ref") - self.assertEqual(["/dev/xvdb"], result) - - @mock.patch.object(volume_utils, "find_sr_from_vbd") - @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs") - def test_find_bad_volumes_raises(self, mock_get_all, mock_find_sr): - mock_get_all.return_value = ["vbd_ref"] - mock_find_sr.return_value = "sr_ref" - - class FakeException(Exception): - details = ['foo', "", "", ""] - - session = mock.Mock() - session.XenAPI.Failure = FakeException - self.ops._session = session - - with mock.patch.object(session.SR, "scan") as mock_scan: - with mock.patch.object(session.VBD, - "get_device") as mock_get: - mock_scan.side_effect = FakeException - mock_get.return_value = "xvdb" - - self.assertRaises(FakeException, - self.ops.find_bad_volumes, "vm_ref") - mock_scan.assert_called_once_with("sr_ref") - - -class CleanupFromVDIsTestCase(VolumeOpsTestBase): - def _check_find_purge_calls(self, find_sr_from_vdi, purge_sr, vdi_refs, - sr_refs): - find_sr_calls = [mock.call(self.ops._session, vdi_ref) for vdi_ref - in vdi_refs] - find_sr_from_vdi.assert_has_calls(find_sr_calls) - purge_sr_calls = [mock.call(self.ops._session, sr_ref) for sr_ref - in sr_refs] - purge_sr.assert_has_calls(purge_sr_calls) - - @mock.patch.object(volume_utils, 'find_sr_from_vdi') - @mock.patch.object(volume_utils, 'purge_sr') - def test_safe_cleanup_from_vdis(self, purge_sr, find_sr_from_vdi): - vdi_refs = ['vdi_ref1', 'vdi_ref2'] - sr_refs = ['sr_ref1', 'sr_ref2'] - find_sr_from_vdi.side_effect = sr_refs - self.ops.safe_cleanup_from_vdis(vdi_refs) - - self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs, - sr_refs) - - @mock.patch.object(volume_utils, 'find_sr_from_vdi', - side_effect=[exception.StorageError(reason=''), 'sr_ref2']) - @mock.patch.object(volume_utils, 'purge_sr') - def test_safe_cleanup_from_vdis_handles_find_sr_exception(self, purge_sr, - find_sr_from_vdi): - vdi_refs = ['vdi_ref1', 'vdi_ref2'] - sr_refs = ['sr_ref2'] - find_sr_from_vdi.side_effect = [exception.StorageError(reason=''), - sr_refs[0]] - self.ops.safe_cleanup_from_vdis(vdi_refs) - - self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs, - sr_refs) - - @mock.patch.object(volume_utils, 'find_sr_from_vdi') - @mock.patch.object(volume_utils, 'purge_sr') - def test_safe_cleanup_from_vdis_handles_purge_sr_exception(self, purge_sr, - find_sr_from_vdi): - vdi_refs = ['vdi_ref1', 'vdi_ref2'] - sr_refs = ['sr_ref1', 'sr_ref2'] - find_sr_from_vdi.side_effect = sr_refs - purge_sr.side_effect = [test.TestingException, None] - self.ops.safe_cleanup_from_vdis(vdi_refs) - - self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs, - sr_refs) diff --git a/nova/tests/unit/virt/xenapi/test_xenapi.py b/nova/tests/unit/virt/xenapi/test_xenapi.py deleted file mode 100644 index 48b8fa15d084..000000000000 --- a/nova/tests/unit/virt/xenapi/test_xenapi.py +++ /dev/null @@ -1,3860 +0,0 @@ -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Test suite for XenAPI.""" - -import ast -import base64 -import contextlib -import copy -import functools -import os -import re - -import mock -from os_xenapi.client import host_management -from os_xenapi.client import session -from os_xenapi.client import XenAPI -from oslo_concurrency import lockutils -from oslo_config import fixture as config_fixture -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils.fixture import uuidsentinel as uuids -from oslo_utils import uuidutils -import testtools - -from nova.compute import api as compute_api -from nova.compute import manager -from nova.compute import power_state -from nova.compute import task_states -from nova.compute import utils as compute_utils -from nova.compute import vm_states -import nova.conf -from nova import context -from nova import crypto -from nova.db import api as db -from nova import exception -from nova.network import model as network_model -from nova import objects -from nova.objects import base -from nova.objects import fields as obj_fields -from nova import test -from nova.tests import fixtures -from nova.tests.unit.api.openstack import fakes -from nova.tests.unit.db import fakes as db_fakes -from nova.tests.unit import fake_diagnostics -from nova.tests.unit import fake_flavor -from nova.tests.unit import fake_instance -from nova.tests.unit import fake_network -from nova.tests.unit import fake_processutils -import nova.tests.unit.image.fake as fake_image -from nova.tests.unit import matchers -from nova.tests.unit.objects import test_aggregate -from nova.tests.unit.objects import test_diagnostics -from nova.tests.unit import utils as test_utils -from nova.tests.unit.virt.xenapi import stubs -from nova.virt import fake -from nova.virt.xenapi import agent -from nova.virt.xenapi import driver as xenapi_conn -from nova.virt.xenapi import fake as xenapi_fake -from nova.virt.xenapi import host -from nova.virt.xenapi.image import glance -from nova.virt.xenapi import pool -from nova.virt.xenapi import pool_states -from nova.virt.xenapi import vm_utils -from nova.virt.xenapi import vmops -from nova.virt.xenapi import volume_utils - -LOG = logging.getLogger(__name__) - -CONF = nova.conf.CONF - -IMAGE_MACHINE = uuids.image_ref -IMAGE_KERNEL = uuids.image_kernel_id -IMAGE_RAMDISK = uuids.image_ramdisk_id -IMAGE_RAW = uuids.image_raw -IMAGE_VHD = uuids.image_vhd -IMAGE_ISO = uuids.image_iso -IMAGE_IPXE_ISO = uuids.image_ipxe_iso -IMAGE_FROM_VOLUME = uuids.image_from_volume - -IMAGE_FIXTURES = { - IMAGE_MACHINE: { - 'image_meta': {'name': 'fakemachine', 'size': 0, - 'disk_format': 'ami', - 'container_format': 'ami', - 'id': 'fake-image'}, - }, - IMAGE_KERNEL: { - 'image_meta': {'name': 'fakekernel', 'size': 0, - 'disk_format': 'aki', - 'container_format': 'aki', - 'id': 'fake-kernel'}, - }, - IMAGE_RAMDISK: { - 'image_meta': {'name': 'fakeramdisk', 'size': 0, - 'disk_format': 'ari', - 'container_format': 'ari', - 'id': 'fake-ramdisk'}, - }, - IMAGE_RAW: { - 'image_meta': {'name': 'fakeraw', 'size': 0, - 'disk_format': 'raw', - 'container_format': 'bare', - 'id': 'fake-image-raw'}, - 
}, - IMAGE_VHD: { - 'image_meta': {'name': 'fakevhd', 'size': 0, - 'disk_format': 'vhd', - 'container_format': 'ovf', - 'id': 'fake-image-vhd'}, - }, - IMAGE_ISO: { - 'image_meta': {'name': 'fakeiso', 'size': 0, - 'disk_format': 'iso', - 'container_format': 'bare', - 'id': 'fake-image-iso'}, - }, - IMAGE_IPXE_ISO: { - 'image_meta': {'name': 'fake_ipxe_iso', 'size': 0, - 'disk_format': 'iso', - 'container_format': 'bare', - 'id': 'fake-image-pxe', - 'properties': {'ipxe_boot': 'true'}}, - }, - IMAGE_FROM_VOLUME: { - 'image_meta': {'name': 'fake_ipxe_iso', - 'id': 'fake-image-volume', - 'properties': {'foo': 'bar'}}, - }, -} - - -def get_session(): - return xenapi_fake.SessionBase('http://localhost', 'root', 'test_pass') - - -def set_image_fixtures(): - image_service = fake_image.FakeImageService() - image_service.images.clear() - for image_id, image_meta in IMAGE_FIXTURES.items(): - image_meta = image_meta['image_meta'] - image_meta['id'] = image_id - image_service.create(None, image_meta) - - -def get_fake_device_info(): - # FIXME: 'sr_uuid', 'introduce_sr_keys', sr_type and vdi_uuid - # can be removed from the dict when LP bug #1087308 is fixed - fake_vdi_ref = xenapi_fake.create_vdi('fake-vdi', None) - fake_vdi_uuid = xenapi_fake.get_record('VDI', fake_vdi_ref)['uuid'] - fake = {'block_device_mapping': - [{'connection_info': {'driver_volume_type': 'iscsi', - 'data': {'sr_uuid': 'falseSR', - 'introduce_sr_keys': ['sr_type'], - 'sr_type': 'iscsi', - 'vdi_uuid': fake_vdi_uuid, - 'target_discovered': False, - 'target_iqn': 'foo_iqn:foo_volid', - 'target_portal': 'localhost:3260', - 'volume_id': 'foo_volid', - 'target_lun': 1, - 'auth_password': 'my-p@55w0rd', - 'auth_username': 'johndoe', - 'auth_method': u'CHAP'}, }, - 'mount_device': 'vda', - 'delete_on_termination': False}, ], - 'root_device_name': '/dev/sda', - 'ephemerals': [], - 'swap': None, } - return fake - - -def stub_vm_utils_with_vdi_attached(function): - """vm_utils.with_vdi_attached needs to be stubbed out because it - calls down to the filesystem to attach a vdi. This provides a - decorator to handle that. 
- """ - @functools.wraps(function) - def decorated_function(self, *args, **kwargs): - @contextlib.contextmanager - def fake_vdi_attached(*args, **kwargs): - fake_dev = 'fakedev' - yield fake_dev - - def fake_image_download(*args, **kwargs): - pass - - orig_vdi_attached = vm_utils.vdi_attached - orig_image_download = fake_image._FakeImageService.download - try: - vm_utils.vdi_attached = fake_vdi_attached - fake_image._FakeImageService.download = fake_image_download - return function(self, *args, **kwargs) - finally: - fake_image._FakeImageService.download = orig_image_download - vm_utils.vdi_attached = orig_vdi_attached - - return decorated_function - - -def create_instance_with_system_metadata(context, instance_values): - inst = objects.Instance(context=context, - system_metadata={}) - for k, v in instance_values.items(): - setattr(inst, k, v) - inst.flavor = objects.Flavor.get_by_id(context, - instance_values['instance_type_id']) - inst.old_flavor = None - inst.new_flavor = None - inst.create() - inst.pci_devices = objects.PciDeviceList(objects=[]) - - return inst - - -class XenAPIVolumeTestCase(stubs.XenAPITestBaseNoDB): - """Unit tests for Volume operations.""" - def setUp(self): - super(XenAPIVolumeTestCase, self).setUp() - self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF)) - self.fixture.config(disable_process_locking=True, - group='oslo_concurrency') - self.flags(connection_url='http://localhost', - connection_password='test_pass', - group='xenserver') - - self.instance = fake_instance.fake_db_instance(name='foo') - - @classmethod - def _make_connection_info(cls): - target_iqn = 'iqn.2010-10.org.openstack:volume-00000001' - return {'driver_volume_type': 'iscsi', - 'data': {'volume_id': 1, - 'target_iqn': target_iqn, - 'target_portal': '127.0.0.1:3260,fake', - 'target_lun': None, - 'auth_method': 'CHAP', - 'auth_username': 'username', - 'auth_password': 'password'}} - - def test_attach_volume(self): - # This shows how to test Ops classes' methods. - stubs.stubout_session(self, stubs.FakeSessionForVolumeTests) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - vm = xenapi_fake.create_vm(self.instance['name'], 'Running') - conn_info = self._make_connection_info() - self.assertIsNone( - conn.attach_volume(None, conn_info, self.instance, '/dev/sdc')) - - # check that the VM has a VBD attached to it - # Get XenAPI record for VBD - vbds = xenapi_fake.get_all('VBD') - vbd = xenapi_fake.get_record('VBD', vbds[0]) - vm_ref = vbd['VM'] - self.assertEqual(vm_ref, vm) - - def test_attach_volume_raise_exception(self): - # This shows how to test when exceptions are raised. 
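# The save/replace/restore-in-finally pattern used by
# stub_vm_utils_with_vdi_attached above generalizes to a small context
# manager; a generic sketch, not part of the original suite:
#
#     import contextlib
#
#     @contextlib.contextmanager
#     def swapped_attr(owner, name, replacement):
#         original = getattr(owner, name)
#         setattr(owner, name, replacement)
#         try:
#             yield
#         finally:
#             # Restore even if the wrapped code raises, as the
#             # decorator above does.
#             setattr(owner, name, original)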
- stubs.stubout_session(self, stubs.FakeSessionForVolumeFailedTests) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - xenapi_fake.create_vm(self.instance['name'], 'Running') - self.assertRaises(exception.VolumeDriverNotFound, - conn.attach_volume, - None, {'driver_volume_type': 'nonexist'}, - self.instance, '/dev/sdc') - - -# FIXME(sirp): convert this to use XenAPITestBaseNoDB -class XenAPIVMTestCase(stubs.XenAPITestBase, - test_diagnostics.DiagnosticsComparisonMixin): - """Unit tests for VM operations.""" - def setUp(self): - super(XenAPIVMTestCase, self).setUp() - self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF)) - self.fixture.config(disable_process_locking=True, - group='oslo_concurrency') - self.flags(instance_name_template='%d') - self.flags(connection_url='http://localhost', - connection_password='test_pass', - group='xenserver') - db_fakes.stub_out_db_instance_api(self) - xenapi_fake.create_network('fake', 'fake_br1') - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - self.stubout_get_this_vm_uuid() - self.stub_out_vm_methods() - fake_processutils.stub_out_processutils_execute(self) - self.user_id = 'fake' - self.project_id = fakes.FAKE_PROJECT_ID - self.context = context.RequestContext(self.user_id, self.project_id) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - self.conn._session.is_local_connection = False - - fake_image.stub_out_image_service(self) - set_image_fixtures() - self.stubout_image_service_download() - self.stubout_stream_disk() - - self.stub_out('nova.virt.xenapi.vmops.VMOps._inject_instance_metadata', - lambda self, instance, vm: None) - - def fake_safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref): - name_label = "fakenamelabel" - disk_type = "fakedisktype" - virtual_size = 777 - return vm_utils.create_vdi( - session, sr_ref, instance, name_label, disk_type, - virtual_size) - self.stub_out('nova.virt.xenapi.vm_utils._safe_copy_vdi', - fake_safe_copy_vdi) - - def fake_unpause_and_wait(self, vm_ref, instance, power_on): - self._update_last_dom_id(vm_ref) - self.stub_out('nova.virt.xenapi.vmops.VMOps._unpause_and_wait', - fake_unpause_and_wait) - - def tearDown(self): - fake_image.FakeImageService_reset() - super(XenAPIVMTestCase, self).tearDown() - - def stubout_instance_snapshot(self): - self.stub_out('nova.virt.xenapi.vm_utils._fetch_image', - lambda context, session, instance, name_label, - image, type, image_handler: { - 'root': dict(uuid=stubs._make_fake_vdi(), - file=None), - 'kernel': dict(uuid=stubs._make_fake_vdi(), - file=None), - 'ramdisk': dict(uuid=stubs._make_fake_vdi(), - file=None)}) - self.stub_out('nova.virt.xenapi.vm_utils._wait_for_vhd_coalesce', - lambda *args: ("fakeparent", "fakebase")) - - def stubout_image_service_download(self): - self.stub_out('nova.tests.unit.image.fake._FakeImageService.download', - lambda *args, **kwargs: None) - - def stubout_stream_disk(self): - self.stub_out('nova.virt.xenapi.vm_utils._stream_disk', - lambda *args, **kwargs: None) - - def stubout_is_snapshot(self): - """Always returns true - - xenapi fake driver does not create vmrefs for snapshots. 
- """ - self.stub_out('nova.virt.xenapi.vm_utils.is_snapshot', - lambda *args: True) - - def stubout_fetch_disk_image(self, raise_failure=False): - """Simulates a failure in fetch image_glance_disk.""" - - def _fake_fetch_disk_image(context, session, instance, name_label, - image, image_type): - if raise_failure: - raise XenAPI.Failure("Test Exception raised by " - "fake fetch_image_glance_disk") - elif image_type == vm_utils.ImageType.KERNEL: - filename = "kernel" - elif image_type == vm_utils.ImageType.RAMDISK: - filename = "ramdisk" - else: - filename = "unknown" - - vdi_type = vm_utils.ImageType.to_string(image_type) - return {vdi_type: dict(uuid=None, file=filename)} - - self.stub_out('nova.virt.xenapi.vm_utils._fetch_disk_image', - _fake_fetch_disk_image) - - def stubout_create_vm(self): - """Simulates a failure in create_vm.""" - - def f(*args): - raise XenAPI.Failure("Test Exception raised by fake create_vm") - self.stub_out('nova.virt.xenapi.vm_utils.create_vm', f) - - def stubout_attach_disks(self): - """Simulates a failure in _attach_disks.""" - - def f(*args): - raise XenAPI.Failure("Test Exception raised by fake _attach_disks") - self.stub_out('nova.virt.xenapi.vmops.VMOps._attach_disks', f) - - def stub_out_vm_methods(self): - self.stub_out('nova.virt.xenapi.vmops.VMOps._acquire_bootlock', - lambda self, vm: None) - self.stub_out('nova.virt.xenapi.vmops.VMOps._release_bootlock', - lambda self, vm: None) - self.stub_out('nova.virt.xenapi.vm_utils.generate_ephemeral', - lambda *args: None) - self.stub_out('nova.virt.xenapi.vm_utils._wait_for_device', - lambda session, dev, dom0, max_seconds: None) - - def test_init_host(self): - session = get_session() - vm = vm_utils._get_this_vm_ref(session) - # Local root disk - vdi0 = xenapi_fake.create_vdi('compute', None) - vbd0 = xenapi_fake.create_vbd(vm, vdi0) - # Instance VDI - vdi1 = xenapi_fake.create_vdi('instance-aaaa', None, - other_config={'nova_instance_uuid': 'aaaa'}) - xenapi_fake.create_vbd(vm, vdi1) - # Only looks like instance VDI - vdi2 = xenapi_fake.create_vdi('instance-bbbb', None) - vbd2 = xenapi_fake.create_vbd(vm, vdi2) - - self.conn.init_host(None) - self.assertEqual(set(xenapi_fake.get_all('VBD')), set([vbd0, vbd2])) - - @mock.patch.object(objects.Instance, 'name', - new=mock.PropertyMock(return_value='foo')) - @mock.patch.object(vm_utils, 'lookup', return_value=True) - def test_instance_exists(self, mock_lookup): - instance = objects.Instance(uuid=uuids.instance) - self.assertTrue(self.conn.instance_exists(instance)) - mock_lookup.assert_called_once_with(mock.ANY, 'foo') - - @mock.patch.object(objects.Instance, 'name', - new=mock.PropertyMock(return_value='bar')) - @mock.patch.object(vm_utils, 'lookup', return_value=None) - def test_instance_not_exists(self, mock_lookup): - instance = objects.Instance(uuid=uuids.instance) - self.assertFalse(self.conn.instance_exists(instance)) - mock_lookup.assert_called_once_with(mock.ANY, 'bar') - - def test_list_instances_0(self): - instances = self.conn.list_instances() - self.assertEqual(instances, []) - - def test_list_instance_uuids_0(self): - instance_uuids = self.conn.list_instance_uuids() - self.assertEqual(instance_uuids, []) - - def test_list_instance_uuids(self): - uuids = [] - for x in range(1, 4): - instance = self._create_instance() - uuids.append(instance['uuid']) - instance_uuids = self.conn.list_instance_uuids() - self.assertEqual(len(uuids), len(instance_uuids)) - self.assertEqual(set(uuids), set(instance_uuids)) - - def test_get_rrd_server(self): - 
self.flags(connection_url='myscheme://myaddress/', - group='xenserver') - server_info = vm_utils._get_rrd_server() - self.assertEqual(server_info[0], 'myscheme') - self.assertEqual(server_info[1], 'myaddress') - - expected_raw_diagnostics = { - 'vbd_xvdb_write': '0.0', - 'memory_target': '4294967296.0000', - 'memory_internal_free': '1415564.0000', - 'memory': '4294967296.0000', - 'vbd_xvda_write': '0.0', - 'cpu0': '0.0042', - 'vif_0_tx': '287.4134', - 'vbd_xvda_read': '0.0', - 'vif_0_rx': '1816.0144', - 'vif_2_rx': '0.0', - 'vif_2_tx': '0.0', - 'vbd_xvdb_read': '0.0', - 'last_update': '1328795567', - } - - @mock.patch.object(vm_utils, '_get_rrd') - def test_get_diagnostics(self, mock_get_rrd): - def fake_get_rrd(host, vm_uuid): - path = os.path.dirname(os.path.realpath(__file__)) - with open(os.path.join(path, 'vm_rrd.xml')) as f: - return re.sub(r'\s', '', f.read()) - mock_get_rrd.side_effect = fake_get_rrd - - expected = self.expected_raw_diagnostics - instance = self._create_instance() - actual = self.conn.get_diagnostics(instance) - self.assertThat(actual, matchers.DictMatches(expected)) - - def test_get_instance_diagnostics(self): - expected = fake_diagnostics.fake_diagnostics_obj( - config_drive=False, - state='running', - driver='xenapi', - cpu_details=[{'id': 0, 'utilisation': 11}, - {'id': 1, 'utilisation': 22}, - {'id': 2, 'utilisation': 33}, - {'id': 3, 'utilisation': 44}], - nic_details=[{'mac_address': 'DE:AD:BE:EF:00:01', - 'rx_rate': 50, - 'tx_rate': 100}], - disk_details=[{'read_bytes': 50, 'write_bytes': 100}], - memory_details={'maximum': 8192, 'used': 3072}) - - instance = self._create_instance(obj=True) - actual = self.conn.get_instance_diagnostics(instance) - - self.assertDiagnosticsEqual(expected, actual) - - def _test_get_instance_diagnostics_failure(self, **kwargs): - instance = self._create_instance(obj=True) - - with mock.patch.object(xenapi_fake.SessionBase, 'VM_query_data_source', - **kwargs): - actual = self.conn.get_instance_diagnostics(instance) - - expected = fake_diagnostics.fake_diagnostics_obj( - config_drive=False, - state='running', - driver='xenapi', - cpu_details=[{'id': 0}, {'id': 1}, {'id': 2}, {'id': 3}], - nic_details=[{'mac_address': 'DE:AD:BE:EF:00:01'}], - disk_details=[{}], - memory_details={'maximum': None, 'used': None}) - - self.assertDiagnosticsEqual(expected, actual) - - def test_get_instance_diagnostics_xenapi_exception(self): - self._test_get_instance_diagnostics_failure( - side_effect=XenAPI.Failure('')) - - def test_get_instance_diagnostics_nan_value(self): - self._test_get_instance_diagnostics_failure( - return_value=float('NaN')) - - def test_get_vnc_console(self): - instance = self._create_instance(obj=True) - session = get_session() - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - vm_ref = vm_utils.lookup(session, instance['name']) - - console = conn.get_vnc_console(self.context, instance) - - # Note(sulo): We don't care about session id in test - # they will always differ so strip that out - actual_path = console.internal_access_path.split('&')[0] - expected_path = "/console?ref=%s" % str(vm_ref) - - self.assertEqual(expected_path, actual_path) - - def test_get_vnc_console_for_rescue(self): - instance = self._create_instance(obj=True) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue', - 'Running') - # Set instance state to rescued - instance['vm_state'] = 'rescued' - - console = conn.get_vnc_console(self.context, instance) - - # Note(sulo): 
We don't care about session id in test - # they will always differ so strip that out - actual_path = console.internal_access_path.split('&')[0] - expected_path = "/console?ref=%s" % str(rescue_vm) - - self.assertEqual(expected_path, actual_path) - - def test_get_vnc_console_instance_not_ready(self): - instance = self._create_instance(obj=True, spawn=False) - instance.vm_state = 'building' - - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - self.assertRaises(exception.InstanceNotFound, - conn.get_vnc_console, self.context, instance) - - def test_get_vnc_console_rescue_not_ready(self): - instance = self._create_instance(obj=True, spawn=False) - instance.vm_state = 'rescued' - - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - self.assertRaises(exception.InstanceNotReady, - conn.get_vnc_console, self.context, instance) - - @mock.patch.object(vm_utils, 'create_vbd') - def test_instance_snapshot_fails_with_no_primary_vdi( - self, mock_create_vbd): - def create_bad_vbd(session, vm_ref, vdi_ref, userdevice, - vbd_type='disk', read_only=False, bootable=False, - osvol=False): - vbd_rec = {'VM': vm_ref, - 'VDI': vdi_ref, - 'userdevice': 'fake', - 'currently_attached': False} - vbd_ref = xenapi_fake._create_object('VBD', vbd_rec) - xenapi_fake.after_VBD_create(vbd_ref, vbd_rec) - return vbd_ref - - mock_create_vbd.side_effect = create_bad_vbd - self.stubout_instance_snapshot() - instance = self._create_instance() - - image_id = "my_snapshot_id" - self.assertRaises(exception.NovaException, self.conn.snapshot, - self.context, instance, image_id, - lambda *args, **kwargs: None) - - @mock.patch.object(glance.GlanceStore, 'upload_image') - def test_instance_snapshot(self, mock_upload_image): - expected_calls = [ - {'args': (), - 'kwargs': - {'task_state': task_states.IMAGE_PENDING_UPLOAD}}, - {'args': (), - 'kwargs': - {'task_state': task_states.IMAGE_UPLOADING, - 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}] - func_call_matcher = matchers.FunctionCallMatcher(expected_calls) - image_id = "my_snapshot_id" - - self.stubout_instance_snapshot() - self.stubout_is_snapshot() - - instance = self._create_instance() - - self.fake_upload_called = False - - def fake_image_upload(ctx, session, inst, img_id, vdi_uuids): - self.fake_upload_called = True - self.assertEqual(ctx, self.context) - self.assertEqual(inst, instance) - self.assertIsInstance(vdi_uuids, list) - self.assertEqual(img_id, image_id) - - mock_upload_image.side_effect = fake_image_upload - - self.conn.snapshot(self.context, instance, image_id, - func_call_matcher.call) - - # Ensure VM was torn down - vm_labels = [] - for vm_ref in xenapi_fake.get_all('VM'): - vm_rec = xenapi_fake.get_record('VM', vm_ref) - if not vm_rec["is_control_domain"]: - vm_labels.append(vm_rec["name_label"]) - - self.assertEqual(vm_labels, [instance['name']]) - - # Ensure VBDs were torn down - vbd_labels = [] - for vbd_ref in xenapi_fake.get_all('VBD'): - vbd_rec = xenapi_fake.get_record('VBD', vbd_ref) - vbd_labels.append(vbd_rec["vm_name_label"]) - - self.assertEqual(vbd_labels, [instance['name']]) - - # Ensure task states changed in correct order - self.assertIsNone(func_call_matcher.match()) - - # Ensure VDIs were torn down - for vdi_ref in xenapi_fake.get_all('VDI'): - vdi_rec = xenapi_fake.get_record('VDI', vdi_ref) - name_label = vdi_rec["name_label"] - self.assertFalse(name_label.endswith('snapshot')) - - self.assertTrue(self.fake_upload_called) - - def create_vm_record(self, conn, os_type, name): - instances = conn.list_instances() - 
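# The freshly spawned instance should be the only one the driver - # reports; its XenAPI record is then captured for the later checks. -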
self.assertEqual(instances, [name]) - - # Get Nova record for VM - vm_info = conn.get_info({'name': name}) - # Get XenAPI record for VM - vms = [rec for rec - in xenapi_fake.get_all_records('VM').values() - if not rec['is_control_domain']] - vm = vms[0] - self.vm_info = vm_info - self.vm = vm - - def check_vm_record(self, conn, instance_type_id, check_injection): - flavor = objects.Flavor.get_by_id(self.context, instance_type_id) - mem_kib = int(flavor['memory_mb']) << 10 - mem_bytes = str(mem_kib << 10) - vcpus = flavor['vcpus'] - vcpu_weight = flavor['vcpu_weight'] - - self.assertEqual(self.vm['memory_static_max'], mem_bytes) - self.assertEqual(self.vm['memory_dynamic_max'], mem_bytes) - self.assertEqual(self.vm['memory_dynamic_min'], mem_bytes) - self.assertEqual(self.vm['VCPUs_max'], str(vcpus)) - self.assertEqual(self.vm['VCPUs_at_startup'], str(vcpus)) - if vcpu_weight is None: - self.assertEqual(self.vm['VCPUs_params'], {}) - else: - self.assertEqual(self.vm['VCPUs_params'], - {'weight': str(vcpu_weight), 'cap': '0'}) - - # Check that the VM is running according to Nova - self.assertEqual(self.vm_info.state, power_state.RUNNING) - - # Check that the VM is running according to XenAPI. - self.assertEqual(self.vm['power_state'], 'Running') - - if check_injection: - xenstore_data = self.vm['xenstore_data'] - self.assertNotIn('vm-data/hostname', xenstore_data) - key = 'vm-data/networking/DEADBEEF0001' - xenstore_value = xenstore_data[key] - tcpip_data = ast.literal_eval(xenstore_value) - self.assertJsonEqual({'broadcast': '192.168.1.255', - 'dns': ['192.168.1.4', '192.168.1.3'], - 'gateway': '192.168.1.1', - 'gateway_v6': '2001:db8:0:1::1', - 'ip6s': [{'enabled': '1', - 'ip': '2001:db8:0:1:dcad:beff:feef:1', - 'netmask': 64, - 'gateway': '2001:db8:0:1::1'}], - 'ips': [{'enabled': '1', - 'ip': '192.168.1.100', - 'netmask': '255.255.255.0', - 'gateway': '192.168.1.1'}], - 'label': 'test1', - 'mac': 'DE:AD:BE:EF:00:01'}, tcpip_data) - - def check_vm_params_for_windows(self): - self.assertEqual(self.vm['platform']['nx'], 'true') - self.assertEqual(self.vm['HVM_boot_params'], {'order': 'dc'}) - self.assertEqual(self.vm['HVM_boot_policy'], 'BIOS order') - - # check that these are not set - self.assertEqual(self.vm['PV_args'], '') - self.assertEqual(self.vm['PV_bootloader'], '') - self.assertEqual(self.vm['PV_kernel'], '') - self.assertEqual(self.vm['PV_ramdisk'], '') - - def check_vm_params_for_linux(self): - self.assertEqual(self.vm['platform']['nx'], 'false') - self.assertEqual(self.vm['PV_args'], '') - self.assertEqual(self.vm['PV_bootloader'], 'pygrub') - - # check that these are not set - self.assertEqual(self.vm['PV_kernel'], '') - self.assertEqual(self.vm['PV_ramdisk'], '') - self.assertEqual(self.vm['HVM_boot_params'], {}) - self.assertEqual(self.vm['HVM_boot_policy'], '') - - def check_vm_params_for_linux_with_external_kernel(self): - self.assertEqual(self.vm['platform']['nx'], 'false') - self.assertEqual(self.vm['PV_args'], 'root=/dev/xvda1') - self.assertNotEqual(self.vm['PV_kernel'], '') - self.assertNotEqual(self.vm['PV_ramdisk'], '') - - # check that these are not set - self.assertEqual(self.vm['HVM_boot_params'], {}) - self.assertEqual(self.vm['HVM_boot_policy'], '') - - def _list_vdis(self): - session = get_session() - return session.call_xenapi('VDI.get_all') - - def _list_vms(self): - session = get_session() - return session.call_xenapi('VM.get_all') - - def _check_vdis(self, start_list, end_list): - for vdi_ref in end_list: - if vdi_ref not in start_list: - vdi_rec = 
xenapi_fake.get_record('VDI', vdi_ref) - # If the cache is turned on then the base disk will be - # there even after the cleanup - if 'other_config' in vdi_rec: - if 'image-id' not in vdi_rec['other_config']: - self.fail('Found unexpected VDI:%s' % vdi_ref) - else: - self.fail('Found unexpected VDI:%s' % vdi_ref) - - @mock.patch.object(vmops.VMOps, '_inject_instance_metadata') - def _test_spawn(self, image_ref, kernel_id, ramdisk_id, - mock_inject_instance_metadata, - instance_type_id="3", os_type="linux", - hostname="test", architecture="x86-64", instance_id=1, - injected_files=None, check_injection=False, - create_record=True, empty_dns=False, - block_device_info=None, - key_data=None): - if injected_files is None: - injected_files = [] - - if create_record: - flavor = objects.Flavor.get_by_id(self.context, - instance_type_id) - instance = objects.Instance(context=self.context) - instance.project_id = self.project_id - instance.user_id = self.user_id - instance.image_ref = image_ref - instance.kernel_id = kernel_id - instance.ramdisk_id = ramdisk_id - instance.root_gb = flavor.root_gb - instance.ephemeral_gb = flavor.ephemeral_gb - instance.instance_type_id = instance_type_id - instance.os_type = os_type - instance.hostname = hostname - instance.key_data = key_data - instance.architecture = architecture - instance.system_metadata = {} - - instance.flavor = flavor - instance.create() - else: - instance = objects.Instance.get_by_id(self.context, instance_id, - expected_attrs=['flavor']) - - network_info = fake_network.fake_get_instance_nw_info(self) - if empty_dns: - # NOTE(tr3buchet): this is a terrible way to do this... - network_info[0]['network']['subnets'][0]['dns'] = [] - - image_meta = objects.ImageMeta.from_dict( - IMAGE_FIXTURES[image_ref]["image_meta"]) - self.conn.spawn(self.context, instance, image_meta, injected_files, - 'herp', {}, network_info, block_device_info) - self.create_vm_record(self.conn, os_type, instance['name']) - self.check_vm_record(self.conn, instance_type_id, check_injection) - self.assertEqual(instance['os_type'], os_type) - self.assertEqual(instance['architecture'], architecture) - - @mock.patch.object(session.XenAPISession, 'call_plugin_serialized') - @mock.patch.object(vm_utils, 'get_sr_path', return_value='/sr/path') - def test_spawn_ipxe_iso_success(self, mock_get_sr_path, - mock_call_plugin_serialized): - self.flags(ipxe_network_name='test1', - ipxe_boot_menu_url='http://boot.example.com', - ipxe_mkisofs_cmd='/root/mkisofs', - group='xenserver') - - self._test_spawn(IMAGE_IPXE_ISO, None, None) - - mock_get_sr_path.assert_called_once_with(mock.ANY) - mock_call_plugin_serialized.assert_has_calls([ - mock.call('ipxe.py', 'inject', '/sr/path', mock.ANY, - 'http://boot.example.com', '192.168.1.100', - '255.255.255.0', '192.168.1.1', '192.168.1.3', - '/root/mkisofs'), - mock.call('partition_utils.py', 'make_partition', - 'fakedev', '2048', '-')]) - - @mock.patch.object(session.XenAPISession, 'call_plugin_serialized') - def test_spawn_ipxe_iso_no_network_name(self, mock_call_plugin_serialized): - self.flags(ipxe_network_name=None, - ipxe_boot_menu_url='http://boot.example.com', - group='xenserver') - - self._test_spawn(IMAGE_IPXE_ISO, None, None) - self._check_call_plugin_serialized(mock_call_plugin_serialized) - - @mock.patch.object(session.XenAPISession, 'call_plugin_serialized') - def test_spawn_ipxe_iso_no_boot_menu_url( - self, mock_call_plugin_serialized): - self.flags(ipxe_network_name='test1', - ipxe_boot_menu_url=None, - group='xenserver') - - 
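# Without a boot menu URL the ipxe.py 'inject' call must be skipped; - # _check_call_plugin_serialized() verifies that only the generic - # plugin calls were made. -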
self._test_spawn(IMAGE_IPXE_ISO, None, None) - self._check_call_plugin_serialized(mock_call_plugin_serialized) - - def _check_call_plugin_serialized(self, mock_call_plugin_serialized): - vifs = xenapi_fake.get_all_records('VIF') - iface_id = vifs[list(vifs)[0]]['other_config']['neutron-port-id'] - - def _get_qbr_name(iface_id): - return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN] - - def _get_veth_pair_names(iface_id): - return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN], - ("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN]) - - def _get_patch_port_pair_names(iface_id): - return (("vif%s" % iface_id)[:network_model.NIC_NAME_LEN], - ("tap%s" % iface_id)[:network_model.NIC_NAME_LEN]) - - # ipxe inject shouldn't be called - call1 = mock.call('partition_utils.py', 'make_partition', 'fakedev', - '2048', '-') - linux_br_name = _get_qbr_name(iface_id) - qvb_name, qvo_name = _get_veth_pair_names(iface_id) - patch_port1, tap_name = _get_patch_port_pair_names(iface_id) - - args = {'cmd': 'ip_link_get_dev', - 'args': {'device_name': linux_br_name} - } - call2 = mock.call('xenhost.py', 'network_config', args) - - args = {'cmd': 'ip_link_get_dev', - 'args': {'device_name': qvo_name} - } - call3 = mock.call('xenhost.py', 'network_config', args) - - args = {'cmd': 'ip_link_get_dev', - 'args': {'device_name': tap_name} - } - call4 = mock.call('xenhost.py', 'network_config', args) - mock_call_plugin_serialized.assert_has_calls([call1, - call2, - call3, - call4]) - - @mock.patch.object(session.XenAPISession, 'call_plugin_serialized') - def test_spawn_ipxe_iso_unknown_network_name( - self, mock_call_plugin_serialized): - self.flags(ipxe_network_name='test2', - ipxe_boot_menu_url='http://boot.example.com', - group='xenserver') - - self._test_spawn(IMAGE_IPXE_ISO, None, None) - self._check_call_plugin_serialized(mock_call_plugin_serialized) - - def test_spawn_empty_dns(self): - # Test spawning with an empty dns list. - self._test_spawn(IMAGE_VHD, None, None, - os_type="linux", architecture="x86-64", - empty_dns=True) - self.check_vm_params_for_linux() - - def test_spawn_not_enough_memory(self): - self.assertRaises(exception.InsufficientFreeMemory, self._test_spawn, - IMAGE_MACHINE, IMAGE_KERNEL, - IMAGE_RAMDISK, instance_type_id="4") # m1.xlarge - - def test_spawn_fail_cleanup_1(self): - """Simulates an error while downloading an image. - - Verifies that the VM and VDIs created are properly cleaned up. - """ - vdi_recs_start = self._list_vdis() - start_vms = self._list_vms() - self.stubout_fetch_disk_image(raise_failure=True) - self.assertRaises(XenAPI.Failure, self._test_spawn, - IMAGE_MACHINE, IMAGE_KERNEL, IMAGE_RAMDISK) - # No additional VDI should be found. - vdi_recs_end = self._list_vdis() - end_vms = self._list_vms() - self._check_vdis(vdi_recs_start, vdi_recs_end) - # No additional VMs should be found. - self.assertEqual(start_vms, end_vms) - - def test_spawn_fail_cleanup_2(self): - """Simulates an error while creating VM record. - - Verifies that the VM and VDIs created are properly cleaned up. - """ - vdi_recs_start = self._list_vdis() - start_vms = self._list_vms() - self.stubout_create_vm() - self.assertRaises(XenAPI.Failure, self._test_spawn, - IMAGE_MACHINE, IMAGE_KERNEL, IMAGE_RAMDISK) - # No additional VDI should be found. - vdi_recs_end = self._list_vdis() - end_vms = self._list_vms() - self._check_vdis(vdi_recs_start, vdi_recs_end) - # No additional VMs should be found. 
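- # (start_vms was captured before the spawn attempt and end_vms after - # the failure, so any leaked VM would show up in the comparison.)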
- self.assertEqual(start_vms, end_vms) - - def test_spawn_fail_cleanup_3(self): - """Simulates an error while attaching disks. - - Verifies that the VM and VDIs created are properly cleaned up. - """ - self.stubout_attach_disks() - vdi_recs_start = self._list_vdis() - start_vms = self._list_vms() - self.assertRaises(XenAPI.Failure, self._test_spawn, - IMAGE_MACHINE, IMAGE_KERNEL, IMAGE_RAMDISK) - # No additional VDI should be found. - vdi_recs_end = self._list_vdis() - end_vms = self._list_vms() - self._check_vdis(vdi_recs_start, vdi_recs_end) - # No additional VMs should be found. - self.assertEqual(start_vms, end_vms) - - def test_spawn_raw_glance(self): - self._test_spawn(IMAGE_RAW, None, None, os_type=None) - self.check_vm_params_for_windows() - - def test_spawn_vhd_glance_linux(self): - self._test_spawn(IMAGE_VHD, None, None, - os_type="linux", architecture="x86-64") - self.check_vm_params_for_linux() - - @mock.patch('nova.privsep.fs.mkfs') - def test_spawn_vhd_glance_windows(self, fake_mkfs): - self._test_spawn(IMAGE_VHD, None, None, - os_type="windows", architecture="i386", - instance_type_id=5) - self.check_vm_params_for_windows() - - def test_spawn_iso_glance(self): - self._test_spawn(IMAGE_ISO, None, None, - os_type="windows", architecture="i386") - self.check_vm_params_for_windows() - - @mock.patch.object(vm_utils, '_fetch_disk_image') - def test_spawn_glance(self, mock_fetch_disk_image): - - def fake_fetch_disk_image(context, session, instance, name_label, - image_id, image_type): - sr_ref = vm_utils.safe_find_sr(session) - image_type_str = vm_utils.ImageType.to_string(image_type) - vdi_ref = vm_utils.create_vdi(session, sr_ref, instance, - name_label, image_type_str, "20") - vdi_role = vm_utils.ImageType.get_role(image_type) - vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref) - return {vdi_role: dict(uuid=vdi_uuid, file=None)} - mock_fetch_disk_image.side_effect = fake_fetch_disk_image - - self._test_spawn(IMAGE_MACHINE, - IMAGE_KERNEL, - IMAGE_RAMDISK) - self.check_vm_params_for_linux_with_external_kernel() - - def test_spawn_boot_from_volume_no_glance_image_meta(self): - dev_info = get_fake_device_info() - self._test_spawn(IMAGE_FROM_VOLUME, None, None, - block_device_info=dev_info) - - def test_spawn_boot_from_volume_with_image_meta(self): - dev_info = get_fake_device_info() - self._test_spawn(IMAGE_VHD, None, None, - block_device_info=dev_info) - - @testtools.skipIf(test_utils.is_osx(), - 'IPv6 pretty-printing broken on OSX, see bug 1409135') - @mock.patch.object(nova.privsep.path, 'readlink') - @mock.patch.object(nova.privsep.path, 'writefile') - @mock.patch.object(nova.privsep.path, 'makedirs') - @mock.patch.object(nova.privsep.path, 'chown') - @mock.patch.object(nova.privsep.path, 'chmod') - @mock.patch.object(nova.privsep.fs, 'mount', return_value=(None, None)) - @mock.patch.object(nova.privsep.fs, 'umount') - def test_spawn_netinject_file(self, umount, mount, chmod, chown, mkdir, - write_file, read_link): - self.flags(flat_injected=True) - db_fakes.stub_out_db_instance_api(self, injected=True) - - self._test_spawn(IMAGE_MACHINE, - IMAGE_KERNEL, - IMAGE_RAMDISK, - check_injection=True) - read_link.assert_called() - mkdir.assert_called() - chown.assert_called() - chmod.assert_called() - write_file.assert_called() - - @testtools.skipIf(test_utils.is_osx(), - 'IPv6 pretty-printing broken on OSX, see bug 1409135') - def test_spawn_netinject_xenstore(self): - db_fakes.stub_out_db_instance_api(self, injected=True) - - self._tee_executed = False - - def 
_mount_handler(cmd, *ignore_args, **ignore_kwargs): - # When mounting, create real files under the mountpoint to simulate - # files in the mounted filesystem - - # mount point will be the last item of the command list - self._tmpdir = cmd[len(cmd) - 1] - LOG.debug('Creating files in %s to simulate guest agent', - self._tmpdir) - os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin')) - # Touch the file using open - open(os.path.join(self._tmpdir, 'usr', 'sbin', - 'xe-update-networking'), 'w').close() - return '', '' - - def _umount_handler(cmd, *ignore_args, **ignore_kwargs): - # Umount would normally make files in the mounted filesystem - # disappear, so do that here - LOG.debug('Removing simulated guest agent files in %s', - self._tmpdir) - os.remove(os.path.join(self._tmpdir, 'usr', 'sbin', - 'xe-update-networking')) - os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin')) - os.rmdir(os.path.join(self._tmpdir, 'usr')) - return '', '' - - def _tee_handler(cmd, *ignore_args, **ignore_kwargs): - self._tee_executed = True - return '', '' - - fake_processutils.fake_execute_set_repliers([ - (r'mount', _mount_handler), - (r'umount', _umount_handler), - (r'tee.*interfaces', _tee_handler)]) - self._test_spawn(IMAGE_MACHINE, IMAGE_KERNEL, - IMAGE_RAMDISK, check_injection=True) - - # tee must not run in this case, where an injection-capable - # guest agent is detected - self.assertFalse(self._tee_executed) - - @mock.patch.object(vmops.VMOps, '_inject_auto_disk_config') - def test_spawn_injects_auto_disk_config_to_xenstore( - self, mock_inject_auto_disk_config): - instance = self._create_instance(spawn=False, obj=True) - image_meta = objects.ImageMeta.from_dict( - IMAGE_FIXTURES[IMAGE_MACHINE]["image_meta"]) - self.conn.spawn(self.context, instance, image_meta, [], 'herp', {}, '') - - mock_inject_auto_disk_config.assert_called_once_with(instance, - mock.ANY) - - def test_spawn_with_network_qos(self): - self._create_instance() - for vif_ref in xenapi_fake.get_all('VIF'): - vif_rec = xenapi_fake.get_record('VIF', vif_ref) - self.assertEqual(vif_rec['qos_algorithm_type'], '') - self.assertEqual(vif_rec['qos_algorithm_params'], {}) - - @mock.patch.object(crypto, 'ssh_encrypt_text') - @mock.patch.object(stubs.FakeSessionForVMTests, - '_plugin_agent_inject_file') - def test_spawn_ssh_key_injection(self, mock_plugin_agent_inject_file, - mock_ssh_encrypt_text): - # Test spawning with key_data on an instance. Should use - # agent file injection. 
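- # use_agent_default=True opts in to the agent even though the image - # fixture carries no xenapi_use_agent property.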
- self.flags(use_agent_default=True, - group='xenserver') - actual_injected_files = [] - - def fake_inject_file(method, args): - path = base64.b64decode(args['b64_path']) - contents = base64.b64decode(args['b64_contents']) - actual_injected_files.append((path, contents)) - return jsonutils.dumps({'returncode': '0', 'message': 'success'}) - mock_plugin_agent_inject_file.side_effect = fake_inject_file - - def fake_encrypt_text(sshkey, new_pass): - self.assertEqual("ssh-rsa fake_keydata", sshkey) - return "fake" - mock_ssh_encrypt_text.side_effect = fake_encrypt_text - - expected_data = (b'\n# The following ssh key was injected by ' - b'Nova\nssh-rsa fake_keydata\n') - - injected_files = [(b'/root/.ssh/authorized_keys', expected_data)] - self._test_spawn(IMAGE_VHD, None, None, - os_type="linux", architecture="x86-64", - key_data='ssh-rsa fake_keydata') - self.assertEqual(actual_injected_files, injected_files) - - @mock.patch.object(crypto, 'ssh_encrypt_text', - side_effect=NotImplementedError("Should not be called")) - @mock.patch.object(stubs.FakeSessionForVMTests, - '_plugin_agent_inject_file') - def test_spawn_ssh_key_injection_non_rsa( - self, mock_plugin_agent_inject_file, mock_ssh_encrypt_text): - # Test spawning with key_data on an instance. Should use - # agent file injection. - self.flags(use_agent_default=True, - group='xenserver') - actual_injected_files = [] - - def fake_inject_file(method, args): - path = base64.b64decode(args['b64_path']) - contents = base64.b64decode(args['b64_contents']) - actual_injected_files.append((path, contents)) - return jsonutils.dumps({'returncode': '0', 'message': 'success'}) - mock_plugin_agent_inject_file.side_effect = fake_inject_file - - expected_data = (b'\n# The following ssh key was injected by ' - b'Nova\nssh-dsa fake_keydata\n') - - injected_files = [(b'/root/.ssh/authorized_keys', expected_data)] - self._test_spawn(IMAGE_VHD, None, None, - os_type="linux", architecture="x86-64", - key_data='ssh-dsa fake_keydata') - self.assertEqual(actual_injected_files, injected_files) - - @mock.patch.object(stubs.FakeSessionForVMTests, - '_plugin_agent_inject_file') - def test_spawn_injected_files(self, mock_plugin_agent_inject_file): - # Test spawning with injected_files. 
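- # Files are delivered through the agent's inject_file call as - # base64-encoded (path, contents) pairs; the fake below decodes and - # records them for the final assertion.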
- self.flags(use_agent_default=True, - group='xenserver') - actual_injected_files = [] - - def fake_inject_file(method, args): - path = base64.b64decode(args['b64_path']) - contents = base64.b64decode(args['b64_contents']) - actual_injected_files.append((path, contents)) - return jsonutils.dumps({'returncode': '0', 'message': 'success'}) - mock_plugin_agent_inject_file.side_effect = fake_inject_file - - injected_files = [(b'/tmp/foo', b'foobar')] - self._test_spawn(IMAGE_VHD, None, None, - os_type="linux", architecture="x86-64", - injected_files=injected_files) - self.check_vm_params_for_linux() - self.assertEqual(actual_injected_files, injected_files) - - @mock.patch('nova.db.api.agent_build_get_by_triple') - def test_spawn_agent_upgrade(self, mock_get): - self.flags(use_agent_default=True, - group='xenserver') - - mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64", - "hypervisor": "xen", "os": "windows", - "url": "url", "md5hash": "asdf", - 'created_at': None, 'updated_at': None, - 'deleted_at': None, 'deleted': False, - 'id': 1} - - self._test_spawn(IMAGE_VHD, None, None, - os_type="linux", architecture="x86-64") - - @mock.patch('nova.db.api.agent_build_get_by_triple') - def test_spawn_agent_upgrade_fails_silently(self, mock_get): - mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64", - "hypervisor": "xen", "os": "windows", - "url": "url", "md5hash": "asdf", - 'created_at': None, 'updated_at': None, - 'deleted_at': None, 'deleted': False, - 'id': 1} - - self._test_spawn_fails_silently_with(exception.AgentError, - method="_plugin_agent_agentupdate", failure="fake_error") - - @mock.patch.object(stubs.FakeSessionForVMTests, - '_plugin_agent_resetnetwork') - def test_spawn_with_resetnetwork_alternative_returncode( - self, mock_plugin_agent_resetnetwork): - self.flags(use_agent_default=True, - group='xenserver') - - def fake_resetnetwork(method, args): - # NOTE(johngarbutt): as returned by FreeBSD and Gentoo - return jsonutils.dumps({'returncode': '500', - 'message': 'success'}) - mock_plugin_agent_resetnetwork.side_effect = fake_resetnetwork - - self._test_spawn(IMAGE_VHD, None, None, - os_type="linux", architecture="x86-64") - self.assertTrue(mock_plugin_agent_resetnetwork.called) - - @mock.patch.object(compute_utils, 'add_instance_fault_from_exc') - def _test_spawn_fails_silently_with(self, expected_exception_cls, - mock_add_instance_fault_from_exc, - method="_plugin_agent_version", - failure=None, value=None): - self.flags(use_agent_default=True, - agent_version_timeout=0, - group='xenserver') - - def fake_agent_call(method, args): - if failure: - raise XenAPI.Failure([failure]) - else: - return value - - with mock.patch.object(stubs.FakeSessionForVMTests, method, - side_effect=fake_agent_call): - self._test_spawn(IMAGE_VHD, None, None, - os_type="linux", architecture="x86-64") - mock_add_instance_fault_from_exc.assert_called_once_with( - mock.ANY, mock.ANY, test.MatchType(expected_exception_cls), - exc_info=mock.ANY) - - def test_spawn_fails_silently_with_agent_timeout(self): - self._test_spawn_fails_silently_with(exception.AgentTimeout, - failure="TIMEOUT:fake") - - def test_spawn_fails_silently_with_agent_not_implemented(self): - self._test_spawn_fails_silently_with(exception.AgentNotImplemented, - failure="NOT IMPLEMENTED:fake") - - def test_spawn_fails_silently_with_agent_error(self): - self._test_spawn_fails_silently_with(exception.AgentError, - failure="fake_error") - - def test_spawn_fails_silently_with_agent_bad_return(self): - error = 
jsonutils.dumps({'returncode': -1, 'message': 'fake'}) - self._test_spawn_fails_silently_with(exception.AgentError, - value=error) - - def test_spawn_sets_last_dom_id(self): - self._test_spawn(IMAGE_VHD, None, None, - os_type="linux", architecture="x86-64") - self.assertEqual(self.vm['domid'], - self.vm['other_config']['last_dom_id']) - - def test_rescue(self): - instance = self._create_instance(spawn=False, obj=True) - xenapi_fake.create_vm(instance['name'], 'Running') - - session = get_session() - vm_ref = vm_utils.lookup(session, instance['name']) - - swap_vdi_ref = xenapi_fake.create_vdi('swap', None) - root_vdi_ref = xenapi_fake.create_vdi('root', None) - eph1_vdi_ref = xenapi_fake.create_vdi('eph', None) - eph2_vdi_ref = xenapi_fake.create_vdi('eph', None) - vol_vdi_ref = xenapi_fake.create_vdi('volume', None) - - xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=2) - xenapi_fake.create_vbd(vm_ref, root_vdi_ref, userdevice=0) - xenapi_fake.create_vbd(vm_ref, eph1_vdi_ref, userdevice=4) - xenapi_fake.create_vbd(vm_ref, eph2_vdi_ref, userdevice=5) - xenapi_fake.create_vbd(vm_ref, vol_vdi_ref, userdevice=6, - other_config={'osvol': True}) - - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - image_meta = objects.ImageMeta.from_dict( - {'id': IMAGE_VHD, - 'disk_format': 'vhd', - 'properties': {'vm_mode': 'xen'}}) - conn.rescue(self.context, instance, [], image_meta, '', None) - - vm = xenapi_fake.get_record('VM', vm_ref) - rescue_name = "%s-rescue" % vm["name_label"] - rescue_ref = vm_utils.lookup(session, rescue_name) - rescue_vm = xenapi_fake.get_record('VM', rescue_ref) - - vdi_refs = {} - for vbd_ref in rescue_vm['VBDs']: - vbd = xenapi_fake.get_record('VBD', vbd_ref) - vdi_refs[vbd['VDI']] = vbd['userdevice'] - - self.assertEqual('1', vdi_refs[root_vdi_ref]) - self.assertEqual('2', vdi_refs[swap_vdi_ref]) - self.assertEqual('4', vdi_refs[eph1_vdi_ref]) - self.assertEqual('5', vdi_refs[eph2_vdi_ref]) - self.assertNotIn(vol_vdi_ref, vdi_refs) - - def test_rescue_preserve_disk_on_failure(self): - # test that the original disk is preserved if rescue setup fails - # bug #1227898 - instance = self._create_instance(obj=True) - session = get_session() - image_meta = objects.ImageMeta.from_dict( - {'id': IMAGE_VHD, - 'disk_format': 'vhd', - 'properties': {'vm_mode': 'xen'}}) - vm_ref = vm_utils.lookup(session, instance['name']) - vdi_ref, vdi_rec = vm_utils.get_vdi_for_vm_safely(session, vm_ref) - - # raise an error in the spawn setup process and trigger the - # undo manager logic: - with mock.patch.object( - self.conn._vmops, '_start', - side_effect=test.TestingException('Start Error')): - self.assertRaises(test.TestingException, self.conn.rescue, - self.context, instance, [], image_meta, '', []) - - # confirm original disk still exists: - vdi_ref2, vdi_rec2 = vm_utils.get_vdi_for_vm_safely(session, - vm_ref) - self.assertEqual(vdi_ref, vdi_ref2) - self.assertEqual(vdi_rec['uuid'], vdi_rec2['uuid']) - - def test_unrescue(self): - instance = self._create_instance(obj=True) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - # Unrescue expects the original instance to be powered off - conn.power_off(instance) - xenapi_fake.create_vm(instance['name'] + '-rescue', 'Running') - conn.unrescue(self.context, instance) - - def test_unrescue_not_in_rescue(self): - instance = self._create_instance(obj=True) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - # Ensure that it will not unrescue a non-rescued instance. 
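- # No '<name>-rescue' VM was created for this instance, so the driver - # should refuse with InstanceNotInRescueMode.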
- self.assertRaises(exception.InstanceNotInRescueMode, - conn.unrescue, self.context, instance) - - def test_finish_revert_migration(self): - instance = self._create_instance() - - class VMOpsMock(object): - - def __init__(self): - self.finish_revert_migration_called = False - - def finish_revert_migration(self, context, instance, block_info, - power_on): - self.finish_revert_migration_called = True - - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - conn._vmops = VMOpsMock() - conn.finish_revert_migration(self.context, instance, None, None) - self.assertTrue(conn._vmops.finish_revert_migration_called) - - def test_reboot_hard(self): - instance = self._create_instance() - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - conn.reboot(self.context, instance, None, "HARD") - - @mock.patch.object(compute_api.API, 'reboot') - def test_poll_rebooting_instances(self, mock_reboot): - instance = self._create_instance() - instances = [instance] - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - conn.poll_rebooting_instances(60, instances) - - mock_reboot.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY) - - def test_reboot_soft(self): - instance = self._create_instance() - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - conn.reboot(self.context, instance, None, "SOFT") - - def test_reboot_halted(self): - session = get_session() - instance = self._create_instance(spawn=False) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - xenapi_fake.create_vm(instance['name'], 'Halted') - conn.reboot(self.context, instance, None, "SOFT") - vm_ref = vm_utils.lookup(session, instance['name']) - vm = xenapi_fake.get_record('VM', vm_ref) - self.assertEqual(vm['power_state'], 'Running') - - def test_reboot_unknown_state(self): - instance = self._create_instance(spawn=False) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - xenapi_fake.create_vm(instance['name'], 'Unknown') - self.assertRaises(XenAPI.Failure, conn.reboot, self.context, - instance, None, "SOFT") - - def test_reboot_rescued(self): - instance = self._create_instance() - instance['vm_state'] = vm_states.RESCUED - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - real_result = vm_utils.lookup(conn._session, instance['name']) - - with mock.patch.object(vm_utils, 'lookup', - return_value=real_result) as mock_lookup: - conn.reboot(self.context, instance, None, "SOFT") - mock_lookup.assert_called_once_with(conn._session, - instance['name'], True) - - def test_get_console_output_succeeds(self): - def fake_get_console_output(instance): - self.assertEqual("instance", instance) - return "console_log" - - with mock.patch.object(self.conn._vmops, 'get_console_output', - side_effect=fake_get_console_output): - self.assertEqual( - self.conn.get_console_output('context', "instance"), - "console_log") - - def _test_maintenance_mode(self, find_host, find_aggregate): - real_call_xenapi = self.conn._session.call_xenapi - instance = self._create_instance(spawn=True) - api_calls = {} - - # Record all the xenapi calls, and return a fake list of hosts - # for the host.get_all call - def fake_call_xenapi(self, method, *args): - api_calls[method] = args - if method == 'host.get_all': - return ['foo', 'bar', 'baz'] - return real_call_xenapi(method, *args) - self.stub_out('os_xenapi.client.session.XenAPISession.call_xenapi', - fake_call_xenapi) - - def fake_aggregate_get(context, host, key): - if find_aggregate: - return [test_aggregate.fake_aggregate] - else: - return [] - 
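# Wire in the fakes: aggregate and host lookups are stubbed so the - # maintenance-mode path can be exercised with or without a valid - # migration target. -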
self.stub_out('nova.objects.aggregate._get_by_host_from_db', - fake_aggregate_get) - - def fake_host_find(context, session, src, dst): - if find_host: - return 'bar' - else: - raise exception.NoValidHost("I saw this one coming...") - self.stub_out('nova.virt.xenapi.host._host_find', fake_host_find) - - result = self.conn.host_maintenance_mode('bar', 'on_maintenance') - self.assertEqual(result, 'on_maintenance') - - # We expect the VM.pool_migrate call to have been called to - # migrate our instance to the 'bar' host - vm_ref = vm_utils.lookup(self.conn._session, instance['name']) - host_ref = "foo" - expected = (vm_ref, host_ref, {"live": "true"}) - self.assertEqual(api_calls.get('VM.pool_migrate'), expected) - - instance = db.instance_get_by_uuid(self.context, instance['uuid']) - self.assertEqual(instance['vm_state'], vm_states.ACTIVE) - self.assertEqual(instance['task_state'], task_states.MIGRATING) - - def test_maintenance_mode(self): - self._test_maintenance_mode(True, True) - - def test_maintenance_mode_no_host(self): - self.assertRaises(exception.NoValidHost, - self._test_maintenance_mode, False, True) - - def test_maintenance_mode_no_aggregate(self): - self.assertRaises(exception.NotFound, - self._test_maintenance_mode, True, False) - - @mock.patch.object(db, 'instance_get_all_by_host') - def test_uuid_find(self, mock_instance_get_all_by_host): - fake_inst = fake_instance.fake_db_instance(id=123) - fake_inst2 = fake_instance.fake_db_instance(id=456) - mock_instance_get_all_by_host.return_value = [fake_inst, fake_inst2] - expected_name = CONF.instance_name_template % fake_inst['id'] - - inst_uuid = host._uuid_find(self.context, fake_inst['host'], - expected_name) - - self.assertEqual(inst_uuid, fake_inst['uuid']) - mock_instance_get_all_by_host.assert_called_once_with( - self.context, fake_inst['host'], columns_to_join=None) - - def test_per_instance_usage_running(self): - instance = self._create_instance(spawn=True) - flavor = objects.Flavor.get_by_id(self.context, 3) - - expected = {instance['uuid']: {'memory_mb': flavor['memory_mb'], - 'uuid': instance['uuid']}} - actual = self.conn.get_per_instance_usage() - self.assertEqual(expected, actual) - - # Paused instances still consume resources: - self.conn.pause(instance) - actual = self.conn.get_per_instance_usage() - self.assertEqual(expected, actual) - - def test_per_instance_usage_suspended(self): - # Suspended instances do not consume memory: - instance = self._create_instance(spawn=True) - self.conn.suspend(self.context, instance) - actual = self.conn.get_per_instance_usage() - self.assertEqual({}, actual) - - def test_per_instance_usage_halted(self): - instance = self._create_instance(spawn=True, obj=True) - self.conn.power_off(instance) - actual = self.conn.get_per_instance_usage() - self.assertEqual({}, actual) - - def _create_instance(self, spawn=True, obj=False, **attrs): - """Creates and spawns a test instance.""" - instance_values = { - 'uuid': uuidutils.generate_uuid(), - 'display_name': 'host-', - 'project_id': self.project_id, - 'user_id': self.user_id, - 'image_ref': IMAGE_MACHINE, - 'kernel_id': IMAGE_KERNEL, - 'ramdisk_id': IMAGE_RAMDISK, - 'root_gb': 80, - 'ephemeral_gb': 0, - 'instance_type_id': '3', # m1.large - 'os_type': 'linux', - 'vm_mode': 'hvm', - 'architecture': 'x86-64'} - instance_values.update(attrs) - - instance = create_instance_with_system_metadata(self.context, - instance_values) - network_info = fake_network.fake_get_instance_nw_info(self) - image_meta = objects.ImageMeta.from_dict( - {'id': 
uuids.image_id, - 'disk_format': 'vhd'}) - if spawn: - self.conn.spawn(self.context, instance, image_meta, [], 'herp', - {}, network_info) - if obj: - return instance - return base.obj_to_primitive(instance) - - @mock.patch.object(vm_utils, 'destroy_kernel_ramdisk') - @mock.patch.object(vm_utils, 'lookup_kernel_ramdisk', - return_value=('kernel', 'ramdisk')) - def test_destroy_clean_up_kernel_and_ramdisk( - self, mock_lookup_kernel_ramdisk, mock_destroy_kernel_ramdisk): - def fake_destroy_kernel_ramdisk(session, instance, kernel, ramdisk): - self.assertEqual("kernel", kernel) - self.assertEqual("ramdisk", ramdisk) - - mock_destroy_kernel_ramdisk.side_effect = fake_destroy_kernel_ramdisk - - instance = self._create_instance(spawn=True, obj=True) - network_info = fake_network.fake_get_instance_nw_info(self) - self.conn.destroy(self.context, instance, network_info) - - vm_ref = vm_utils.lookup(self.conn._session, instance['name']) - self.assertIsNone(vm_ref) - self.assertTrue(mock_destroy_kernel_ramdisk.called) - - -class XenAPIDiffieHellmanTestCase(test.NoDBTestCase): - """Unit tests for Diffie-Hellman code.""" - def setUp(self): - super(XenAPIDiffieHellmanTestCase, self).setUp() - self.alice = agent.SimpleDH() - self.bob = agent.SimpleDH() - - def test_shared(self): - alice_pub = self.alice.get_public() - bob_pub = self.bob.get_public() - alice_shared = self.alice.compute_shared(bob_pub) - bob_shared = self.bob.compute_shared(alice_pub) - self.assertEqual(alice_shared, bob_shared) - - def _test_encryption(self, message): - enc = self.alice.encrypt(message) - self.assertFalse(enc.endswith('\n')) - dec = self.bob.decrypt(enc) - self.assertEqual(dec, message) - - def test_encrypt_simple_message(self): - self._test_encryption('This is a simple message.') - - def test_encrypt_message_with_newlines_at_end(self): - self._test_encryption('This message has a newline at the end.\n') - - def test_encrypt_many_newlines_at_end(self): - self._test_encryption('Message with lotsa newlines.\n\n\n') - - def test_encrypt_newlines_inside_message(self): - self._test_encryption('Message\nwith\ninterior\nnewlines.') - - def test_encrypt_with_leading_newlines(self): - self._test_encryption('\n\nMessage with leading newlines.') - - def test_encrypt_really_long_message(self): - self._test_encryption(''.join(['abcd' for i in range(1024)])) - - -# FIXME(sirp): convert this to use XenAPITestBaseNoDB -class XenAPIMigrateInstance(stubs.XenAPITestBase): - """Unit test for verifying migration-related actions.""" - - REQUIRES_LOCKING = True - - def setUp(self): - super(XenAPIMigrateInstance, self).setUp() - self.flags(connection_url='http://localhost', - connection_password='test_pass', - group='xenserver') - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - db_fakes.stub_out_db_instance_api(self) - xenapi_fake.create_network('fake', 'fake_br1') - self.user_id = 'fake' - self.project_id = 'fake' - self.context = context.RequestContext(self.user_id, self.project_id) - self.instance_values = { - 'project_id': self.project_id, - 'user_id': self.user_id, - 'image_ref': IMAGE_MACHINE, - 'kernel_id': None, - 'ramdisk_id': None, - 'root_gb': 80, - 'ephemeral_gb': 0, - 'instance_type_id': '3', # m1.large - 'os_type': 'linux', - 'architecture': 'x86-64'} - - migration_values = { - 'source_compute': 'nova-compute', - 'dest_compute': 'nova-compute', - 'dest_host': '10.127.5.114', - 'status': 'post-migrating', - 'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7', - 'old_instance_type_id': 5, - 'new_instance_type_id': 1 - 
} - self.migration = db.migration_create( - context.get_admin_context(), migration_values) - - fake_processutils.stub_out_processutils_execute(self) - self.stub_out_migration_methods() - self.stubout_get_this_vm_uuid() - - def fake_inject_instance_metadata(self, instance, vm): - pass - self.stub_out('nova.virt.xenapi.vmops.VMOps._inject_instance_metadata', - fake_inject_instance_metadata) - - def fake_unpause_and_wait(self, vm_ref, instance, power_on): - pass - self.stub_out('nova.virt.xenapi.vmops.VMOps._unpause_and_wait', - fake_unpause_and_wait) - - def stub_out_migration_methods(self): - fakesr = xenapi_fake.create_sr() - - def fake_import_all_migrated_disks(session, instance, - import_root=True): - vdi_ref = xenapi_fake.create_vdi(instance['name'], fakesr) - vdi_rec = xenapi_fake.get_record('VDI', vdi_ref) - vdi_rec['other_config']['nova_disk_type'] = 'root' - return {"root": {'uuid': vdi_rec['uuid'], 'ref': vdi_ref}, - "ephemerals": {}} - - def fake_get_vdi(session, vm_ref, userdevice='0'): - vdi_ref_parent = xenapi_fake.create_vdi('derp-parent', fakesr) - vdi_rec_parent = xenapi_fake.get_record('VDI', vdi_ref_parent) - vdi_ref = fake.create_vdi('derp', fakesr, - sm_config={'vhd-parent': vdi_rec_parent['uuid']}) - vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref) - return vdi_ref, vdi_rec - - self.stub_out('nova.virt.xenapi.vmops.VMOps._destroy', - lambda *args, **kwargs: None) - self.stub_out('nova.virt.xenapi.vmops.VMOps.' - '_wait_for_instance_to_start', - lambda self, *args: None) - self.stub_out('nova.virt.xenapi.vm_utils.import_all_migrated_disks', - fake_import_all_migrated_disks) - self.stub_out('nova.virt.xenapi.vm_utils.scan_default_sr', - lambda session, *args: fakesr) - self.stub_out('nova.virt.xenapi.vm_utils.get_vdi_for_vm_safely', - fake_get_vdi) - self.stub_out('nova.virt.xenapi.vm_utils.get_sr_path', - lambda *args: 'fake') - self.stub_out('nova.virt.xenapi.vm_utils.generate_ephemeral', - lambda *args: None) - - def _create_instance(self, **kw): - values = self.instance_values.copy() - values.update(kw) - instance = objects.Instance(context=self.context, **values) - instance.flavor = objects.Flavor(root_gb=80, - ephemeral_gb=0) - instance.create() - return instance - - @mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_up') - @mock.patch.object(vm_utils, 'get_sr_path') - @mock.patch.object(vm_utils, 'lookup') - @mock.patch.object(volume_utils, 'is_booted_from_volume') - def test_migrate_disk_and_power_off(self, mock_boot_from_volume, - mock_lookup, mock_sr_path, - mock_migrate): - instance = self._create_instance() - xenapi_fake.create_vm(instance['name'], 'Running') - flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=80, - ephemeral_gb=0) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - mock_boot_from_volume.return_value = True - mock_lookup.return_value = 'fake_vm_ref' - mock_sr_path.return_value = 'fake_sr_path' - conn.migrate_disk_and_power_off(self.context, instance, - '127.0.0.1', flavor, None) - mock_lookup.assert_called_once_with(conn._session, instance['name'], - False) - mock_sr_path.assert_called_once_with(conn._session) - mock_migrate.assert_called_once_with(self.context, instance, - '127.0.0.1', 'fake_vm_ref', - 'fake_sr_path') - - def test_migrate_disk_and_power_off_passes_exceptions(self): - instance = self._create_instance() - xenapi_fake.create_vm(instance['name'], 'Running') - flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=80, - ephemeral_gb=0) - - def fake_raise(*args, **kwargs): - raise 
exception.MigrationError(reason='test failure') - self.stub_out( - 'nova.virt.xenapi.vmops.VMOps._migrate_disk_resizing_up', - fake_raise) - - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - self.assertRaises(exception.MigrationError, - conn.migrate_disk_and_power_off, - self.context, instance, - '127.0.0.1', flavor, None) - - def test_migrate_disk_and_power_off_throws_on_zero_gb_resize_down(self): - instance = self._create_instance() - flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=0, - ephemeral_gb=0) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - self.assertRaises(exception.ResizeError, - conn.migrate_disk_and_power_off, - self.context, instance, - 'fake_dest', flavor, None) - - @mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_up') - @mock.patch.object(vm_utils, 'get_sr_path') - @mock.patch.object(vm_utils, 'lookup') - @mock.patch.object(volume_utils, 'is_booted_from_volume') - def test_migrate_disk_and_power_off_with_zero_gb_old_and_new_works( - self, mock_boot_from_volume, mock_lookup, mock_sr_path, - mock_migrate): - flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=0, - ephemeral_gb=0) - instance = self._create_instance(root_gb=0, ephemeral_gb=0) - instance.flavor.root_gb = 0 - instance.flavor.ephemeral_gb = 0 - xenapi_fake.create_vm(instance['name'], 'Running') - mock_boot_from_volume.return_value = True - mock_lookup.return_value = 'fake_vm_ref' - mock_sr_path.return_value = 'fake_sr_path' - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - conn.migrate_disk_and_power_off(self.context, instance, - '127.0.0.1', flavor, None) - mock_lookup.assert_called_once_with(conn._session, instance['name'], - False) - mock_sr_path.assert_called_once_with(conn._session) - mock_migrate.assert_called_once_with(self.context, instance, - '127.0.0.1', 'fake_vm_ref', - 'fake_sr_path') - - def _test_revert_migrate(self, power_on): - instance = create_instance_with_system_metadata(self.context, - self.instance_values) - self.called = False - self.fake_vm_start_called = False - self.fake_finish_revert_migration_called = False - context = 'fake_context' - - def fake_vm_start(*args, **kwargs): - self.fake_vm_start_called = True - - def fake_vdi_resize(*args, **kwargs): - self.called = True - - def fake_finish_revert_migration(*args, **kwargs): - self.fake_finish_revert_migration_called = True - - self.stub_out( - 'nova.tests.unit.virt.xenapi.stubs.FakeSessionForVMTests' - '.VDI_resize_online', fake_vdi_resize) - self.stub_out('nova.virt.xenapi.vmops.VMOps._start', fake_vm_start) - self.stub_out('nova.virt.xenapi.vmops.VMOps.finish_revert_migration', - fake_finish_revert_migration) - stubs.stubout_session(self, stubs.FakeSessionForVMTests, - product_version=(4, 0, 0), - product_brand='XenServer') - - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - network_info = fake_network.fake_get_instance_nw_info(self) - image_meta = objects.ImageMeta.from_dict( - {'id': instance['image_ref'], 'disk_format': 'vhd'}) - base = xenapi_fake.create_vdi('hurr', 'fake') - base_uuid = xenapi_fake.get_record('VDI', base)['uuid'] - cow = xenapi_fake.create_vdi('durr', 'fake') - cow_uuid = xenapi_fake.get_record('VDI', cow)['uuid'] - conn.finish_migration(self.context, self.migration, instance, - dict(base_copy=base_uuid, cow=cow_uuid), - network_info, image_meta, resize_instance=True, - allocations={}, block_device_info=None, - power_on=power_on) - self.assertTrue(self.called) - self.assertEqual(self.fake_vm_start_called, power_on) - - 
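# Reverting must route through VMOps.finish_revert_migration, which - # was stubbed above to record that it was called. -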
conn.finish_revert_migration(context, instance, network_info, - self.migration) - self.assertTrue(self.fake_finish_revert_migration_called) - - def test_revert_migrate_power_on(self): - self._test_revert_migrate(True) - - def test_revert_migrate_power_off(self): - self._test_revert_migrate(False) - - def _test_finish_migrate(self, power_on): - instance = create_instance_with_system_metadata(self.context, - self.instance_values) - self.called = False - self.fake_vm_start_called = False - - def fake_vm_start(*args, **kwargs): - self.fake_vm_start_called = True - - def fake_vdi_resize(*args, **kwargs): - self.called = True - - self.stub_out('nova.virt.xenapi.vmops.VMOps._start', fake_vm_start) - self.stub_out('nova.tests.unit.virt.xenapi.stubs' - '.FakeSessionForVMTests.VDI_resize_online', - fake_vdi_resize) - stubs.stubout_session(self, stubs.FakeSessionForVMTests, - product_version=(4, 0, 0), - product_brand='XenServer') - - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - network_info = fake_network.fake_get_instance_nw_info(self) - image_meta = objects.ImageMeta.from_dict( - {'id': instance['image_ref'], 'disk_format': 'vhd'}) - conn.finish_migration(self.context, self.migration, instance, - dict(base_copy='hurr', cow='durr'), - network_info, image_meta, resize_instance=True, - allocations={}, block_device_info=None, - power_on=power_on) - self.assertTrue(self.called) - self.assertEqual(self.fake_vm_start_called, power_on) - - def test_finish_migrate_power_on(self): - self._test_finish_migrate(True) - - def test_finish_migrate_power_off(self): - self._test_finish_migrate(False) - - def test_finish_migrate_no_local_storage(self): - values = copy.copy(self.instance_values) - values["root_gb"] = 0 - values["ephemeral_gb"] = 0 - instance = create_instance_with_system_metadata(self.context, values) - instance.flavor.root_gb = 0 - instance.flavor.ephemeral_gb = 0 - - def fake_vdi_resize(*args, **kwargs): - raise Exception("This shouldn't be called") - - self.stub_out('nova.tests.unit.virt.xenapi.stubs' - '.FakeSessionForVMTests.VDI_resize_online', - fake_vdi_resize) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - network_info = fake_network.fake_get_instance_nw_info(self) - image_meta = objects.ImageMeta.from_dict( - {'id': instance['image_ref'], 'disk_format': 'vhd'}) - conn.finish_migration(self.context, self.migration, instance, - dict(base_copy='hurr', cow='durr'), - network_info, image_meta, resize_instance=True, - allocations={}) - - def test_finish_migrate_no_resize_vdi(self): - instance = create_instance_with_system_metadata(self.context, - self.instance_values) - - def fake_vdi_resize(*args, **kwargs): - raise Exception("This shouldn't be called") - - self.stub_out('nova.tests.unit.virt.xenapi.stubs' - '.FakeSessionForVMTests.VDI_resize_online', - fake_vdi_resize) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - network_info = fake_network.fake_get_instance_nw_info(self) - # Resize instance would be determined by the compute call - image_meta = objects.ImageMeta.from_dict( - {'id': instance['image_ref'], 'disk_format': 'vhd'}) - conn.finish_migration(self.context, self.migration, instance, - dict(base_copy='hurr', cow='durr'), - network_info, image_meta, resize_instance=False, - allocations={}) - - @stub_vm_utils_with_vdi_attached - def test_migrate_too_many_partitions_no_resize_down(self): - instance = self._create_instance() - xenapi_fake.create_vm(instance['name'], 'Running') - flavor = objects.Flavor.get_by_name(self.context, 'm1.small') - 
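- # The stubbed _get_partitions below reports two partitions, which makes a resize down unsafe; the driver must bail out with InstanceFaultRollback.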
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - def fake_get_partitions(partition): - return [(1, 2, 3, 4, "", ""), (1, 2, 3, 4, "", "")] - - self.stub_out('nova.virt.xenapi.vm_utils._get_partitions', - fake_get_partitions) - - self.assertRaises(exception.InstanceFaultRollback, - conn.migrate_disk_and_power_off, - self.context, instance, - '127.0.0.1', flavor, None) - - @stub_vm_utils_with_vdi_attached - def test_migrate_bad_fs_type_no_resize_down(self): - instance = self._create_instance() - xenapi_fake.create_vm(instance['name'], 'Running') - flavor = objects.Flavor.get_by_name(self.context, 'm1.small') - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - def fake_get_partitions(partition): - return [(1, 2, 3, "ext2", "", "boot")] - - self.stub_out('nova.virt.xenapi.vm_utils._get_partitions', - fake_get_partitions) - - self.assertRaises(exception.InstanceFaultRollback, - conn.migrate_disk_and_power_off, - self.context, instance, - '127.0.0.1', flavor, None) - - @mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown') - @mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label') - @mock.patch.object(vm_utils, 'resize_disk') - @mock.patch.object(vm_utils, 'migrate_vhd') - @mock.patch.object(vm_utils, 'destroy_vdi') - @mock.patch.object(vm_utils, 'get_vdi_for_vm_safely') - @mock.patch.object(vmops.VMOps, '_restore_orig_vm_and_cleanup_orphan') - def test_migrate_rollback_when_resize_down_fs_fails(self, mock_restore, - mock_get_vdi, - mock_destroy, - mock_migrate, - mock_disk, - mock_label, - mock_resize): - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - vmops = conn._vmops - instance = objects.Instance(context=self.context, - auto_disk_config=True, - uuid=uuids.instance) - instance.obj_reset_changes() - vm_ref = "vm_ref" - dest = "dest" - flavor = "type" - sr_path = "sr_path" - - vmops._resize_ensure_vm_is_shutdown(instance, vm_ref) - vmops._apply_orig_vm_name_label(instance, vm_ref) - old_vdi_ref = "old_ref" - mock_get_vdi.return_value = (old_vdi_ref, None) - new_vdi_ref = "new_ref" - new_vdi_uuid = "new_uuid" - mock_disk.return_value = (new_vdi_ref, new_vdi_uuid) - mock_migrate.side_effect = exception.ResizeError(reason="asdf") - vm_utils.destroy_vdi(vmops._session, new_vdi_ref) - vmops._restore_orig_vm_and_cleanup_orphan(instance) - - with mock.patch.object(instance, 'save') as mock_save: - self.assertRaises(exception.InstanceFaultRollback, - vmops._migrate_disk_resizing_down, self.context, - instance, dest, flavor, vm_ref, sr_path) - self.assertEqual(3, mock_save.call_count) - self.assertEqual(60.0, instance.progress) - - mock_resize.assert_any_call(instance, vm_ref) - mock_label.assert_any_call(instance, vm_ref) - mock_get_vdi.assert_called_once_with(vmops._session, vm_ref) - mock_disk.assert_called_once_with(vmops._session, instance, - old_vdi_ref, flavor) - mock_migrate.assert_called_once_with(vmops._session, instance, - new_vdi_uuid, dest, sr_path, 0) - mock_destroy.assert_any_call(vmops._session, new_vdi_ref) - mock_restore.assert_any_call(instance) - - @mock.patch.object(vm_utils, 'is_vm_shutdown') - @mock.patch.object(vm_utils, 'clean_shutdown_vm') - @mock.patch.object(vm_utils, 'hard_shutdown_vm') - def test_resize_ensure_vm_is_shutdown_cleanly(self, mock_hard, mock_clean, - mock_shutdown): - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - vmops = conn._vmops - fake_instance = {'uuid': 'uuid'} - - mock_shutdown.return_value = False - mock_clean.return_value = True - - 
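- # The VM reports as running (is_vm_shutdown False) and the clean shutdown succeeds, so no hard shutdown should be attempted.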
vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref") - mock_shutdown.assert_called_once_with(vmops._session, "ref") - mock_clean.assert_called_once_with(vmops._session, fake_instance, - "ref") - - @mock.patch.object(vm_utils, 'is_vm_shutdown') - @mock.patch.object(vm_utils, 'clean_shutdown_vm') - @mock.patch.object(vm_utils, 'hard_shutdown_vm') - def test_resize_ensure_vm_is_shutdown_forced(self, mock_hard, mock_clean, - mock_shutdown): - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - vmops = conn._vmops - fake_instance = {'uuid': 'uuid'} - - mock_shutdown.return_value = False - mock_clean.return_value = False - mock_hard.return_value = True - - vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref") - mock_shutdown.assert_called_once_with(vmops._session, "ref") - mock_clean.assert_called_once_with(vmops._session, fake_instance, - "ref") - mock_hard.assert_called_once_with(vmops._session, fake_instance, - "ref") - - @mock.patch.object(vm_utils, 'is_vm_shutdown') - @mock.patch.object(vm_utils, 'clean_shutdown_vm') - @mock.patch.object(vm_utils, 'hard_shutdown_vm') - def test_resize_ensure_vm_is_shutdown_fails(self, mock_hard, mock_clean, - mock_shutdown): - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - vmops = conn._vmops - fake_instance = {'uuid': 'uuid'} - - mock_shutdown.return_value = False - mock_clean.return_value = False - mock_hard.return_value = False - - self.assertRaises(exception.ResizeError, - vmops._resize_ensure_vm_is_shutdown, fake_instance, "ref") - mock_shutdown.assert_called_once_with(vmops._session, "ref") - mock_clean.assert_called_once_with(vmops._session, fake_instance, - "ref") - mock_hard.assert_called_once_with(vmops._session, fake_instance, - "ref") - - @mock.patch.object(vm_utils, 'is_vm_shutdown') - @mock.patch.object(vm_utils, 'clean_shutdown_vm') - @mock.patch.object(vm_utils, 'hard_shutdown_vm') - def test_resize_ensure_vm_is_shutdown_already_shutdown(self, mock_hard, - mock_clean, - mock_shutdown): - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - vmops = conn._vmops - fake_instance = {'uuid': 'uuid'} - - mock_shutdown.return_value = True - - vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref") - mock_shutdown.assert_called_once_with(vmops._session, "ref") - - -class XenAPIImageTypeTestCase(test.NoDBTestCase): - """Test ImageType class.""" - - def test_to_string(self): - # Can convert from type id to type string. 
- self.assertEqual( - vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL), - vm_utils.ImageType.KERNEL_STR) - - def _assert_role(self, expected_role, image_type_id): - self.assertEqual( - expected_role, - vm_utils.ImageType.get_role(image_type_id)) - - def test_get_image_role_kernel(self): - self._assert_role('kernel', vm_utils.ImageType.KERNEL) - - def test_get_image_role_ramdisk(self): - self._assert_role('ramdisk', vm_utils.ImageType.RAMDISK) - - def test_get_image_role_disk(self): - self._assert_role('root', vm_utils.ImageType.DISK) - - def test_get_image_role_disk_raw(self): - self._assert_role('root', vm_utils.ImageType.DISK_RAW) - - def test_get_image_role_disk_vhd(self): - self._assert_role('root', vm_utils.ImageType.DISK_VHD) - - -class XenAPIDetermineDiskImageTestCase(test.NoDBTestCase): - """Unit tests for code that detects the ImageType.""" - def assert_disk_type(self, image_meta, expected_disk_type): - actual = vm_utils.determine_disk_image_type(image_meta) - self.assertEqual(expected_disk_type, actual) - - def test_machine(self): - image_meta = objects.ImageMeta.from_dict( - {'disk_format': 'ami'}) - self.assert_disk_type(image_meta, vm_utils.ImageType.DISK) - - def test_raw(self): - image_meta = objects.ImageMeta.from_dict( - {'disk_format': 'raw'}) - self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_RAW) - - def test_vhd(self): - image_meta = objects.ImageMeta.from_dict( - {'disk_format': 'vhd'}) - self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_VHD) - - -# FIXME(sirp): convert this to use XenAPITestBaseNoDB -class XenAPIHostTestCase(stubs.XenAPITestBase): - """Tests HostState, which holds metrics from XenServer that get - reported back to the Schedulers. - """ - - def setUp(self): - super(XenAPIHostTestCase, self).setUp() - self.flags(connection_url='http://localhost', - connection_password='test_pass', - group='xenserver') - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - self.context = context.get_admin_context() - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - self.instance = fake_instance.fake_db_instance(name='foo') - self.useFixture(fixtures.SingleCellSimple()) - - def test_host_state(self): - stats = self.conn.host_state.get_host_stats(False) - # Values from fake.create_local_srs (ext SR) - self.assertEqual(stats['disk_total'], 40000) - self.assertEqual(stats['disk_used'], 0) - # Values from fake._plugin_xenhost_host_data - self.assertEqual(stats['host_memory_total'], 10) - self.assertEqual(stats['host_memory_overhead'], 20) - self.assertEqual(stats['host_memory_free'], 30) - self.assertEqual(stats['host_memory_free_computed'], 40) - self.assertEqual(stats['hypervisor_hostname'], 'fake-xenhost') - self.assertEqual(stats['host_cpu_info']['cpu_count'], 4) - self.assertThat({ - 'vendor': 'GenuineIntel', - 'model': 'Intel(R) Xeon(R) CPU X3430 @ 2.40GHz', - 'topology': { - 'sockets': 1, - 'cores': 4, - 'threads': 1, - }, - 'features': [ - 'fpu', 'de', 'tsc', 'msr', 'pae', 'mce', - 'cx8', 'apic', 'sep', 'mtrr', 'mca', - 'cmov', 'pat', 'clflush', 'acpi', 'mmx', - 'fxsr', 'sse', 'sse2', 'ss', 'ht', - 'nx', 'constant_tsc', 'nonstop_tsc', - 'aperfmperf', 'pni', 'vmx', 'est', 'ssse3', - 'sse4_1', 'sse4_2', 'popcnt', 'hypervisor', - 'ida', 'tpr_shadow', 'vnmi', 'flexpriority', - 'ept', 'vpid', - ]}, - matchers.DictMatches(stats['cpu_model'])) - # No VMs running - self.assertEqual(stats['vcpus_used'], 0) - - def test_host_state_vcpus_used(self): - stats = self.conn.host_state.get_host_stats(True) - 
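- # No VMs exist yet, so a refreshed vcpus_used starts at zero; it should grow once a fake VM is running.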
self.assertEqual(stats['vcpus_used'], 0) - xenapi_fake.create_vm(self.instance['name'], 'Running') - stats = self.conn.host_state.get_host_stats(True) - self.assertEqual(stats['vcpus_used'], 4) - - def test_pci_passthrough_devices(self): - stats = self.conn.host_state.get_host_stats(False) - self.assertEqual(len(stats['pci_passthrough_devices']), 2) - - def test_host_state_missing_sr(self): - # Must trigger construction of 'host_state' property - # before introducing the stub which raises the error - hs = self.conn.host_state - - with mock.patch.object( - vm_utils, 'safe_find_sr', - side_effect=exception.StorageRepositoryNotFound('not there')): - self.assertRaises(exception.StorageRepositoryNotFound, - hs.get_host_stats, - refresh=True) - - def _test_host_action(self, method, action, expected=None): - result = method('host', action) - if not expected: - expected = action - self.assertEqual(result, expected) - - def _test_host_action_no_param(self, method, action, expected=None): - result = method(action) - if not expected: - expected = action - self.assertEqual(result, expected) - - def test_host_reboot(self): - self._test_host_action_no_param(self.conn.host_power_action, 'reboot') - - def test_host_shutdown(self): - self._test_host_action_no_param(self.conn.host_power_action, - 'shutdown') - - def test_host_startup(self): - self.assertRaises(NotImplementedError, - self.conn.host_power_action, 'startup') - - def test_host_maintenance_on(self): - self._test_host_action(self.conn.host_maintenance_mode, - True, 'on_maintenance') - - def test_host_maintenance_off(self): - self._test_host_action(self.conn.host_maintenance_mode, - False, 'off_maintenance') - - def test_set_enable_host_enable(self): - _create_service_entries(self.context, values={'nova': ['fake-mini']}) - self._test_host_action_no_param(self.conn.set_host_enabled, - True, 'enabled') - service = db.service_get_by_host_and_binary(self.context, 'fake-mini', - 'nova-compute') - self.assertFalse(service.disabled) - - def test_set_enable_host_disable(self): - _create_service_entries(self.context, values={'nova': ['fake-mini']}) - self._test_host_action_no_param(self.conn.set_host_enabled, - False, 'disabled') - service = db.service_get_by_host_and_binary(self.context, 'fake-mini', - 'nova-compute') - self.assertTrue(service.disabled) - - def test_get_host_uptime(self): - result = self.conn.get_host_uptime() - self.assertEqual(result, 'fake uptime') - - def test_supported_instances_is_included_in_host_state(self): - stats = self.conn.host_state.get_host_stats(False) - self.assertIn('supported_instances', stats) - - @mock.patch.object(host, 'to_supported_instances', - return_value='SOMERETURNVALUE') - def test_supported_instances_is_calculated_by_to_supported_instances( - self, mock_to_supported_instances): - stats = self.conn.host_state.get_host_stats(False) - self.assertEqual("SOMERETURNVALUE", stats['supported_instances']) - mock_to_supported_instances.assert_called_once_with( - ['xen-3.0-x86_64', 'xen-3.0-x86_32p', 'hvm-3.0-x86_32', - 'hvm-3.0-x86_32p', 'hvm-3.0-x86_64']) - - @mock.patch.object(host.HostState, 'get_disk_used') - @mock.patch.object(host.HostState, '_get_passthrough_devices') - @mock.patch.object(host.HostState, '_get_vgpu_stats') - @mock.patch.object(jsonutils, 'loads') - @mock.patch.object(vm_utils, 'list_vms') - @mock.patch.object(vm_utils, 'scan_default_sr') - @mock.patch.object(host_management, 'get_host_data') - def test_update_stats_caches_hostname(self, mock_host_data, mock_scan_sr, - mock_list_vms, mock_loads, 
- mock_vgpus_stats, - mock_devices, mock_dis_used): - data = {'disk_total': 0, - 'disk_used': 0, - 'disk_available': 0, - 'supported_instances': 0, - 'host_capabilities': [], - 'host_hostname': 'foo', - 'vcpus_used': 0, - } - sr_rec = { - 'physical_size': 0, - 'physical_utilisation': 0, - 'virtual_allocation': 0, - } - mock_loads.return_value = data - mock_host_data.return_value = data - mock_scan_sr.return_value = 'ref' - mock_list_vms.return_value = [] - mock_devices.return_value = "dev1" - mock_dis_used.return_value = (0, 0) - self.conn._session = mock.Mock() - with mock.patch.object(self.conn._session.SR, 'get_record') \ - as mock_record: - mock_record.return_value = sr_rec - stats = self.conn.host_state.get_host_stats(refresh=True) - self.assertEqual('foo', stats['hypervisor_hostname']) - self.assertEqual(2, mock_loads.call_count) - self.assertEqual(2, mock_host_data.call_count) - self.assertEqual(2, mock_scan_sr.call_count) - self.assertEqual(2, mock_devices.call_count) - self.assertEqual(2, mock_vgpus_stats.call_count) - mock_loads.assert_called_with(data) - mock_host_data.assert_called_with(self.conn._session) - mock_scan_sr.assert_called_with(self.conn._session) - mock_devices.assert_called_with() - mock_vgpus_stats.assert_called_with() - - -@mock.patch.object(host.HostState, 'update_status') -class XenAPIHostStateTestCase(stubs.XenAPITestBaseNoDB): - - def _test_get_disk_used(self, vdis, attached_vbds): - session = mock.MagicMock() - host_state = host.HostState(session) - - sr_ref = 'sr_ref' - - session.SR.get_VDIs.return_value = vdis.keys() - session.VDI.get_virtual_size.side_effect = \ - lambda vdi_ref: vdis[vdi_ref]['virtual_size'] - session.VDI.get_physical_utilisation.side_effect = \ - lambda vdi_ref: vdis[vdi_ref]['physical_utilisation'] - session.VDI.get_VBDs.side_effect = \ - lambda vdi_ref: vdis[vdi_ref]['VBDs'] - session.VBD.get_currently_attached.side_effect = \ - lambda vbd_ref: vbd_ref in attached_vbds - - disk_used = host_state.get_disk_used(sr_ref) - session.SR.get_VDIs.assert_called_once_with(sr_ref) - return disk_used - - def test_get_disk_used_virtual(self, mock_update_status): - # Both VDIs are attached - attached_vbds = ['vbd_1', 'vbd_2'] - vdis = { - 'vdi_1': {'physical_utilisation': 1, - 'virtual_size': 100, - 'VBDs': ['vbd_1']}, - 'vdi_2': {'physical_utilisation': 1, - 'virtual_size': 100, - 'VBDs': ['vbd_2']} - } - disk_used = self._test_get_disk_used(vdis, attached_vbds) - self.assertEqual((200, 2), disk_used) - - def test_get_disk_used_physical(self, mock_update_status): - # Neither VDIs are attached - attached_vbds = [] - vdis = { - 'vdi_1': {'physical_utilisation': 1, - 'virtual_size': 100, - 'VBDs': ['vbd_1']}, - 'vdi_2': {'physical_utilisation': 1, - 'virtual_size': 100, - 'VBDs': ['vbd_2']} - } - disk_used = self._test_get_disk_used(vdis, attached_vbds) - self.assertEqual((2, 2), disk_used) - - def test_get_disk_used_both(self, mock_update_status): - # One VDI is attached - attached_vbds = ['vbd_1'] - vdis = { - 'vdi_1': {'physical_utilisation': 1, - 'virtual_size': 100, - 'VBDs': ['vbd_1']}, - 'vdi_2': {'physical_utilisation': 1, - 'virtual_size': 100, - 'VBDs': ['vbd_2']} - } - disk_used = self._test_get_disk_used(vdis, attached_vbds) - self.assertEqual((101, 2), disk_used) - - -class ToSupportedInstancesTestCase(test.NoDBTestCase): - def test_default_return_value(self): - self.assertEqual([], - host.to_supported_instances(None)) - - def test_return_value(self): - self.assertEqual( - [(obj_fields.Architecture.X86_64, obj_fields.HVType.XEN, 
'xen')], - host.to_supported_instances([u'xen-3.0-x86_64'])) - - def test_invalid_values_do_not_break(self): - self.assertEqual( - [(obj_fields.Architecture.X86_64, obj_fields.HVType.XEN, 'xen')], - host.to_supported_instances([u'xen-3.0-x86_64', 'spam'])) - - def test_multiple_values(self): - self.assertEqual( - [ - (obj_fields.Architecture.X86_64, obj_fields.HVType.XEN, 'xen'), - (obj_fields.Architecture.I686, obj_fields.HVType.XEN, 'hvm') - ], - host.to_supported_instances([u'xen-3.0-x86_64', 'hvm-3.0-x86_32']) - ) - - -# FIXME(sirp): convert this to use XenAPITestBaseNoDB -class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase): - def setUp(self): - super(XenAPIAutoDiskConfigTestCase, self).setUp() - self.flags(connection_url='http://localhost', - connection_password='test_pass', - group='xenserver') - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self.user_id = 'fake' - self.project_id = 'fake' - - self.instance_values = { - 'project_id': self.project_id, - 'user_id': self.user_id, - 'image_ref': IMAGE_MACHINE, - 'kernel_id': IMAGE_KERNEL, - 'ramdisk_id': IMAGE_RAMDISK, - 'root_gb': 80, - 'ephemeral_gb': 0, - 'instance_type_id': '3', # m1.large - 'os_type': 'linux', - 'architecture': 'x86-64'} - - self.context = context.RequestContext(self.user_id, self.project_id) - - self.stub_out('nova.virt.xenapi.vm_utils.create_vbd', - lambda session, vm_ref, vdi_ref, userdevice, - bootable, osvol: None) - - @mock.patch.object(vm_utils, '_resize_part_and_fs') - def assertIsPartitionCalled(self, called, mock_resize_part_and_fs): - context.RequestContext(self.user_id, self.project_id) - session = get_session() - - disk_image_type = vm_utils.ImageType.DISK_VHD - instance = create_instance_with_system_metadata(self.context, - self.instance_values) - vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted') - vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake') - - vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid'] - vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}} - image_meta = objects.ImageMeta.from_dict( - {'id': uuids.image_id, - 'disk_format': 'vhd', - 'properties': {'vm_mode': 'xen'}}) - - self.conn._vmops._attach_disks(self.context, instance, image_meta, - vm_ref, instance['name'], vdis, disk_image_type, - "fake_nw_inf") - - if called: - mock_resize_part_and_fs.assert_called() - else: - mock_resize_part_and_fs.assert_not_called() - - def test_instance_not_auto_disk_config(self): - """Should not partition unless instance is marked as - auto_disk_config. - """ - self.instance_values['auto_disk_config'] = False - self.assertIsPartitionCalled(False) - - @stub_vm_utils_with_vdi_attached - @mock.patch.object(vm_utils, '_get_partitions', - return_value=[(1, 0, 100, 'ext4', "", ""), - (2, 100, 200, 'ext4', "", "")]) - def test_instance_auto_disk_config_fails_safe_two_partitions( - self, mock_get_partitions): - # Should not partition unless fail safes pass. 
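- # This time the fail-safe is a single partition with the wrong number (2 instead of 1), which must also prevent repartitioning.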
- self.instance_values['auto_disk_config'] = True - self.assertIsPartitionCalled(False) - mock_get_partitions.assert_called_once_with('fakedev') - - @stub_vm_utils_with_vdi_attached - @mock.patch.object(vm_utils, '_get_partitions', - return_value=[(1, 100, 200, 'asdf', "", "")]) - def test_instance_auto_disk_config_fails_safe_bad_fstype( - self, mock_get_partitions): - # Should not partition unless fail safes pass. - self.instance_values['auto_disk_config'] = True - self.assertIsPartitionCalled(False) - mock_get_partitions.assert_called_once_with('fakedev') - - @stub_vm_utils_with_vdi_attached - @mock.patch.object(vm_utils, '_get_partitions', - return_value=[(1, 0, 100, 'ext4', "", "boot")]) - def test_instance_auto_disk_config_passes_fail_safes( - self, mock_get_partitions): - """Should partition if instance is marked as auto_disk_config=True and - virt-layer specific fail-safe checks pass. - """ - self.instance_values['auto_disk_config'] = True - self.assertIsPartitionCalled(True) - mock_get_partitions.assert_called_once_with('fakedev') - - -# FIXME(sirp): convert this to use XenAPITestBaseNoDB -class XenAPIGenerateLocal(stubs.XenAPITestBase): - """Test generating of local disks, like swap and ephemeral.""" - def setUp(self): - super(XenAPIGenerateLocal, self).setUp() - self.flags(connection_url='http://localhost', - connection_password='test_pass', - group='xenserver') - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - db_fakes.stub_out_db_instance_api(self) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self.user_id = 'fake' - self.project_id = 'fake' - - self.instance_values = { - 'project_id': self.project_id, - 'user_id': self.user_id, - 'image_ref': IMAGE_MACHINE, - 'kernel_id': IMAGE_KERNEL, - 'ramdisk_id': IMAGE_RAMDISK, - 'root_gb': 80, - 'ephemeral_gb': 0, - 'instance_type_id': '3', # m1.large - 'os_type': 'linux', - 'architecture': 'x86-64'} - - self.context = context.RequestContext(self.user_id, self.project_id) - - def fake_create_vbd(session, vm_ref, vdi_ref, userdevice, - vbd_type='disk', read_only=False, bootable=True, - osvol=False, empty=False, unpluggable=True): - return session.call_xenapi('VBD.create', {'VM': vm_ref, - 'VDI': vdi_ref}) - - self.stub_out('nova.virt.xenapi.vm_utils.create_vbd', fake_create_vbd) - - def assertCalled(self, instance, - disk_image_type=vm_utils.ImageType.DISK_VHD): - context.RequestContext(self.user_id, self.project_id) - session = get_session() - - vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted') - vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake') - - vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid'] - - vdi_key = 'root' - if disk_image_type == vm_utils.ImageType.DISK_ISO: - vdi_key = 'iso' - vdis = {vdi_key: {'uuid': vdi_uuid, 'ref': vdi_ref}} - image_meta = objects.ImageMeta.from_dict( - {'id': uuids.image_id, - 'disk_format': 'vhd', - 'properties': {'vm_mode': 'xen'}}) - self.conn._vmops._attach_disks(self.context, instance, image_meta, - vm_ref, instance['name'], vdis, disk_image_type, - "fake_nw_inf") - - @mock.patch.object(vm_utils, 'generate_swap') - def test_generate_swap(self, mock_generate_swap): - # Test swap disk generation. 
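- # instance_type_id=5 selects a flavor from the stubbed instance-type API that includes swap, so _attach_disks should call generate_swap.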
- instance_values = dict(self.instance_values, instance_type_id=5) - instance = create_instance_with_system_metadata(self.context, - instance_values) - self.assertCalled(instance) - self.assertTrue(mock_generate_swap.called) - - @mock.patch.object(vm_utils, 'generate_ephemeral') - def test_generate_ephemeral(self, mock_generate_ephemeral): - # Test ephemeral disk generation. - instance_values = dict(self.instance_values, instance_type_id=4) - instance = create_instance_with_system_metadata(self.context, - instance_values) - self.assertCalled(instance) - self.assertTrue(mock_generate_ephemeral.called) - - @mock.patch.object( - uuidutils, 'generate_uuid', - new=mock.Mock(return_value='98e2a239-5a96-4a72-840f-2c3836482461')) - @mock.patch.object(vm_utils, 'generate_iso_blank_root_disk') - @mock.patch.object(vm_utils, 'generate_ephemeral') - def test_generate_iso_blank_root_disk( - self, mock_generate_ephemeral, mock_generate_iso_blank_root_disk): - instance_values = dict(self.instance_values, instance_type_id=4) - instance_values.pop('kernel_id') - instance_values.pop('ramdisk_id') - instance = create_instance_with_system_metadata(self.context, - instance_values) - self.assertCalled(instance, vm_utils.ImageType.DISK_ISO) - mock_generate_ephemeral.assert_called_once_with( - test.MatchType(session.XenAPISession), - instance, '98e2a239-5a96-4a72-840f-2c3836482461', '4', - 'instance-00000001', 160) - self.assertTrue(mock_generate_iso_blank_root_disk.called) - - -class XenAPIBWCountersTestCase(stubs.XenAPITestBaseNoDB): - FAKE_VMS = {'test1:ref': dict(name_label='test1', - other_config=dict(nova_uuid='hash'), - domid='12', - _vifmap={'0': "a:b:c:d...", - '1': "e:f:12:q..."}), - 'test2:ref': dict(name_label='test2', - other_config=dict(nova_uuid='hash'), - domid='42', - _vifmap={'0': "a:3:c:d...", - '1': "e:f:42:q..."}), - } - - def setUp(self): - super(XenAPIBWCountersTestCase, self).setUp() - self.stub_out('nova.virt.xenapi.vm_utils.list_vms', - XenAPIBWCountersTestCase._fake_list_vms) - self.flags(connection_url='http://localhost', - connection_password='test_pass', - group='xenserver') - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - def _fake_get_vif_device_map(self, vm_rec): - return vm_rec['_vifmap'] - - self.stub_out('nova.virt.xenapi.vmops.VMOps._get_vif_device_map', - _fake_get_vif_device_map) - - @classmethod - def _fake_list_vms(cls, session): - return cls.FAKE_VMS.items() - - @staticmethod - def _fake_fetch_bandwidth_mt(session): - return {} - - @mock.patch.object(vm_utils, 'fetch_bandwidth', - return_value={ - '42': {'0': {'bw_in': 21024, 'bw_out': 22048}, - '1': {'bw_in': 231337, 'bw_out': 221212121}}, - '12': - {'0': {'bw_in': 1024, 'bw_out': 2048}, - '1': {'bw_in': 31337, 'bw_out': 21212121}}}) - def test_get_all_bw_counters(self, mock_fetch_bandwidth): - instances = [dict(name='test1', uuid='1-2-3'), - dict(name='test2', uuid='4-5-6')] - result = self.conn.get_all_bw_counters(instances) - self.assertEqual(4, len(result)) - self.assertIn(dict(uuid='1-2-3', - mac_address="a:b:c:d...", - bw_in=1024, - bw_out=2048), result) - self.assertIn(dict(uuid='1-2-3', - mac_address="e:f:12:q...", - bw_in=31337, - bw_out=21212121), result) - - self.assertIn(dict(uuid='4-5-6', - mac_address="a:3:c:d...", - bw_in=21024, - bw_out=22048), result) - self.assertIn(dict(uuid='4-5-6', - mac_address="e:f:42:q...", - bw_in=231337, - bw_out=221212121), result) - mock_fetch_bandwidth.assert_called_once_with( - 
test.MatchType(session.XenAPISession)) - - @mock.patch.object(vm_utils, 'fetch_bandwidth', - new=mock.Mock(return_value={})) - def test_get_all_bw_counters_in_failure_case(self): - """Test that get_all_bw_counters returns an empty list when - no data is returned from XenServer; cf. bug #910045. - """ - instances = [dict(name='instance-0001', uuid='1-2-3-4-5')] - result = self.conn.get_all_bw_counters(instances) - self.assertEqual(result, []) - - -class XenAPISRSelectionTestCase(stubs.XenAPITestBaseNoDB): - """Unit tests that check we find the right SR.""" - def test_safe_find_sr_raise_exception(self): - # Ensure StorageRepositoryNotFound is raised when the filter matches nothing. - self.flags(sr_matching_filter='yadayadayada', group='xenserver') - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - session = get_session() - self.assertRaises(exception.StorageRepositoryNotFound, - vm_utils.safe_find_sr, session) - - def test_safe_find_sr_local_storage(self): - # Ensure the default local-storage is found. - self.flags(sr_matching_filter='other-config:i18n-key=local-storage', - group='xenserver') - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - session = get_session() - # This test is only guaranteed if there is one host in the pool - self.assertEqual(len(xenapi_fake.get_all('host')), 1) - host_ref = xenapi_fake.get_all('host')[0] - pbd_refs = xenapi_fake.get_all('PBD') - for pbd_ref in pbd_refs: - pbd_rec = xenapi_fake.get_record('PBD', pbd_ref) - if pbd_rec['host'] != host_ref: - continue - sr_rec = xenapi_fake.get_record('SR', pbd_rec['SR']) - if sr_rec['other_config']['i18n-key'] == 'local-storage': - local_sr = pbd_rec['SR'] - expected = vm_utils.safe_find_sr(session) - self.assertEqual(local_sr, expected) - - def test_safe_find_sr_by_other_criteria(self): - # Ensure the SR is found when using a different filter. - self.flags(sr_matching_filter='other-config:my_fake_sr=true', - group='xenserver') - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - session = get_session() - host_ref = xenapi_fake.get_all('host')[0] - local_sr = xenapi_fake.create_sr(name_label='Fake Storage', - type='lvm', - other_config={'my_fake_sr': 'true'}, - host_ref=host_ref) - expected = vm_utils.safe_find_sr(session) - self.assertEqual(local_sr, expected) - - def test_safe_find_sr_default(self): - # Ensure the default SR is found regardless of other-config. 
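- # A 'default-sr:true' filter makes safe_find_sr return the pool's default SR rather than matching on other_config keys.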
- self.flags(sr_matching_filter='default-sr:true', - group='xenserver') - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - session = get_session() - pool_ref = session.call_xenapi('pool.get_all')[0] - expected = vm_utils.safe_find_sr(session) - self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref), - expected) - - -def _create_service_entries(context, values={'avail_zone1': ['fake_host1', - 'fake_host2'], - 'avail_zone2': ['fake_host3'], }): - for hosts in values.values(): - for service_host in hosts: - objects.Service(context, - **{'host': service_host, - 'binary': 'nova-compute', - 'topic': 'compute', - 'report_count': 0}).create() - return values - - -# FIXME(sirp): convert this to use XenAPITestBaseNoDB -class XenAPIAggregateTestCase(stubs.XenAPITestBase): - """Unit tests for aggregate operations.""" - def setUp(self): - super(XenAPIAggregateTestCase, self).setUp() - self.flags(connection_url='http://localhost', - connection_username='test_user', - connection_password='test_pass', - group='xenserver') - self.flags(instance_name_template='%d', - host='host', - compute_driver='xenapi.XenAPIDriver', - default_availability_zone='avail_zone1') - host_ref = xenapi_fake.get_all('host')[0] - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - self.context = context.get_admin_context() - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - self.compute = manager.ComputeManager() - self.api = compute_api.AggregateAPI() - values = {'name': 'test_aggr', - 'metadata': {'availability_zone': 'test_zone', - pool_states.POOL_FLAG: 'XenAPI'}} - self.aggr = objects.Aggregate(context=self.context, id=1, - **values) - self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI', - 'master_compute': 'host', - 'availability_zone': 'fake_zone', - pool_states.KEY: pool_states.ACTIVE, - 'host': xenapi_fake.get_record('host', - host_ref)['uuid']} - self.useFixture(fixtures.SingleCellSimple()) - - @mock.patch('nova.virt.xenapi.pool.ResourcePool.add_to_aggregate') - def test_pool_add_to_aggregate_called_by_driver( - self, mock_add_to_aggregate): - def pool_add_to_aggregate(context, aggregate, host, slave_info=None): - self.assertEqual("CONTEXT", context) - self.assertEqual("AGGREGATE", aggregate) - self.assertEqual("HOST", host) - self.assertEqual("SLAVEINFO", slave_info) - mock_add_to_aggregate.side_effect = pool_add_to_aggregate - - self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST", - slave_info="SLAVEINFO") - - self.assertTrue(mock_add_to_aggregate.called) - - @mock.patch('nova.virt.xenapi.pool.ResourcePool.remove_from_aggregate') - def test_pool_remove_from_aggregate_called_by_driver( - self, mock_remove_from_aggregate): - def pool_remove_from_aggregate(context, aggregate, host, - slave_info=None): - self.assertEqual("CONTEXT", context) - self.assertEqual("AGGREGATE", aggregate) - self.assertEqual("HOST", host) - self.assertEqual("SLAVEINFO", slave_info) - mock_remove_from_aggregate.side_effect = pool_remove_from_aggregate - self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST", - slave_info="SLAVEINFO") - - self.assertTrue(mock_remove_from_aggregate.called) - - @mock.patch('nova.virt.xenapi.pool.ResourcePool._init_pool') - def test_add_to_aggregate_for_first_host_sets_metadata( - self, mock_init_pool): - aggregate = self._aggregate_setup() - self.conn._pool.add_to_aggregate(self.context, aggregate, "host") - result = objects.Aggregate.get_by_id(self.context, aggregate.id) - self.assertTrue(mock_init_pool.called) - self.assertThat(self.fake_metadata, - 
matchers.DictMatches(result.metadata)) - - @mock.patch('nova.virt.xenapi.pool.ResourcePool._join_slave') - def test_join_slave(self, mock_join_slave): - # Ensure join_slave gets called when the request gets to master. - aggregate = self._aggregate_setup(hosts=['host', 'host2'], - metadata=self.fake_metadata) - self.conn._pool.add_to_aggregate(self.context, aggregate, "host2", - dict(compute_uuid='fake_uuid', - url='fake_url', - user='fake_user', - passwd='fake_pass', - xenhost_uuid='fake_uuid')) - self.assertTrue(mock_join_slave.called) - - @mock.patch.object(xenapi_fake.SessionBase, 'pool_set_name_label') - def test_add_to_aggregate_first_host(self, mock_pool_set_name_label): - self.conn._session.call_xenapi("pool.create", {"name": "asdf"}) - - metadata = {'availability_zone': 'fake_zone', - pool_states.POOL_FLAG: "XenAPI", - pool_states.KEY: pool_states.CREATED} - - aggregate = objects.Aggregate(context=self.context) - aggregate.name = 'fake_aggregate' - aggregate.metadata = dict(metadata) - aggregate.create() - aggregate.add_host('host') - self.assertEqual(["host"], aggregate.hosts) - self.assertEqual(metadata, aggregate.metadata) - - self.conn._pool.add_to_aggregate(self.context, aggregate, "host") - self.assertTrue(mock_pool_set_name_label.called) - - @mock.patch('nova.virt.xenapi.pool.ResourcePool.remove_from_aggregate') - def test_remove_from_aggregate_called(self, mock_remove_from_aggregate): - self.conn.remove_from_aggregate(None, None, None) - self.assertTrue(mock_remove_from_aggregate.called) - - def test_remove_from_empty_aggregate(self): - result = self._aggregate_setup() - self.assertRaises(exception.InvalidAggregateActionDelete, - self.conn._pool.remove_from_aggregate, - self.context, result, "test_host") - - @mock.patch('nova.virt.xenapi.pool.ResourcePool._eject_slave') - def test_remove_slave(self, mock_eject_slave): - # Ensure eject slave gets called. - self.fake_metadata['host2'] = 'fake_host2_uuid' - aggregate = self._aggregate_setup(hosts=['host', 'host2'], - metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE) - self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2") - self.assertTrue(mock_eject_slave.called) - - @mock.patch('nova.virt.xenapi.pool.ResourcePool._clear_pool') - def test_remove_master_solo(self, mock_clear_pool): - # Ensure metadata are cleared after removal. - aggregate = self._aggregate_setup(metadata=self.fake_metadata) - self.conn._pool.remove_from_aggregate(self.context, aggregate, "host") - result = objects.Aggregate.get_by_id(self.context, aggregate.id) - self.assertTrue(mock_clear_pool.called) - self.assertThat({'availability_zone': 'fake_zone', - pool_states.POOL_FLAG: 'XenAPI', - pool_states.KEY: pool_states.ACTIVE}, - matchers.DictMatches(result.metadata)) - - def test_remote_master_non_empty_pool(self): - # Ensure AggregateError is raised if removing the master. 
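- # 'host' is the pool master (master_compute in fake_metadata) while 'host2' is still in the pool, so ejecting the master must be refused.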
- aggregate = self._aggregate_setup(hosts=['host', 'host2'], - metadata=self.fake_metadata) - - self.assertRaises(exception.InvalidAggregateActionDelete, - self.conn._pool.remove_from_aggregate, - self.context, aggregate, "host") - - def _aggregate_setup(self, aggr_name='fake_aggregate', - aggr_zone='fake_zone', - aggr_state=pool_states.CREATED, - hosts=['host'], metadata=None): - aggregate = objects.Aggregate(context=self.context) - aggregate.name = aggr_name - aggregate.metadata = {'availability_zone': aggr_zone, - pool_states.POOL_FLAG: 'XenAPI', - pool_states.KEY: aggr_state, - } - if metadata: - aggregate.metadata.update(metadata) - aggregate.create() - for aggregate_host in hosts: - aggregate.add_host(aggregate_host) - return aggregate - - def test_add_host_to_aggregate_invalid_changing_status(self): - """Ensure InvalidAggregateActionAdd is raised when adding host while - aggregate is not ready. - """ - aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING) - ex = self.assertRaises(exception.InvalidAggregateActionAdd, - self.conn.add_to_aggregate, self.context, - aggregate, 'host') - self.assertIn('setup in progress', str(ex)) - - def test_add_host_to_aggregate_invalid_dismissed_status(self): - """Ensure InvalidAggregateActionAdd is raised when aggregate is - deleted. - """ - aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED) - ex = self.assertRaises(exception.InvalidAggregateActionAdd, - self.conn.add_to_aggregate, self.context, - aggregate, 'fake_host') - self.assertIn('aggregate deleted', str(ex)) - - def test_add_host_to_aggregate_invalid_error_status(self): - """Ensure InvalidAggregateActionAdd is raised when aggregate is - in error. - """ - aggregate = self._aggregate_setup(aggr_state=pool_states.ERROR) - ex = self.assertRaises(exception.InvalidAggregateActionAdd, - self.conn.add_to_aggregate, self.context, - aggregate, 'fake_host') - self.assertIn('aggregate in error', str(ex)) - - @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host') - @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' - 'aggregate_remove_host') - @mock.patch('nova.scheduler.client.report.SchedulerReportClient.' - 'aggregate_add_host') - def test_remove_host_from_aggregate_error( - self, mock_add_host, mock_remove_host, mock_get_all_by_host): - # Ensure we can remove a host from an aggregate even if in error. - values = _create_service_entries(self.context) - fake_zone = list(values.keys())[0] - aggr = self.api.create_aggregate(self.context, - 'fake_aggregate', fake_zone) - # let's mock the fact that the aggregate is ready! - metadata = {pool_states.POOL_FLAG: "XenAPI", - pool_states.KEY: pool_states.ACTIVE} - self.api.update_aggregate_metadata(self.context, - aggr.id, - metadata) - for aggregate_host in values[fake_zone]: - mock_get_all_by_host.return_value = objects.ComputeNodeList( - objects=[objects.ComputeNode( - host=aggregate_host, - hypervisor_hostname=aggregate_host)]) - aggr = self.api.add_host_to_aggregate(self.context, - aggr.id, aggregate_host) - # let's mock the fact that the aggregate is in error! - expected = self.api.remove_host_from_aggregate(self.context, - aggr.id, - values[fake_zone][0]) - self.assertEqual(len(aggr.hosts) - 1, len(expected.hosts)) - self.assertEqual(expected.metadata[pool_states.KEY], - pool_states.ACTIVE) - - def test_remove_host_from_aggregate_invalid_dismissed_status(self): - """Ensure InvalidAggregateActionDelete is raised when aggregate is - deleted. 
- """ - aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED) - self.assertRaises(exception.InvalidAggregateActionDelete, - self.conn.remove_from_aggregate, self.context, - aggregate, 'fake_host') - - def test_remove_host_from_aggregate_invalid_changing_status(self): - """Ensure InvalidAggregateActionDelete is raised when aggregate is - changing. - """ - aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING) - self.assertRaises(exception.InvalidAggregateActionDelete, - self.conn.remove_from_aggregate, self.context, - aggregate, 'fake_host') - - @mock.patch('nova.virt.xenapi.driver.XenAPIDriver.add_to_aggregate', - new=mock.Mock( - side_effect=exception.AggregateError( - aggregate_id='', action='', reason=''))) - @mock.patch('nova.compute.utils.notify_about_aggregate_action', - new=mock.Mock()) - def test_add_aggregate_host_raise_err(self): - # Ensure the undo operation works correctly on add. - metadata = {pool_states.POOL_FLAG: "XenAPI", - pool_states.KEY: pool_states.ACTIVE} - self.aggr.metadata = metadata - self.aggr.hosts = ['fake_host'] - - self.assertRaises(exception.AggregateError, - self.compute.add_aggregate_host, - self.context, host="fake_host", - aggregate=self.aggr, - slave_info=None) - self.assertEqual(self.aggr.metadata[pool_states.KEY], - pool_states.ERROR) - self.assertEqual(self.aggr.hosts, ['fake_host']) - - -class MockComputeAPI(object): - def __init__(self): - self._mock_calls = [] - - def add_aggregate_host(self, ctxt, aggregate, - host_param, host, slave_info): - self._mock_calls.append(( - self.add_aggregate_host, ctxt, aggregate, - host_param, host, slave_info)) - - def remove_aggregate_host(self, ctxt, host, aggregate_id, host_param, - slave_info): - self._mock_calls.append(( - self.remove_aggregate_host, ctxt, host, aggregate_id, - host_param, slave_info)) - - -class StubDependencies(object): - """Stub dependencies for ResourcePool.""" - - def __init__(self): - self.compute_rpcapi = MockComputeAPI() - - def _is_hv_pool(self, *_ignore): - return True - - def _get_metadata(self, *_ignore): - return { - pool_states.KEY: {}, - 'master_compute': 'master' - } - - def _create_slave_info(self, *ignore): - return "SLAVE_INFO" - - -class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool): - """A ResourcePool, use stub dependencies.""" - - -class HypervisorPoolTestCase(test.NoDBTestCase): - - fake_aggregate = { - 'id': 98, - 'hosts': [], - 'metadata': { - 'master_compute': 'master', - pool_states.POOL_FLAG: '', - pool_states.KEY: '' - } - } - fake_aggregate = objects.Aggregate(**fake_aggregate) - - def test_slave_asks_master_to_add_slave_to_pool(self): - slave = ResourcePoolWithStubs() - - slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave") - - self.assertIn( - (slave.compute_rpcapi.add_aggregate_host, - "CONTEXT", "slave", self.fake_aggregate, - "master", "SLAVE_INFO"), - slave.compute_rpcapi._mock_calls) - - def test_slave_asks_master_to_remove_slave_from_pool(self): - slave = ResourcePoolWithStubs() - - slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave") - - self.assertIn( - (slave.compute_rpcapi.remove_aggregate_host, - "CONTEXT", "slave", 98, "master", "SLAVE_INFO"), - slave.compute_rpcapi._mock_calls) - - -class SwapXapiHostTestCase(test.NoDBTestCase): - - def test_swapping(self): - self.assertEqual( - "http://otherserver:8765/somepath", - pool.swap_xapi_host( - "http://someserver:8765/somepath", 'otherserver')) - - def test_no_port(self): - self.assertEqual( - "http://otherserver/somepath", - 
pool.swap_xapi_host( - "http://someserver/somepath", 'otherserver')) - - def test_no_path(self): - self.assertEqual( - "http://otherserver", - pool.swap_xapi_host( - "http://someserver", 'otherserver')) - - -class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB): - """Unit tests for live_migration.""" - def setUp(self): - super(XenAPILiveMigrateTestCase, self).setUp() - self.flags(connection_url='http://localhost', - connection_password='test_pass', - group='xenserver') - self.flags(host='host') - db_fakes.stub_out_db_instance_api(self) - self.context = context.get_admin_context() - - @mock.patch.object(vmops.VMOps, 'live_migrate') - def test_live_migration_calls_vmops(self, mock_live_migrate): - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - self.conn.live_migration(None, None, None, None, None) - self.assertTrue(mock_live_migrate.called) - - def test_pre_live_migration(self): - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - with mock.patch.object(self.conn._vmops, "pre_live_migration") as pre: - pre.return_value = True - - result = self.conn.pre_live_migration( - "ctx", "inst", "bdi", "nw", "di", "data") - - self.assertTrue(result) - pre.assert_called_with("ctx", "inst", "bdi", "nw", "di", "data") - - @mock.patch.object(vm_utils, 'create_kernel_and_ramdisk', - return_value=('fake-kernel-file', 'fake-ramdisk-file')) - @mock.patch.object(vm_utils, 'strip_base_mirror_from_vdis') - @mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref') - @mock.patch.object(vmops.VMOps, '_post_start_actions') - def test_post_live_migration_at_destination( - self, mock_post_action, mock_get_vm_opaque_ref, - mock_strip_base_mirror_from_vdis, mock_create_kernel_and_ramdisk): - # ensure method is present - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - fake_instance = {"name": "fake-name"} - fake_network_info = "network_info" - - self.conn.post_live_migration_at_destination( - self.context, fake_instance, fake_network_info, None) - self.assertTrue(mock_get_vm_opaque_ref.called) - self.assertTrue(mock_strip_base_mirror_from_vdis.called) - mock_post_action.assert_called_once_with(fake_instance) - mock_create_kernel_and_ramdisk.assert_called_once_with( - self.context, self.conn._session, fake_instance, - fake_instance['name']) - - @mock.patch.object(vm_utils, 'host_in_this_pool') - def test_check_can_live_migrate_destination_with_block_migration( - self, - mock_same_pool): - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - fake_instance = objects.Instance(host="fake_host") - - self.stub_out('nova.virt.xenapi.vm_utils.safe_find_sr', - lambda _x: 'asdf') - with mock.patch.object(self.conn._vmops._session, "host_ref") as \ - fake_host_ref, mock.patch.object( - self.conn._vmops, '_get_network_ref') as \ - fake_get_network_ref, mock.patch.object( - self.conn._vmops, '_get_host_opaque_ref'): - fake_host_ref.return_value = 'fake_host_ref' - fake_get_network_ref.return_value = 'fake_network_ref' - expected = {'block_migration': True, - 'is_volume_backed': False, - 'migrate_send_data': {'value': 'fake_migrate_data'}, - 'destination_sr_ref': 'asdf', - 'vif_uuid_map': {'': 'fake_network_ref'}} - result = self.conn.check_can_live_migrate_destination( - self.context, - fake_instance, - {}, {}, - True, False) - 
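- # is_volume_backed is normally filled in by the compute manager rather than the driver; set it here so obj_to_primitive() matches 'expected'.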
result.is_volume_backed = False - self.assertEqual(expected, - result.obj_to_primitive()['nova_object.data']) - - def test_check_live_migrate_destination_verifies_ip(self): - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - fake_instance = objects.Instance(host="fake_host") - - for pif_ref in xenapi_fake.get_all('PIF'): - pif_rec = xenapi_fake.get_record('PIF', pif_ref) - pif_rec['IP'] = '' - pif_rec['IPv6'] = '' - - self.stub_out('nova.virt.xenapi.vm_utils.safe_find_sr', - lambda _x: 'asdf') - - self.assertRaises(exception.MigrationError, - self.conn.check_can_live_migrate_destination, - self.context, fake_instance, - {}, {}, - True, False) - - def test_check_can_live_migrate_destination_block_migration_fails(self): - - fake_instance = objects.Instance(host="fake_host") - - stubs.stubout_session(self, stubs.FakeSessionForFailedMigrateTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - self.assertRaises(exception.MigrationError, - self.conn.check_can_live_migrate_destination, - self.context, fake_instance, - {}, {}, - True, False) - - def _add_default_live_migrate_stubs(self, conn): - @classmethod - def fake_generate_vdi_map(cls, destination_sr_ref, _vm_ref): - pass - - @classmethod - def fake_get_iscsi_srs(cls, destination_sr_ref, _vm_ref): - return [] - - @classmethod - def fake_get_vm_opaque_ref(cls, instance): - return "fake_vm" - - def fake_lookup_kernel_ramdisk(session, vm): - return ("fake_PV_kernel", "fake_PV_ramdisk") - - @classmethod - def fake_generate_vif_map(cls, vif_uuid_map): - return {'vif_ref1': 'dest_net_ref'} - - self.stub_out('nova.virt.xenapi.vmops.VMOps._generate_vdi_map', - fake_generate_vdi_map) - self.stub_out('nova.virt.xenapi.vmops.VMOps._get_iscsi_srs', - fake_get_iscsi_srs) - self.stub_out('nova.virt.xenapi.vmops.VMOps._get_vm_opaque_ref', - fake_get_vm_opaque_ref) - self.stub_out('nova.virt.xenapi.vm_utils.lookup_kernel_ramdisk', - fake_lookup_kernel_ramdisk) - self.stub_out('nova.virt.xenapi.vmops.VMOps._generate_vif_network_map', - fake_generate_vif_map) - - def test_check_can_live_migrate_source_with_block_migrate(self): - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self._add_default_live_migrate_stubs(self.conn) - - dest_check_data = objects.XenapiLiveMigrateData( - block_migration=True, is_volume_backed=False, - destination_sr_ref=None, migrate_send_data={'key': 'value'}) - result = self.conn.check_can_live_migrate_source(self.context, - {'host': 'host'}, - dest_check_data) - self.assertEqual(dest_check_data, result) - - def test_check_can_live_migrate_source_with_block_migrate_iscsi(self): - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self._add_default_live_migrate_stubs(self.conn) - dest_check_data = objects.XenapiLiveMigrateData( - block_migration=True, - is_volume_backed=True, - destination_sr_ref=None, - migrate_send_data={'key': 'value'}) - result = self.conn.check_can_live_migrate_source(self.context, - {'host': 'host'}, - dest_check_data) - self.assertEqual(dest_check_data, result) - - @mock.patch.object(session.XenAPISession, 'is_xsm_sr_check_relaxed', - return_value=False) - def test_check_can_live_migrate_source_with_block_iscsi_fails( - self, mock_is_xsm_sr_check_relaxed): - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - self.conn = 
xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self._add_default_live_migrate_stubs(self.conn) - - with mock.patch.object(vmops.VMOps, '_get_iscsi_srs', - return_value=['sr_ref']): - self.assertRaises(exception.MigrationError, - self.conn.check_can_live_migrate_source, - self.context, {'host': 'host'}, - {}) - mock_is_xsm_sr_check_relaxed.assert_called_once_with() - - def test_check_can_live_migrate_source_with_block_migrate_fails(self): - stubs.stubout_session(self, stubs.FakeSessionForFailedMigrateTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self._add_default_live_migrate_stubs(self.conn) - - dest_check_data = objects.XenapiLiveMigrateData( - block_migration=True, is_volume_backed=True, - migrate_send_data={'key': 'value'}, destination_sr_ref=None) - self.assertRaises(exception.MigrationError, - self.conn.check_can_live_migrate_source, - self.context, - {'host': 'host'}, - dest_check_data) - - @mock.patch.object(vm_utils, 'host_in_this_pool') - def test_check_can_live_migrate_works(self, - mock_host_in_this_pool): - # The dest host is in the same pool with the src host, do no block - # live migrate - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - mock_host_in_this_pool.side_effect = [True, True] - with mock.patch.object(self.conn._vmops, "_get_host_opaque_ref") as \ - fake_get_host_opaque_ref, \ - mock.patch.object(self.conn._vmops, '_get_network_ref') as \ - fake_get_network_ref, \ - mock.patch.object(self.conn._vmops._session, 'get_rec') as \ - fake_get_rec: - fake_host_ref = 'fake_host_ref' - fake_get_host_opaque_ref.return_value = fake_host_ref - fake_network_ref = 'fake_network_ref' - fake_get_network_ref.return_value = fake_network_ref - fake_get_rec.return_value = {'shared': True} - fake_host_name = 'fake_host' - instance = objects.Instance(host=fake_host_name) - - # Set block_migration to None to enable pool check, then do pooled - # live migrate - dest_check_data = self.conn.check_can_live_migrate_destination( - self.context, instance, 'fake_src_compute_info', - 'fake_dst_compute_info', None, None) - self.assertFalse(dest_check_data.block_migration) - self.assertEqual(dest_check_data.vif_uuid_map, - {'': fake_network_ref}) - fake_get_host_opaque_ref.assert_called_once_with(fake_host_name) - mock_host_in_this_pool.assert_called_once_with( - self.conn._vmops._session, fake_host_ref) - fake_get_network_ref.assert_called_once() - - @mock.patch.object(vm_utils, 'host_in_this_pool') - def test_check_can_live_migrate_fails(self, mock_host_in_this_pool): - # Caller asks for no block live migrate while the dest host is not in - # the same pool with the src host, raise exception - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - mock_host_in_this_pool.return_value = False - with mock.patch.object(self.conn._vmops, "_get_host_opaque_ref") as \ - fake_get_host_opaque_ref, \ - mock.patch.object(self.conn._vmops, '_get_network_ref') as \ - fake_get_network_ref: - fake_host_ref = 'fake_host_ref' - fake_get_host_opaque_ref.return_value = fake_host_ref - fake_network_ref = 'fake_network_ref' - fake_get_network_ref.return_value = fake_network_ref - fake_host_name = 'fake_host' - instance = objects.Instance(host=fake_host_name) - - # Set block_migration to False to do pooled live migrate - self.assertRaises(exception.MigrationPreCheckError, - self.conn.check_can_live_migrate_destination, - self.context, 
instance, 'fake_src_compute_info', - 'fake_dst_compute_info', False, None) - - fake_get_host_opaque_ref.assert_called_once_with(fake_host_name) - mock_host_in_this_pool.assert_called_once_with( - self.conn._vmops._session, fake_host_ref) - fake_get_network_ref.assert_not_called() - - @mock.patch.object(vmops.VMOps, '_get_host_opaque_ref', - return_value='fake_host') - @mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref', - return_value='fake_vm') - @mock.patch.object(vm_utils, 'lookup_kernel_ramdisk', - return_value=('kernel', 'ramdisk')) - def test_live_migration(self, mock_lookup_kernel_ramdisk, - mock_get_vm_opaque_ref, mock_get_host_opaque_ref): - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - def post_method(context, instance, destination_hostname, - block_migration, migrate_data): - post_method.called = True - migrate_data = objects.XenapiLiveMigrateData( - destination_sr_ref="foo", - migrate_send_data={"bar": "baz"}, - block_migration=False) - - fake_instance = mock.Mock() - self.conn.live_migration(self.context, fake_instance, 'fake-dest', - post_method, None, None, migrate_data) - - self.assertTrue(post_method.called, "post_method was not called") - mock_lookup_kernel_ramdisk.assert_called_once_with( - self.conn._session, 'fake_vm') - mock_get_vm_opaque_ref.assert_called_once_with(fake_instance) - mock_get_host_opaque_ref.assert_called_once_with('fake-dest') - - @mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref', - return_value='fake_vm') - def test_live_migration_on_failure(self, mock_get_vm_opaque_ref): - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - def recover_method(context, instance, destination_hostname, - migrate_data=None): - self.assertIsNotNone(migrate_data, 'migrate_data should be passed') - recover_method.called = True - migrate_data = objects.XenapiLiveMigrateData( - destination_sr_ref="foo", - migrate_send_data={"bar": "baz"}, - block_migration=False) - fake_instance = mock.Mock() - - with mock.patch.object(session.XenAPISession, 'call_xenapi', - side_effect=NotImplementedError()): - self.assertRaises(NotImplementedError, self.conn.live_migration, - self.context, fake_instance, 'fake-dest', None, - recover_method, None, migrate_data) - self.assertTrue(recover_method.called, - "recover_method was not called") - mock_get_vm_opaque_ref.assert_called_once_with(fake_instance) - - def test_live_migration_calls_post_migration(self): - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self._add_default_live_migrate_stubs(self.conn) - - def post_method(context, instance, destination_hostname, - block_migration, migrate_data): - post_method.called = True - - # pass block_migration = True and migrate data - migrate_data = objects.XenapiLiveMigrateData( - destination_sr_ref="foo", - migrate_send_data={"bar": "baz"}, - block_migration=True) - self.conn.live_migration(self.conn, None, None, post_method, None, - True, migrate_data) - self.assertTrue(post_method.called, "post_method.called") - - @mock.patch.object(volume_utils, 'forget_sr') - def test_live_migration_block_cleans_srs(self, mock_forget_sr): - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self._add_default_live_migrate_stubs(self.conn) - - def post_method(context, instance, destination_hostname, - 
block_migration, migrate_data): - post_method.called = True - - migrate_data = objects.XenapiLiveMigrateData( - destination_sr_ref="foo", - migrate_send_data={"bar": "baz"}, - block_migration=True) - - with mock.patch.object(vmops.VMOps, '_get_iscsi_srs', - return_value=['sr_ref']): - self.conn.live_migration(self.conn, None, None, post_method, None, - True, migrate_data) - - self.assertTrue(post_method.called, "post_method was not called") - self.assertTrue(mock_forget_sr.called, "forget_sr was not called") - - def test_live_migration_with_block_migration_fails_migrate_send(self): - stubs.stubout_session(self, stubs.FakeSessionForFailedMigrateTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self._add_default_live_migrate_stubs(self.conn) - - def recover_method(context, instance, destination_hostname, - migrate_data=None): - self.assertIsNotNone(migrate_data, 'migrate_data should be passed') - recover_method.called = True - # pass block_migration = True and migrate data - migrate_data = objects.XenapiLiveMigrateData( - destination_sr_ref='foo', - migrate_send_data={'bar': 'baz'}, - block_migration=True) - self.assertRaises(exception.MigrationError, - self.conn.live_migration, self.conn, - None, None, None, recover_method, True, migrate_data) - self.assertTrue(recover_method.called, "recover_method.called") - - def test_live_migrate_block_migration_xapi_call_parameters(self): - - fake_vdi_map = object() - - class Session(xenapi_fake.SessionBase): - def VM_migrate_send(self_, session, vmref, migrate_data, islive, - vdi_map, vif_map, options): - self.assertEqual({'SOMEDATA': 'SOMEVAL'}, migrate_data) - self.assertEqual(fake_vdi_map, vdi_map) - - stubs.stubout_session(self, Session) - - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self._add_default_live_migrate_stubs(conn) - - def fake_generate_vdi_map(self, destination_sr_ref, _vm_ref): - return fake_vdi_map - - self.stub_out('nova.virt.xenapi.vmops.VMOps._generate_vdi_map', - fake_generate_vdi_map) - - def dummy_callback(*args, **kwargs): - pass - - migrate_data = objects.XenapiLiveMigrateData( - migrate_send_data={'SOMEDATA': 'SOMEVAL'}, - destination_sr_ref='TARGET_SR_OPAQUE_REF', - block_migration=True) - conn.live_migration( - self.context, instance=dict(name='ignore'), dest=None, - post_method=dummy_callback, recover_method=dummy_callback, - block_migration="SOMEDATA", - migrate_data=migrate_data) - - @mock.patch.object(vmops.VMOps, '_get_host_opaque_ref', - return_value='fake_ref') - def test_live_migrate_pool_migration_xapi_call_parameters( - self, mock_get_host_opaque_ref): - - class Session(xenapi_fake.SessionBase): - def VM_pool_migrate(self_, session, vm_ref, host_ref, options): - self.assertEqual("fake_ref", host_ref) - self.assertEqual({"live": "true"}, options) - raise IOError() - - stubs.stubout_session(self, Session) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - self._add_default_live_migrate_stubs(conn) - - def dummy_callback(*args, **kwargs): - pass - - migrate_data = objects.XenapiLiveMigrateData( - migrate_send_data={'foo': 'bar'}, - destination_sr_ref='foo', - block_migration=False) - self.assertRaises(IOError, conn.live_migration, - self.context, instance=dict(name='ignore'), dest='fake-dest', - post_method=dummy_callback, recover_method=dummy_callback, - block_migration=False, migrate_data=migrate_data) - mock_get_host_opaque_ref.assert_called_once_with('fake-dest') - - @mock.patch.object(vm_utils, 'get_instance_vdis_for_sr') - @mock.patch.object(vm_utils, 
'safe_find_sr') - def test_generate_vdi_map(self, mock_safe_find_sr, - mock_get_instance_vdis_for_sr): - stubs.stubout_session(self, xenapi_fake.SessionBase) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - vm_ref = "fake_vm_ref" - mock_safe_find_sr.return_value = 'source_sr_ref' - - mock_get_instance_vdis_for_sr.return_value = ['vdi0', 'vdi1'] - - result = conn._vmops._generate_vdi_map("dest_sr_ref", vm_ref) - - self.assertEqual({"vdi0": "dest_sr_ref", - "vdi1": "dest_sr_ref"}, result) - mock_safe_find_sr.assert_called_once_with(conn._session) - mock_get_instance_vdis_for_sr.assert_called_once_with( - conn._session, vm_ref, 'source_sr_ref') - - @mock.patch.object(vmops.VMOps, "_delete_networks_and_bridges") - def test_rollback_live_migration_at_destination(self, mock_delete_network): - stubs.stubout_session(self, xenapi_fake.SessionBase) - conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - network_info = ["fake_vif1"] - with mock.patch.object(conn, "destroy") as mock_destroy: - conn.rollback_live_migration_at_destination("context", - "instance", network_info, {'block_device_mapping': []}) - self.assertFalse(mock_destroy.called) - self.assertTrue(mock_delete_network.called) - - -class XenAPIInjectMetadataTestCase(stubs.XenAPITestBaseNoDB): - def setUp(self): - super(XenAPIInjectMetadataTestCase, self).setUp() - self.flags(connection_url='http://localhost', - connection_password='test_pass', - group='xenserver') - stubs.stubout_session(self, stubs.FakeSessionForVMTests) - self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False) - - self.xenstore = dict(persist={}, ephem={}) - - self.called_fake_get_vm_opaque_ref = False - - def fake_get_vm_opaque_ref(inst, instance): - self.called_fake_get_vm_opaque_ref = True - if instance["uuid"] == "not_found": - raise exception.NotFound - self.assertEqual(instance, {'uuid': 'fake'}) - return 'vm_ref' - - def fake_add_to_param_xenstore(inst, vm_ref, key, val): - self.assertEqual(vm_ref, 'vm_ref') - self.xenstore['persist'][key] = val - - def fake_remove_from_param_xenstore(inst, vm_ref, key): - self.assertEqual(vm_ref, 'vm_ref') - if key in self.xenstore['persist']: - del self.xenstore['persist'][key] - - def fake_write_to_xenstore(inst, instance, path, value, vm_ref=None): - self.assertEqual(instance, {'uuid': 'fake'}) - self.assertEqual(vm_ref, 'vm_ref') - self.xenstore['ephem'][path] = jsonutils.dumps(value) - - def fake_delete_from_xenstore(inst, instance, path, vm_ref=None): - self.assertEqual(instance, {'uuid': 'fake'}) - self.assertEqual(vm_ref, 'vm_ref') - if path in self.xenstore['ephem']: - del self.xenstore['ephem'][path] - - self.stub_out('nova.virt.xenapi.vmops.VMOps._get_vm_opaque_ref', - fake_get_vm_opaque_ref) - self.stub_out('nova.virt.xenapi.vmops.VMOps._add_to_param_xenstore', - fake_add_to_param_xenstore) - self.stub_out('nova.virt.xenapi.vmops.VMOps.' 
- '_remove_from_param_xenstore', - fake_remove_from_param_xenstore) - self.stub_out('nova.virt.xenapi.vmops.VMOps._write_to_xenstore', - fake_write_to_xenstore) - self.stub_out('nova.virt.xenapi.vmops.VMOps._delete_from_xenstore', - fake_delete_from_xenstore) - - def test_inject_instance_metadata(self): - - # Add some system_metadata to ensure it doesn't get added - # to xenstore - instance = dict(metadata=[{'key': 'a', 'value': 1}, - {'key': 'b', 'value': 2}, - {'key': 'c', 'value': 3}, - # Check xenstore key sanitizing - {'key': 'hi.there', 'value': 4}, - {'key': 'hi!t.e/e', 'value': 5}], - # Check xenstore key sanitizing - system_metadata=[{'key': 'sys_a', 'value': 1}, - {'key': 'sys_b', 'value': 2}, - {'key': 'sys_c', 'value': 3}], - uuid='fake') - self.conn._vmops._inject_instance_metadata(instance, 'vm_ref') - - self.assertEqual(self.xenstore, { - 'persist': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '2', - 'vm-data/user-metadata/c': '3', - 'vm-data/user-metadata/hi_there': '4', - 'vm-data/user-metadata/hi_t_e_e': '5', - }, - 'ephem': {}, - }) - - def test_change_instance_metadata_add(self): - # Test XenStore key sanitizing here, too. - diff = {'test.key': ['+', 4]} - instance = {'uuid': 'fake'} - self.xenstore = { - 'persist': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '2', - 'vm-data/user-metadata/c': '3', - }, - 'ephem': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '2', - 'vm-data/user-metadata/c': '3', - }, - } - - self.conn._vmops.change_instance_metadata(instance, diff) - - self.assertEqual(self.xenstore, { - 'persist': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '2', - 'vm-data/user-metadata/c': '3', - 'vm-data/user-metadata/test_key': '4', - }, - 'ephem': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '2', - 'vm-data/user-metadata/c': '3', - 'vm-data/user-metadata/test_key': '4', - }, - }) - - def test_change_instance_metadata_update(self): - diff = dict(b=['+', 4]) - instance = {'uuid': 'fake'} - self.xenstore = { - 'persist': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '2', - 'vm-data/user-metadata/c': '3', - }, - 'ephem': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '2', - 'vm-data/user-metadata/c': '3', - }, - } - - self.conn._vmops.change_instance_metadata(instance, diff) - - self.assertEqual(self.xenstore, { - 'persist': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '4', - 'vm-data/user-metadata/c': '3', - }, - 'ephem': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '4', - 'vm-data/user-metadata/c': '3', - }, - }) - - def test_change_instance_metadata_delete(self): - diff = dict(b=['-']) - instance = {'uuid': 'fake'} - self.xenstore = { - 'persist': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '2', - 'vm-data/user-metadata/c': '3', - }, - 'ephem': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/b': '2', - 'vm-data/user-metadata/c': '3', - }, - } - - self.conn._vmops.change_instance_metadata(instance, diff) - - self.assertEqual(self.xenstore, { - 'persist': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/c': '3', - }, - 'ephem': { - 'vm-data/user-metadata/a': '1', - 'vm-data/user-metadata/c': '3', - }, - }) - - def test_change_instance_metadata_not_found(self): - instance = {'uuid': 'not_found'} - self.conn._vmops.change_instance_metadata(instance, "fake_diff") - self.assertTrue(self.called_fake_get_vm_opaque_ref) - - -class 
XenAPIFakeTestCase(test.NoDBTestCase): - def test_query_matches(self): - record = {'a': '1', 'b': '2', 'c_d': '3'} - - tests = {'field "a"="1"': True, - 'field "b"="2"': True, - 'field "b"="4"': False, - 'not field "b"="4"': True, - 'field "a"="1" and field "b"="4"': False, - 'field "a"="1" or field "b"="4"': True, - 'field "c__d"="3"': True, - 'field \'b\'=\'2\'': True, - } - - for query in tests.keys(): - expected = tests[query] - fail_msg = "for test '%s'" % query - self.assertEqual(xenapi_fake._query_matches(record, query), - expected, fail_msg) - - def test_query_bad_format(self): - record = {'a': '1', 'b': '2', 'c': '3'} - - tests = ['"a"="1" or "b"="4"', - 'a=1', - ] - - for query in tests: - fail_msg = "for test '%s'" % query - self.assertFalse(xenapi_fake._query_matches(record, query), - fail_msg)
diff --git a/nova/tests/unit/virt/xenapi/vm_rrd.xml b/nova/tests/unit/virt/xenapi/vm_rrd.xml deleted file mode 100644 index f9a7c8083e93..000000000000 --- a/nova/tests/unit/virt/xenapi/vm_rrd.xml +++ /dev/null @@ -1,1101 +0,0 @@
[vm_rrd.xml fixture body elided: the file's XML markup was lost in extraction, leaving only bare sample values. The 1101 deleted lines held a XenServer RRD dump with data sources cpu0, memory, memory_target, vif_0_tx/rx, vif_2_tx/rx, vbd_xvda_write/read, vbd_xvdb_write/read and memory_internal_free, plus AVERAGE archives at 1, 12, 720 and 17280 PDPs per row.]
diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py deleted file mode 100644 index 2e9a5d372d8f..000000000000 --- a/nova/virt/xenapi/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License.
- -""" -:mod:`xenapi` -- Nova support for XenServer and XCP through XenAPI -================================================================== -""" -from nova.virt.xenapi import driver - -XenAPIDriver = driver.XenAPIDriver diff --git a/nova/virt/xenapi/agent.py b/nova/virt/xenapi/agent.py deleted file mode 100644 index e763cc2cfd7a..000000000000 --- a/nova/virt/xenapi/agent.py +++ /dev/null @@ -1,442 +0,0 @@ -# Copyright (c) 2010 Citrix Systems, Inc. -# Copyright 2010-2012 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import binascii -from distutils import version -import os -import sys -import time - -from os_xenapi.client import host_agent -from os_xenapi.client import XenAPI -from oslo_concurrency import processutils -from oslo_log import log as logging -from oslo_serialization import base64 -from oslo_serialization import jsonutils -from oslo_utils import encodeutils -from oslo_utils import strutils -from oslo_utils import uuidutils - -from nova.api.metadata import password -from nova.compute import utils as compute_utils -import nova.conf -from nova import context -from nova import crypto -from nova import exception -from nova.i18n import _ -from nova import objects -from nova import utils - - -USE_AGENT_KEY = "xenapi_use_agent" -USE_AGENT_SM_KEY = utils.SM_IMAGE_PROP_PREFIX + USE_AGENT_KEY -SKIP_SSH_KEY = "xenapi_skip_agent_inject_ssh" -SKIP_SSH_SM_KEY = utils.SM_IMAGE_PROP_PREFIX + SKIP_SSH_KEY -SKIP_FILES_AT_BOOT_KEY = "xenapi_skip_agent_inject_files_at_boot" -SKIP_FILES_AT_BOOT_SM_KEY = utils.SM_IMAGE_PROP_PREFIX \ - + SKIP_FILES_AT_BOOT_KEY - -LOG = logging.getLogger(__name__) -CONF = nova.conf.CONF - - -def _call_agent(session, instance, vm_ref, method, addl_args=None, - timeout=None, success_codes=None): - """Abstracts out the interaction with the agent xenapi plugin.""" - if addl_args is None: - addl_args = {} - if timeout is None: - timeout = CONF.xenserver.agent_timeout - if success_codes is None: - success_codes = ['0'] - - # always fetch domid because VM may have rebooted - dom_id = session.VM.get_domid(vm_ref) - uuid = uuidutils.generate_uuid() - args = { - 'id': uuid, - 'dom_id': str(dom_id), - 'timeout': str(timeout), - } - - try: - ret = method(session, uuid, dom_id, timeout, **addl_args) - except XenAPI.Failure as e: - err_msg = e.details[-1].splitlines()[-1] - if 'TIMEOUT:' in err_msg: - LOG.error('TIMEOUT: The call to %(method)s timed out. ' - 'args=%(args)r', - {'method': method, 'args': args}, instance=instance) - raise exception.AgentTimeout(method=method.__name__) - elif 'REBOOT:' in err_msg: - LOG.debug('REBOOT: The call to %(method)s detected a reboot. ' - 'args=%(args)r', - {'method': method, 'args': args}, instance=instance) - _wait_for_new_dom_id(session, vm_ref, dom_id, method) - return _call_agent(session, instance, vm_ref, method, - addl_args, timeout, success_codes) - elif 'NOT IMPLEMENTED:' in err_msg: - LOG.error('NOT IMPLEMENTED: The call to %(method)s is not ' - 'supported by the agent. 
args=%(args)r', - {'method': method, 'args': args}, instance=instance) - raise exception.AgentNotImplemented(method=method.__name__) - else: - LOG.error('The call to %(method)s returned an error: %(e)s. ' - 'args=%(args)r', - {'method': method, 'args': args, 'e': e}, - instance=instance) - raise exception.AgentError(method=method.__name__) - - if not isinstance(ret, dict): - try: - ret = jsonutils.loads(ret) - except TypeError: - LOG.error('The agent call to %(method)s returned an invalid ' - 'response: %(ret)r. args=%(args)r', - {'method': method, 'ret': ret, 'args': args}, - instance=instance) - raise exception.AgentError(method=method.__name__) - - if ret['returncode'] not in success_codes: - LOG.error('The agent call to %(method)s returned ' - 'an error: %(ret)r. args=%(args)r', - {'method': method, 'ret': ret, 'args': args}, - instance=instance) - raise exception.AgentError(method=method.__name__) - - LOG.debug('The agent call to %(method)s was successful: ' - '%(ret)r. args=%(args)r', - {'method': method, 'ret': ret, 'args': args}, - instance=instance) - - # Some old versions of the Windows agent have a trailing \\r\\n - # (ie CRLF escaped) for some reason. Strip that off. - return ret['message'].replace('\\r\\n', '') - - -def _wait_for_new_dom_id(session, vm_ref, old_dom_id, method): - expiration = time.time() + CONF.xenserver.agent_timeout - while True: - dom_id = session.VM.get_domid(vm_ref) - - if dom_id and dom_id != "-1" and dom_id != old_dom_id: - LOG.debug("Found new dom_id %s", dom_id) - return - - if time.time() > expiration: - LOG.debug("Timed out waiting for new dom_id %s", dom_id) - raise exception.AgentTimeout(method=method.__name__) - - time.sleep(1) - - -def is_upgrade_required(current_version, available_version): - # NOTE(johngarbutt): agent version numbers are four part, - # so we need to use the loose version to compare them - current = version.LooseVersion(current_version) - available = version.LooseVersion(available_version) - return available > current - - -class XenAPIBasedAgent(object): - def __init__(self, session, virtapi, instance, vm_ref): - self.session = session - self.virtapi = virtapi - self.instance = instance - self.vm_ref = vm_ref - - def _add_instance_fault(self, error, exc_info): - LOG.warning("Ignoring error while configuring instance with agent: %s", - error, instance=self.instance, exc_info=True) - try: - ctxt = context.get_admin_context() - compute_utils.add_instance_fault_from_exc( - ctxt, self.instance, error, exc_info=exc_info) - except Exception: - LOG.debug("Error setting instance fault.", exc_info=True) - - def _call_agent(self, method, addl_args=None, timeout=None, - success_codes=None, ignore_errors=True): - try: - return _call_agent(self.session, self.instance, self.vm_ref, - method, addl_args, timeout, success_codes) - except exception.AgentError as error: - if ignore_errors: - self._add_instance_fault(error, sys.exc_info()) - else: - raise - - def get_version(self): - LOG.debug('Querying agent version', instance=self.instance) - - # The agent can be slow to start for a variety of reasons. On Windows, - # it will generally perform a setup process on first boot that can - # take a couple of minutes and then reboot. On Linux, the system can - # also take a while to boot. 
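
# [Aside, not part of the deleted module: a minimal sketch of why the
# is_upgrade_required() helper above uses LooseVersion. Four-part agent
# versions compare numerically segment by segment, where a plain string
# comparison would order them lexicographically. The helper name below is
# local to this sketch.]
from distutils import version

def _needs_upgrade(current, available):
    return version.LooseVersion(available) > version.LooseVersion(current)

assert _needs_upgrade('1.2.9.5', '1.2.10.0')      # 10 > 9 numerically
assert not _needs_upgrade('1.2.10.0', '1.2.9.5')  # a string compare would wrongly say yes
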
- expiration = time.time() + CONF.xenserver.agent_version_timeout - while True: - try: - # NOTE(johngarbutt): we can't use the xapi plugin - # timeout, because the domid may change when - # the server is rebooted - return self._call_agent(host_agent.version, - ignore_errors=False) - except exception.AgentError as error: - if time.time() > expiration: - self._add_instance_fault(error, sys.exc_info()) - return - - def _get_expected_build(self): - ctxt = context.get_admin_context() - agent_build = objects.Agent.get_by_triple( - ctxt, 'xen', self.instance['os_type'], - self.instance['architecture']) - if agent_build: - LOG.debug('Latest agent build for %(hypervisor)s/%(os)s' - '/%(architecture)s is %(version)s', { - 'hypervisor': agent_build.hypervisor, - 'os': agent_build.os, - 'architecture': agent_build.architecture, - 'version': agent_build.version}) - else: - LOG.debug('No agent build found for %(hypervisor)s/%(os)s' - '/%(architecture)s', { - 'hypervisor': 'xen', - 'os': self.instance['os_type'], - 'architecture': self.instance['architecture']}) - return agent_build - - def update_if_needed(self, version): - agent_build = self._get_expected_build() - if version and agent_build and \ - is_upgrade_required(version, agent_build.version): - LOG.debug('Updating agent to %s', agent_build.version, - instance=self.instance) - self._perform_update(agent_build) - else: - LOG.debug('Skipping agent update.', instance=self.instance) - - def _perform_update(self, agent_build): - args = {'url': agent_build.url, 'md5sum': agent_build.md5hash} - try: - self._call_agent(host_agent.agent_update, args) - except exception.AgentError as exc: - # Silently fail for agent upgrades - LOG.warning("Unable to update the agent due to: %(exc)s", - dict(exc=exc), instance=self.instance) - - def _exchange_key_with_agent(self): - dh = SimpleDH() - args = {'pub': str(dh.get_public())} - resp = self._call_agent(host_agent.key_init, args, - success_codes=['D0'], ignore_errors=False) - agent_pub = int(resp) - dh.compute_shared(agent_pub) - return dh - - def _save_instance_password_if_sshkey_present(self, new_pass): - sshkey = self.instance.get('key_data') - if sshkey and sshkey.startswith("ssh-rsa"): - ctxt = context.get_admin_context() - enc = crypto.ssh_encrypt_text(sshkey, new_pass) - self.instance.system_metadata.update( - password.convert_password(ctxt, base64.encode_as_text(enc))) - self.instance.save() - - def set_admin_password(self, new_pass): - """Set the root/admin password on the VM instance. - - This is done via an agent running on the VM. Communication between nova - and the agent is done via writing xenstore records. Since communication - is done over the XenAPI RPC calls, we need to encrypt the password. - We're using a simple Diffie-Hellman class instead of a more advanced - library (such as M2Crypto) for compatibility with the agent code. - """ - LOG.debug('Setting admin password', instance=self.instance) - - try: - dh = self._exchange_key_with_agent() - except exception.AgentError as error: - self._add_instance_fault(error, sys.exc_info()) - return - - # Some old versions of Linux and Windows agent expect trailing \n - # on password to work correctly. 
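
# [Aside, not part of the deleted module: a hedged sketch of the key
# agreement performed by _exchange_key_with_agent() above, using the same
# prime and base as the SimpleDH class defined at the bottom of this file,
# but plain modular arithmetic instead of openssl; all names here are
# illustrative.]
import binascii
import os

_PRIME = 162259276829213363391578010288127
_BASE = 5

def _dh_keypair():
    private = int(binascii.hexlify(os.urandom(10)), 16)
    return private, pow(_BASE, private, _PRIME)

nova_private, nova_public = _dh_keypair()    # nova's half of the exchange
agent_private, agent_public = _dh_keypair()  # agent's half, sent via xenstore
# Both ends derive the same shared secret, which SimpleDH then feeds to
# openssl as the aes-128-cbc passphrase:
assert (pow(agent_public, nova_private, _PRIME) ==
        pow(nova_public, agent_private, _PRIME))
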
- enc_pass = dh.encrypt(new_pass + '\n') - - args = {'enc_pass': enc_pass} - self._call_agent(host_agent.password, args) - self._save_instance_password_if_sshkey_present(new_pass) - - def inject_ssh_key(self): - sshkey = self.instance.get('key_data') - if not sshkey: - return - - if self.instance['os_type'] == 'windows': - LOG.debug("Skipping setting of ssh key for Windows.", - instance=self.instance) - return - - if self._skip_ssh_key_inject(): - LOG.debug("Skipping agent ssh key injection for this image.", - instance=self.instance) - return - - sshkey = str(sshkey) - keyfile = '/root/.ssh/authorized_keys' - key_data = ''.join([ - '\n', - '# The following ssh key was injected by Nova', - '\n', - sshkey.strip(), - '\n', - ]) - return self.inject_file(keyfile, key_data) - - def inject_files(self, injected_files): - if self._skip_inject_files_at_boot(): - LOG.debug("Skipping agent file injection for this image.", - instance=self.instance) - else: - for path, contents in injected_files: - self.inject_file(path, contents) - - def inject_file(self, path, contents): - LOG.debug('Injecting file path: %r', path, instance=self.instance) - - # Files/paths must be base64-encoded for transmission to agent - b64_path = base64.encode_as_bytes(path) - b64_contents = base64.encode_as_bytes(contents) - - args = {'b64_path': b64_path, 'b64_contents': b64_contents} - return self._call_agent(host_agent.inject_file, args) - - def resetnetwork(self): - LOG.debug('Resetting network', instance=self.instance) - - # NOTE(johngarbutt) old FreeBSD and Gentoo agents return 500 on success - return self._call_agent(host_agent.reset_network, - timeout=CONF.xenserver.agent_resetnetwork_timeout, - success_codes=['0', '500']) - - def _skip_ssh_key_inject(self): - return self._get_sys_meta_key(SKIP_SSH_SM_KEY) - - def _skip_inject_files_at_boot(self): - return self._get_sys_meta_key(SKIP_FILES_AT_BOOT_SM_KEY) - - def _get_sys_meta_key(self, key): - sys_meta = utils.instance_sys_meta(self.instance) - raw_value = sys_meta.get(key, 'False') - return strutils.bool_from_string(raw_value, strict=False) - - -def find_guest_agent(base_dir): - """tries to locate a guest agent at the path - specified by agent_rel_path - """ - if CONF.xenserver.disable_agent: - return False - - agent_rel_path = CONF.xenserver.agent_path - agent_path = os.path.join(base_dir, agent_rel_path) - if os.path.isfile(agent_path): - # The presence of the guest agent - # file indicates that this instance can - # reconfigure the network from xenstore data, - # so manipulation of files in /etc is not - # required - LOG.info('XenServer tools installed in this ' - 'image are capable of network injection. ' - 'Networking files will not be manipulated') - return True - xe_daemon_filename = os.path.join(base_dir, - 'usr', 'sbin', 'xe-daemon') - if os.path.isfile(xe_daemon_filename): - LOG.info('XenServer tools are present ' - 'in this image but are not capable ' - 'of network injection') - else: - LOG.info('XenServer tools are not installed in this image') - return False - - -def should_use_agent(instance): - sys_meta = utils.instance_sys_meta(instance) - if USE_AGENT_SM_KEY not in sys_meta: - return CONF.xenserver.use_agent_default - else: - use_agent_raw = sys_meta[USE_AGENT_SM_KEY] - try: - return strutils.bool_from_string(use_agent_raw, strict=True) - except ValueError: - LOG.warning("Invalid 'agent_present' value. 
" - "Falling back to the default.", - instance=instance) - return CONF.xenserver.use_agent_default - - -class SimpleDH(object): - """This class wraps all the functionality needed to implement - basic Diffie-Hellman-Merkle key exchange in Python. It features - intelligent defaults for the prime and base numbers needed for the - calculation, while allowing you to supply your own. It requires that - the openssl binary be installed on the system on which this is run, - as it uses that to handle the encryption and decryption. If openssl - is not available, a RuntimeError will be raised. - """ - def __init__(self): - self._prime = 162259276829213363391578010288127 - self._base = 5 - self._public = None - self._shared = None - self.generate_private() - - def generate_private(self): - self._private = int(binascii.hexlify(os.urandom(10)), 16) - return self._private - - def get_public(self): - self._public = pow(self._base, self._private, self._prime) - return self._public - - def compute_shared(self, other): - self._shared = pow(other, self._private, self._prime) - return self._shared - - def _run_ssl(self, text, decrypt=False): - cmd = ['openssl', 'aes-128-cbc', '-A', '-a', '-pass', - 'pass:%s' % self._shared, '-nosalt'] - if decrypt: - cmd.append('-d') - try: - out, err = processutils.execute( - *cmd, - process_input=encodeutils.safe_encode(text), - check_exit_code=True) - if err: - LOG.warning("OpenSSL stderr: %s", err) - return out - except processutils.ProcessExecutionError as e: - raise RuntimeError( - _('OpenSSL errored with exit code %(exit_code)d: %(stderr)s') % - {'exit_code': e.exit_code, 'stderr': e.stderr}) - - def encrypt(self, text): - return self._run_ssl(text).strip('\n') - - def decrypt(self, text): - return self._run_ssl(text, decrypt=True) diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py deleted file mode 100644 index e61a4fdda726..000000000000 --- a/nova/virt/xenapi/driver.py +++ /dev/null @@ -1,861 +0,0 @@ -# Copyright (c) 2010 Citrix Systems, Inc. -# Copyright 2010 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -A driver for XenServer or Xen Cloud Platform. 
- -**Variable Naming Scheme** - -- suffix "_ref" for opaque references -- suffix "_uuid" for UUIDs -- suffix "_rec" for record objects -""" - -import os_resource_classes as orc -from os_xenapi.client import session -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import units -from oslo_utils import versionutils -import six.moves.urllib.parse as urlparse - -import nova.conf -from nova import context as nova_context -from nova import exception -from nova.i18n import _ -from nova import objects -from nova.virt import driver -from nova.virt.xenapi import host -from nova.virt.xenapi import pool -from nova.virt.xenapi import vm_utils -from nova.virt.xenapi import vmops -from nova.virt.xenapi import volumeops - -LOG = logging.getLogger(__name__) - -CONF = nova.conf.CONF - - -def invalid_option(option_name, recommended_value): - LOG.exception('Current value of ' - 'CONF.xenserver.%(option)s option incompatible with ' - 'CONF.xenserver.independent_compute=True. ' - 'Consider using "%(recommended)s"', - {'option': option_name, - 'recommended': recommended_value}) - raise exception.NotSupportedWithOption( - operation=option_name, - option='CONF.xenserver.independent_compute') - - -class XenAPIDriver(driver.ComputeDriver): - """A connection to XenServer or Xen Cloud Platform.""" - capabilities = { - "has_imagecache": False, - "supports_evacuate": False, - "supports_migrate_to_same_host": False, - "supports_attach_interface": True, - "supports_device_tagging": True, - "supports_multiattach": False, - "supports_trusted_certs": False, - "supports_pcpus": False, - "supports_accelerators": False, - - # Image type support flags - "supports_image_type_aki": False, - "supports_image_type_ami": False, - "supports_image_type_ari": False, - "supports_image_type_iso": False, - "supports_image_type_qcow2": False, - "supports_image_type_raw": True, - "supports_image_type_vdi": True, - "supports_image_type_vhd": True, - "supports_image_type_vhdx": False, - "supports_image_type_vmdk": False, - "supports_image_type_ploop": False, - } - - def __init__(self, virtapi, read_only=False): - super(XenAPIDriver, self).__init__(virtapi) - - url = CONF.xenserver.connection_url - username = CONF.xenserver.connection_username - password = CONF.xenserver.connection_password - if not url or password is None: - raise Exception(_('Must specify connection_url, ' - 'connection_username (optionally), and ' - 'connection_password to use ' - 'compute_driver=xenapi.XenAPIDriver')) - - self._session = session.XenAPISession(url, username, password, - originator="nova") - self._volumeops = volumeops.VolumeOps(self._session) - self._host_state = None - self._host = host.Host(self._session, self.virtapi) - self._vmops = vmops.VMOps(self._session, self.virtapi) - self._initiator = None - self._hypervisor_hostname = None - self._pool = pool.ResourcePool(self._session, self.virtapi) - - @property - def host_state(self): - if not self._host_state: - self._host_state = host.HostState(self._session) - return self._host_state - - def init_host(self, host): - LOG.warning('The xenapi driver is deprecated and may be removed in a ' - 'future release. The driver is not tested by the ' - 'OpenStack project nor does it have clear maintainer(s) ' - 'and thus its quality can not be ensured. 
If you are ' - 'using the driver in production please let us know in ' - 'freenode IRC and/or the openstack-discuss mailing list.') - - if CONF.xenserver.independent_compute: - # Check various options are in the correct state: - if CONF.xenserver.check_host: - invalid_option('CONF.xenserver.check_host', False) - if CONF.flat_injected: - invalid_option('CONF.flat_injected', False) - if CONF.default_ephemeral_format and \ - CONF.default_ephemeral_format != 'ext3': - invalid_option('CONF.default_ephemeral_format', 'ext3') - - if CONF.xenserver.check_host: - vm_utils.ensure_correct_host(self._session) - - if not CONF.xenserver.independent_compute: - try: - vm_utils.cleanup_attached_vdis(self._session) - except Exception: - LOG.exception('Failure while cleaning up attached VDIs') - - def instance_exists(self, instance): - """Checks existence of an instance on the host. - - :param instance: The instance to lookup - - Returns True if supplied instance exists on the host, False otherwise. - - NOTE(belliott): This is an override of the base method for - efficiency. - """ - return self._vmops.instance_exists(instance.name) - - def list_instances(self): - """List VM instances.""" - return self._vmops.list_instances() - - def list_instance_uuids(self): - """Get the list of nova instance uuids for VMs found on the - hypervisor. - """ - return self._vmops.list_instance_uuids() - - def _is_vgpu_allocated(self, allocations): - # check if allocated vGPUs - if not allocations: - # If no allocations, there is no vGPU request. - return False - RC_VGPU = orc.VGPU - for rp in allocations: - res = allocations[rp]['resources'] - if res and RC_VGPU in res and res[RC_VGPU] > 0: - return True - return False - - def _get_vgpu_info(self, allocations): - """Get vGPU info basing on the allocations. - - :param allocations: Information about resources allocated to the - instance via placement, of the form returned by - SchedulerReportClient.get_allocations_for_consumer. - :returns: Dictionary describing vGPU info if any vGPU allocated; - None otherwise. - :raises: exception.ComputeResourcesUnavailable if there is no - available vGPUs. - """ - if not self._is_vgpu_allocated(allocations): - return None - - # NOTE(jianghuaw): At the moment, we associate all vGPUs resource to - # the compute node regardless which GPU group the vGPUs belong to, so - # we need search all GPU groups until we got one group which has - # remaining capacity to supply one vGPU. Once we switch to the - # nested resource providers, the allocations will contain the resource - # provider which represents a particular GPU group. It's able to get - # the GPU group and vGPU type directly by using the resource provider's - # uuid. Then we can consider moving this function to vmops, as there is - # no need to query host stats to get all GPU groups. - host_stats = self.host_state.get_host_stats(refresh=True) - vgpu_stats = host_stats['vgpu_stats'] - for grp_uuid in vgpu_stats: - if vgpu_stats[grp_uuid]['remaining'] > 0: - # NOTE(jianghuaw): As XenServer only supports single vGPU per - # VM, we've restricted the inventory data having `max_unit` as - # 1. If it reached here, surely only one GPU is allocated. - # So just return the GPU group uuid and vGPU type uuid once - # we got one group which still has remaining vGPUs. - return dict(gpu_grp_uuid=grp_uuid, - vgpu_type_uuid=vgpu_stats[grp_uuid]['uuid']) - # No remaining vGPU available: e.g. the vGPU resource has been used by - # other instance or the vGPU has been changed to be disabled. 
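
# [Aside, not part of the deleted module: a hedged sketch of the GPU-group
# scan in _get_vgpu_info() above, with made-up host stats, showing the
# shape of vgpu_stats and of the returned vGPU info dict; the uuids are
# illustrative.]
vgpu_stats = {
    'grp-uuid-1': {'uuid': 'vgpu-type-uuid', 'remaining': 0, 'total': 4},
    'grp-uuid-2': {'uuid': 'vgpu-type-uuid', 'remaining': 2, 'total': 4},
}
vgpu_info = next(
    (dict(gpu_grp_uuid=grp, vgpu_type_uuid=stats['uuid'])
     for grp, stats in vgpu_stats.items() if stats['remaining'] > 0),
    None)  # None corresponds to raising ComputeResourcesUnavailable below
assert vgpu_info == {'gpu_grp_uuid': 'grp-uuid-2',
                     'vgpu_type_uuid': 'vgpu-type-uuid'}
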
- raise exception.ComputeResourcesUnavailable( - reason='vGPU resource is not available') - - def spawn(self, context, instance, image_meta, injected_files, - admin_password, allocations, network_info=None, - block_device_info=None, power_on=True, accel_info=None): - """Create VM instance.""" - vgpu_info = self._get_vgpu_info(allocations) - self._vmops.spawn(context, instance, image_meta, injected_files, - admin_password, network_info, block_device_info, - vgpu_info) - - def confirm_migration(self, context, migration, instance, network_info): - """Confirms a resize, destroying the source VM.""" - self._vmops.confirm_migration(migration, instance, network_info) - - def finish_revert_migration(self, context, instance, network_info, - migration, block_device_info=None, - power_on=True): - """Finish reverting a resize.""" - # NOTE(vish): Xen currently does not use network info. - self._vmops.finish_revert_migration(context, instance, - block_device_info, - power_on) - - def finish_migration(self, context, migration, instance, disk_info, - network_info, image_meta, resize_instance, - allocations, block_device_info=None, power_on=True): - """Completes a resize, turning on the migrated instance.""" - self._vmops.finish_migration(context, migration, instance, disk_info, - network_info, image_meta, resize_instance, - block_device_info, power_on) - - def snapshot(self, context, instance, image_id, update_task_state): - """Create snapshot from a running VM instance.""" - self._vmops.snapshot(context, instance, image_id, update_task_state) - - def post_interrupted_snapshot_cleanup(self, context, instance): - """Cleans up any resources left after a failed snapshot.""" - self._vmops.post_interrupted_snapshot_cleanup(context, instance) - - def reboot(self, context, instance, network_info, reboot_type, - block_device_info=None, bad_volumes_callback=None, - accel_info=None): - """Reboot VM instance.""" - self._vmops.reboot(instance, reboot_type, - bad_volumes_callback=bad_volumes_callback) - - def set_admin_password(self, instance, new_pass): - """Set the root/admin password on the VM instance.""" - self._vmops.set_admin_password(instance, new_pass) - - def change_instance_metadata(self, context, instance, diff): - """Apply a diff to the instance metadata.""" - self._vmops.change_instance_metadata(instance, diff) - - def destroy(self, context, instance, network_info, block_device_info=None, - destroy_disks=True): - """Destroy VM instance.""" - self._vmops.destroy(instance, network_info, block_device_info, - destroy_disks) - - def cleanup(self, context, instance, network_info, block_device_info=None, - destroy_disks=True, migrate_data=None, destroy_vifs=True): - """Cleanup after instance being destroyed by Hypervisor.""" - pass - - def pause(self, instance): - """Pause VM instance.""" - self._vmops.pause(instance) - - def unpause(self, instance): - """Unpause paused VM instance.""" - self._vmops.unpause(instance) - - def migrate_disk_and_power_off(self, context, instance, dest, - flavor, network_info, - block_device_info=None, - timeout=0, retry_interval=0): - """Transfers the VHD of a running instance to another host, then shuts - off the instance copies over the COW disk - """ - # NOTE(vish): Xen currently does not use network info. 
- # TODO(PhilDay): Add support for timeout (clean shutdown) - return self._vmops.migrate_disk_and_power_off(context, instance, - dest, flavor, block_device_info) - - def suspend(self, context, instance): - """suspend the specified instance.""" - self._vmops.suspend(instance) - - def resume(self, context, instance, network_info, block_device_info=None): - """resume the specified instance.""" - self._vmops.resume(instance) - - def rescue(self, context, instance, network_info, image_meta, - rescue_password, block_device_info): - """Rescue the specified instance.""" - self._vmops.rescue(context, instance, network_info, image_meta, - rescue_password) - - def set_bootable(self, instance, is_bootable): - """Set the ability to power on/off an instance.""" - self._vmops.set_bootable(instance, is_bootable) - - def unrescue( - self, - context: nova_context.RequestContext, - instance: 'objects.Instance', - ): - """Unrescue the specified instance.""" - self._vmops.unrescue(instance) - - def power_off(self, instance, timeout=0, retry_interval=0): - """Power off the specified instance.""" - # TODO(PhilDay): Add support for timeout (clean shutdown) - self._vmops.power_off(instance) - - def power_on(self, context, instance, network_info, - block_device_info=None, accel_info=None): - """Power on the specified instance.""" - self._vmops.power_on(instance) - - def soft_delete(self, instance): - """Soft delete the specified instance.""" - self._vmops.soft_delete(instance) - - def restore(self, instance): - """Restore the specified instance.""" - self._vmops.restore(instance) - - def poll_rebooting_instances(self, timeout, instances): - """Poll for rebooting instances.""" - self._vmops.poll_rebooting_instances(timeout, instances) - - def reset_network(self, instance): - """reset networking for specified instance.""" - self._vmops.reset_network(instance) - - def inject_network_info(self, instance, nw_info): - """inject network info for specified instance.""" - self._vmops.inject_network_info(instance, nw_info) - - def plug_vifs(self, instance, network_info): - """Plug VIFs into networks.""" - self._vmops.plug_vifs(instance, network_info) - - def unplug_vifs(self, instance, network_info): - """Unplug VIFs from networks.""" - self._vmops.unplug_vifs(instance, network_info) - - def get_info(self, instance, use_cache=True): - """Return data about VM instance.""" - return self._vmops.get_info(instance) - - def get_diagnostics(self, instance): - """Return data about VM diagnostics.""" - return self._vmops.get_diagnostics(instance) - - def get_instance_diagnostics(self, instance): - """Return data about VM diagnostics.""" - return self._vmops.get_instance_diagnostics(instance) - - def get_all_bw_counters(self, instances): - """Return bandwidth usage counters for each interface on each - running VM. - """ - - # we only care about VMs that correspond to a nova-managed - # instance: - imap = {inst['name']: inst['uuid'] for inst in instances} - bwcounters = [] - - # get a dictionary of instance names. values are dictionaries - # of mac addresses with values that are the bw counters: - # e.g. 
{'instance-001' : { 12:34:56:78:90:12 : {'bw_in': 0, ....}} - all_counters = self._vmops.get_all_bw_counters() - for instance_name, counters in all_counters.items(): - if instance_name in imap: - # yes these are stats for a nova-managed vm - # correlate the stats with the nova instance uuid: - for vif_counter in counters.values(): - vif_counter['uuid'] = imap[instance_name] - bwcounters.append(vif_counter) - return bwcounters - - def get_console_output(self, context, instance): - """Return snapshot of console.""" - return self._vmops.get_console_output(instance) - - def get_vnc_console(self, context, instance): - """Return link to instance's VNC console.""" - return self._vmops.get_vnc_console(instance) - - def get_volume_connector(self, instance): - """Return volume connector information.""" - if not self._initiator or not self._hypervisor_hostname: - stats = self.host_state.get_host_stats(refresh=True) - try: - self._initiator = stats['host_other-config']['iscsi_iqn'] - self._hypervisor_hostname = stats['host_hostname'] - except (TypeError, KeyError) as err: - LOG.warning('Could not determine key: %s', err, - instance=instance) - self._initiator = None - return { - 'ip': self._get_block_storage_ip(), - 'initiator': self._initiator, - 'host': self._hypervisor_hostname - } - - def _get_block_storage_ip(self): - # If CONF.my_block_storage_ip is set, use it. - if CONF.my_block_storage_ip != CONF.my_ip: - return CONF.my_block_storage_ip - return self.get_host_ip_addr() - - def get_host_ip_addr(self): - xs_url = urlparse.urlparse(CONF.xenserver.connection_url) - return xs_url.netloc - - def attach_volume(self, context, connection_info, instance, mountpoint, - disk_bus=None, device_type=None, encryption=None): - """Attach volume storage to VM instance.""" - self._volumeops.attach_volume(connection_info, - instance['name'], - mountpoint) - - def detach_volume(self, context, connection_info, instance, mountpoint, - encryption=None): - """Detach volume storage from VM instance.""" - self._volumeops.detach_volume(connection_info, - instance['name'], - mountpoint) - - def get_console_pool_info(self, console_type): - xs_url = urlparse.urlparse(CONF.xenserver.connection_url) - return {'address': xs_url.netloc, - 'username': CONF.xenserver.connection_username, - 'password': CONF.xenserver.connection_password} - - def _get_vgpu_total(self, vgpu_stats): - # NOTE(jianghuaw): Now we only enable one vGPU type in one - # compute node. So normally vgpu_stats should contain only - # one GPU group. If there are multiple GPU groups, they - # must contain the same vGPU type. So just add them up. - total = 0 - for grp_id in vgpu_stats: - total += vgpu_stats[grp_id]['total'] - return total - - def update_provider_tree(self, provider_tree, nodename, allocations=None): - """Update a ProviderTree object with current resource provider and - inventory information. - - :param nova.compute.provider_tree.ProviderTree provider_tree: - A nova.compute.provider_tree.ProviderTree object representing all - the providers in the tree associated with the compute node, and any - sharing providers (those with the ``MISC_SHARES_VIA_AGGREGATE`` - trait) associated via aggregate with any of those providers (but - not *their* tree- or aggregate-associated providers), as currently - known by placement. This object is fully owned by the - update_provider_tree method, and can therefore be modified without - locking/concurrency considerations. 
In other words, the parameter - is passed *by reference* with the expectation that the virt driver - will modify the object. Note, however, that it may contain - providers not directly owned/controlled by the compute host. Care - must be taken not to remove or modify such providers inadvertently. - In addition, providers may be associated with traits and/or - aggregates maintained by outside agents. The - `update_provider_tree`` method must therefore also be careful only - to add/remove traits/aggregates it explicitly controls. - :param nodename: - String name of the compute node (i.e. - ComputeNode.hypervisor_hostname) for which the caller is requesting - updated provider information. Drivers may use this to help identify - the compute node provider in the ProviderTree. Drivers managing - more than one node (e.g. ironic) may also use it as a cue to - indicate which node is being processed by the caller. - :param allocations: - Dict of allocation data of the form: - { $CONSUMER_UUID: { - # The shape of each "allocations" dict below is identical - # to the return from GET /allocations/{consumer_uuid} - "allocations": { - $RP_UUID: { - "generation": $RP_GEN, - "resources": { - $RESOURCE_CLASS: $AMOUNT, - ... - }, - }, - ... - }, - "project_id": $PROJ_ID, - "user_id": $USER_ID, - "consumer_generation": $CONSUMER_GEN, - }, - ... - } - If None, and the method determines that any inventory needs to be - moved (from one provider to another and/or to a different resource - class), the ReshapeNeeded exception must be raised. Otherwise, this - dict must be edited in place to indicate the desired final state of - allocations. Drivers should *only* edit allocation records for - providers whose inventories are being affected by the reshape - operation. - :raises ReshapeNeeded: If allocations is None and any inventory needs - to be moved from one provider to another and/or to a different - resource class. - :raises: ReshapeFailed if the requested tree reshape fails for - whatever reason. - """ - host_stats = self.host_state.get_host_stats(refresh=True) - - vcpus = host_stats['host_cpu_info']['cpu_count'] - memory_mb = int(host_stats['host_memory_total'] / units.Mi) - disk_gb = int(host_stats['disk_total'] / units.Gi) - vgpus = self._get_vgpu_total(host_stats['vgpu_stats']) - # If the inventory record does not exist, the allocation_ratio - # will use the CONF.xxx_allocation_ratio value if xxx_allocation_ratio - # is set, and fallback to use the initial_xxx_allocation_ratio - # otherwise. - inv = provider_tree.data(nodename).inventory - ratios = self._get_allocation_ratios(inv) - result = { - orc.VCPU: { - 'total': vcpus, - 'min_unit': 1, - 'max_unit': vcpus, - 'step_size': 1, - 'allocation_ratio': ratios[orc.VCPU], - 'reserved': CONF.reserved_host_cpus, - }, - orc.MEMORY_MB: { - 'total': memory_mb, - 'min_unit': 1, - 'max_unit': memory_mb, - 'step_size': 1, - 'allocation_ratio': ratios[orc.MEMORY_MB], - 'reserved': CONF.reserved_host_memory_mb, - }, - orc.DISK_GB: { - 'total': disk_gb, - 'min_unit': 1, - 'max_unit': disk_gb, - 'step_size': 1, - 'allocation_ratio': ratios[orc.DISK_GB], - 'reserved': self._get_reserved_host_disk_gb_from_config(), - }, - } - if vgpus > 0: - # Only create inventory for vGPU when driver can supply vGPUs. - # At the moment, XenAPI can support up to one vGPU per VM, - # so max_unit is 1. 
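
# [Aside, not part of the deleted module: a sketch of the VGPU entry that
# the result.update() call below merges into the inventory, with an
# illustrative total; os_resource_classes constants such as orc.VGPU are
# plain strings.]
example_vgpu_inventory = {
    'VGPU': {
        'total': 4,       # from _get_vgpu_total(host_stats['vgpu_stats'])
        'min_unit': 1,
        'max_unit': 1,    # XenAPI attaches at most one vGPU per VM
        'step_size': 1,
    },
}
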
- result.update( - { - orc.VGPU: { - 'total': vgpus, - 'min_unit': 1, - 'max_unit': 1, - 'step_size': 1, - } - } - ) - provider_tree.update_inventory(nodename, result) - - def get_available_resource(self, nodename): - """Retrieve resource information. - - This method is called when nova-compute launches, and - as part of a periodic task that records the results in the DB. - - :param nodename: ignored in this driver - :returns: dictionary describing resources - - """ - host_stats = self.host_state.get_host_stats(refresh=True) - - # Updating host information - total_ram_mb = host_stats['host_memory_total'] / units.Mi - # NOTE(belliott) memory-free-computed is a value provided by XenServer - # for gauging free memory more conservatively than memory-free. - free_ram_mb = host_stats['host_memory_free_computed'] / units.Mi - total_disk_gb = host_stats['disk_total'] / units.Gi - used_disk_gb = host_stats['disk_used'] / units.Gi - allocated_disk_gb = host_stats['disk_allocated'] / units.Gi - hyper_ver = versionutils.convert_version_to_int( - self._session.product_version) - dic = {'vcpus': host_stats['host_cpu_info']['cpu_count'], - 'memory_mb': total_ram_mb, - 'local_gb': total_disk_gb, - 'vcpus_used': host_stats['vcpus_used'], - 'memory_mb_used': total_ram_mb - free_ram_mb, - 'local_gb_used': used_disk_gb, - 'hypervisor_type': 'XenServer', - 'hypervisor_version': hyper_ver, - 'hypervisor_hostname': host_stats['host_hostname'], - 'cpu_info': jsonutils.dumps(host_stats['cpu_model']), - 'disk_available_least': total_disk_gb - allocated_disk_gb, - 'supported_instances': host_stats['supported_instances'], - 'pci_passthrough_devices': jsonutils.dumps( - host_stats['pci_passthrough_devices']), - 'numa_topology': None} - - return dic - - def check_can_live_migrate_destination(self, context, instance, - src_compute_info, dst_compute_info, - block_migration=False, disk_over_commit=False): - """Check if it is possible to execute live migration. - - :param context: security context - :param instance: nova.db.sqlalchemy.models.Instance object - :param block_migration: if true, prepare for block migration - :param disk_over_commit: if true, allow disk over commit - :returns: a XenapiLiveMigrateData object - """ - return self._vmops.check_can_live_migrate_destination(context, - instance, - block_migration, - disk_over_commit) - - def cleanup_live_migration_destination_check(self, context, - dest_check_data): - """Do required cleanup on dest host after check_can_live_migrate calls - - :param context: security context - :param dest_check_data: result of check_can_live_migrate_destination - """ - pass - - def check_can_live_migrate_source(self, context, instance, - dest_check_data, block_device_info=None): - """Check if it is possible to execute live migration. - - This checks if the live migration can succeed, based on the - results from check_can_live_migrate_destination. - - :param context: security context - :param instance: nova.db.sqlalchemy.models.Instance - :param dest_check_data: result of check_can_live_migrate_destination - includes the block_migration flag - :param block_device_info: result of _get_instance_block_device_info - :returns: a XenapiLiveMigrateData object - """ - return self._vmops.check_can_live_migrate_source(context, instance, - dest_check_data) - - def get_instance_disk_info(self, instance, - block_device_info=None): - """Used by libvirt for live migration. We rely on xenapi - checks to do this for us. 
- """ - pass - - def live_migration(self, context, instance, dest, - post_method, recover_method, block_migration=False, - migrate_data=None): - """Performs the live migration of the specified instance. - - :param context: security context - :param instance: - nova.db.sqlalchemy.models.Instance object - instance object that is migrated. - :param dest: destination host - :param post_method: - post operation method. - expected nova.compute.manager._post_live_migration. - :param recover_method: - recovery method when any exception occurs. - expected nova.compute.manager._rollback_live_migration. - :param block_migration: if true, migrate VM disk. - :param migrate_data: a XenapiLiveMigrateData object - """ - self._vmops.live_migrate(context, instance, dest, post_method, - recover_method, block_migration, migrate_data) - - def rollback_live_migration_at_destination(self, context, instance, - network_info, - block_device_info, - destroy_disks=True, - migrate_data=None): - """Performs a live migration rollback. - - :param context: security context - :param instance: instance object that was being migrated - :param network_info: instance network information - :param block_device_info: instance block device information - :param destroy_disks: - if true, destroy disks at destination during cleanup - :param migrate_data: A XenapiLiveMigrateData object - """ - - # NOTE(johngarbutt) Destroying the VM is not appropriate here - # and in the cases where it might make sense, - # XenServer has already done it. - # NOTE(sulo): The only cleanup we do explicitly is to forget - # any volume that was attached to the destination during - # live migration. XAPI should take care of all other cleanup. - self._vmops.rollback_live_migration_at_destination(instance, - network_info, - block_device_info) - - def pre_live_migration(self, context, instance, block_device_info, - network_info, disk_info, migrate_data): - """Preparation live migration. - - :param block_device_info: - It must be the result of _get_instance_volume_bdms() - at compute manager. - :returns: a XenapiLiveMigrateData object - """ - return self._vmops.pre_live_migration(context, instance, - block_device_info, network_info, disk_info, migrate_data) - - def post_live_migration(self, context, instance, block_device_info, - migrate_data=None): - """Post operation of live migration at source host. - - :param context: security context - :instance: instance object that was migrated - :block_device_info: instance block device information - :param migrate_data: a XenapiLiveMigrateData object - """ - self._vmops.post_live_migration(context, instance, migrate_data) - - def post_live_migration_at_source(self, context, instance, network_info): - """Unplug VIFs from networks at source. - - :param context: security context - :param instance: instance object reference - :param network_info: instance network information - """ - self._vmops.post_live_migration_at_source(context, instance, - network_info) - - def post_live_migration_at_destination(self, context, instance, - network_info, - block_migration=False, - block_device_info=None): - """Post operation of live migration at destination host. - - :param context: security context - :param instance: - nova.db.sqlalchemy.models.Instance object - instance object that is migrated. - :param network_info: instance network information - :param block_migration: if true, post operation of block_migration. 
- - """ - self._vmops.post_live_migration_at_destination(context, instance, - network_info, block_device_info, block_device_info) - - def get_available_nodes(self, refresh=False): - stats = self.host_state.get_host_stats(refresh=refresh) - return [stats["hypervisor_hostname"]] - - def host_power_action(self, action): - """The only valid values for 'action' on XenServer are 'reboot' or - 'shutdown', even though the API also accepts 'startup'. As this is - not technically possible on XenServer, since the host is the same - physical machine as the hypervisor, if this is requested, we need to - raise an exception. - """ - if action in ("reboot", "shutdown"): - return self._host.host_power_action(action) - else: - msg = _("Host startup on XenServer is not supported.") - raise NotImplementedError(msg) - - def set_host_enabled(self, enabled): - """Sets the compute host's ability to accept new instances.""" - return self._host.set_host_enabled(enabled) - - def get_host_uptime(self): - """Returns the result of calling "uptime" on the target host.""" - return self._host.get_host_uptime() - - def host_maintenance_mode(self, host, mode): - """Start/Stop host maintenance window. On start, it triggers - guest VMs evacuation. - """ - return self._host.host_maintenance_mode(host, mode) - - def add_to_aggregate(self, context, aggregate, host, **kwargs): - """Add a compute host to an aggregate.""" - return self._pool.add_to_aggregate(context, aggregate, host, **kwargs) - - def remove_from_aggregate(self, context, aggregate, host, **kwargs): - """Remove a compute host from an aggregate.""" - return self._pool.remove_from_aggregate(context, - aggregate, host, **kwargs) - - def undo_aggregate_operation(self, context, op, aggregate, - host, set_error=True): - """Undo aggregate operation when pool error raised.""" - return self._pool.undo_aggregate_operation(context, op, - aggregate, host, set_error) - - def resume_state_on_host_boot(self, context, instance, network_info, - block_device_info=None): - """resume guest state when a host is booted.""" - self._vmops.power_on(instance) - - def get_per_instance_usage(self): - """Get information about instance resource usage. - - :returns: dict of nova uuid => dict of usage info - """ - return self._vmops.get_per_instance_usage() - - def attach_interface(self, context, instance, image_meta, vif): - """Use hotplug to add a network interface to a running instance. - - The counter action to this is :func:`detach_interface`. - - :param context: The request context. - :param nova.objects.instance.Instance instance: - The instance which will get an additional network interface. - :param nova.objects.ImageMeta image_meta: - The metadata of the image of the instance. - :param nova.network.model.VIF vif: - The object which has the information about the interface to attach. - - :raise nova.exception.NovaException: If the attach fails. - - :return: None - """ - self._vmops.attach_interface(instance, vif) - - def detach_interface(self, context, instance, vif): - """Use hotunplug to remove a network interface from a running instance. - - The counter action to this is :func:`attach_interface`. - - :param context: The request context. - :param nova.objects.instance.Instance instance: - The instance which gets a network interface removed. - :param nova.network.model.VIF vif: - The object which has the information about the interface to detach. - - :raise nova.exception.NovaException: If the detach fails. 
- - :return: None - """ - self._vmops.detach_interface(instance, vif) diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py deleted file mode 100644 index e4ba2a048cac..000000000000 --- a/nova/virt/xenapi/fake.py +++ /dev/null @@ -1,1172 +0,0 @@ -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - - -# Parts of this file are based upon xmlrpclib.py, the XML-RPC client -# interface included in the Python distribution. -# -# Copyright (c) 1999-2002 by Secret Labs AB -# Copyright (c) 1999-2002 by Fredrik Lundh -# -# By obtaining, using, and/or copying this software and/or its -# associated documentation, you agree that you have read, understood, -# and will comply with the following terms and conditions: -# -# Permission to use, copy, modify, and distribute this software and -# its associated documentation for any purpose and without fee is -# hereby granted, provided that the above copyright notice appears in -# all copies, and that both that copyright notice and this permission -# notice appear in supporting documentation, and that the name of -# Secret Labs AB or the author not be used in advertising or publicity -# pertaining to distribution of the software without specific, written -# prior permission. -# -# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD -# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- -# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR -# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY -# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, -# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS -# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE -# OF THIS SOFTWARE. -# -------------------------------------------------------------------- - - -""" -A fake XenAPI SDK. 
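-It keeps an in-memory "database" of records in _db_content, keyed first -by class name (e.g. 'VM', 'SR') and then by opaque ref, so that unit -tests can exercise session calls without a real XenServer. 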
-""" - -import base64 -import pickle -import random -import six -from xml.sax import saxutils -import zlib - -from os_xenapi.client import session as xenapi_session -from os_xenapi.client import XenAPI -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import timeutils -from oslo_utils import units -from oslo_utils import uuidutils - - -from nova import exception -from nova.i18n import _ - - -_CLASSES = ['host', 'network', 'session', 'pool', 'SR', 'VBD', - 'PBD', 'VDI', 'VIF', 'PIF', 'VM', 'VLAN', 'task', - 'GPU_group', 'PGPU', 'VGPU_type'] -_after_create_functions = {} -_destroy_functions = {} - -_db_content = {} - -LOG = logging.getLogger(__name__) - - -def add_to_dict(functions): - """A decorator that adds a function to dictionary.""" - - def decorator(func): - functions[func.__name__] = func - return func - return decorator - - -def reset(): - for c in _CLASSES: - _db_content[c] = {} - create_host('fake') - create_vm('fake dom 0', - 'Running', - is_a_template=False, - is_control_domain=True, - domid='0') - - -def reset_table(table): - if table not in _CLASSES: - return - _db_content[table] = {} - - -def _create_pool(name_label): - return _create_object('pool', - {'name_label': name_label}) - - -def create_host(name_label, hostname='fake_name', address='fake_addr', - software_version={'platform_name': 'fake_platform', - 'platform_version': '1.0.0'}): - host_ref = _create_object('host', - {'name_label': name_label, - 'hostname': hostname, - 'address': address, - 'software_version': software_version}) - host_default_sr_ref = _create_local_srs(host_ref) - _create_local_pif(host_ref) - - # Create a pool if we don't have one already - if len(_db_content['pool']) == 0: - pool_ref = _create_pool('') - _db_content['pool'][pool_ref]['master'] = host_ref - _db_content['pool'][pool_ref]['default-SR'] = host_default_sr_ref - _db_content['pool'][pool_ref]['suspend-image-SR'] = host_default_sr_ref - - -def create_network(name_label, bridge): - return _create_object('network', - {'name_label': name_label, - 'bridge': bridge}) - - -def create_vm(name_label, status, **kwargs): - if status == 'Running': - domid = "%d" % random.randrange(1, 1 << 16) - resident_on = list(_db_content['host'])[0] - else: - domid = "-1" - resident_on = '' - - vm_rec = {'name_label': name_label, - 'domid': domid, - 'power_state': status, - 'blocked_operations': {}, - 'resident_on': resident_on} - vm_rec.update(kwargs.copy()) - vm_ref = _create_object('VM', vm_rec) - after_VM_create(vm_ref, vm_rec) - return vm_ref - - -@add_to_dict(_destroy_functions) -def destroy_vm(vm_ref): - vm_rec = _db_content['VM'][vm_ref] - - vbd_refs = vm_rec['VBDs'] - # NOTE(johannes): Shallow copy since destroy_vbd will remove itself - # from the list - for vbd_ref in vbd_refs[:]: - destroy_vbd(vbd_ref) - - del _db_content['VM'][vm_ref] - - -@add_to_dict(_destroy_functions) -def destroy_vbd(vbd_ref): - vbd_rec = _db_content['VBD'][vbd_ref] - - vm_ref = vbd_rec['VM'] - vm_rec = _db_content['VM'][vm_ref] - vm_rec['VBDs'].remove(vbd_ref) - - vdi_ref = vbd_rec['VDI'] - vdi_rec = _db_content['VDI'][vdi_ref] - vdi_rec['VBDs'].remove(vbd_ref) - - del _db_content['VBD'][vbd_ref] - - -@add_to_dict(_destroy_functions) -def destroy_vdi(vdi_ref): - vdi_rec = _db_content['VDI'][vdi_ref] - - vbd_refs = vdi_rec['VBDs'] - # NOTE(johannes): Shallow copy since destroy_vbd will remove itself - # from the list - for vbd_ref in vbd_refs[:]: - destroy_vbd(vbd_ref) - - del _db_content['VDI'][vdi_ref] - - -def 
create_vdi(name_label, sr_ref, **kwargs): - vdi_rec = { - 'SR': sr_ref, - 'read_only': False, - 'type': '', - 'name_label': name_label, - 'name_description': '', - 'sharable': False, - 'other_config': {}, - 'location': '', - 'xenstore_data': {}, - 'sm_config': {'vhd-parent': None}, - 'physical_utilisation': '123', - 'managed': True, - } - vdi_rec.update(kwargs) - vdi_ref = _create_object('VDI', vdi_rec) - after_VDI_create(vdi_ref, vdi_rec) - return vdi_ref - - -@add_to_dict(_after_create_functions) -def after_VDI_create(vdi_ref, vdi_rec): - vdi_rec.setdefault('VBDs', []) - - -def create_vbd(vm_ref, vdi_ref, userdevice=0, other_config=None): - if other_config is None: - other_config = {} - - vbd_rec = {'VM': vm_ref, - 'VDI': vdi_ref, - 'userdevice': str(userdevice), - 'currently_attached': False, - 'other_config': other_config} - vbd_ref = _create_object('VBD', vbd_rec) - after_VBD_create(vbd_ref, vbd_rec) - return vbd_ref - - -@add_to_dict(_after_create_functions) -def after_VBD_create(vbd_ref, vbd_rec): - """Create read-only fields and backref from VM and VDI to VBD when VBD - is created. - """ - vbd_rec['currently_attached'] = False - - # TODO(snikitin): Find a better way for generating the device name. - # Usually 'userdevice' has numeric values like '1', '2', '3', etc. - # Ideally they should be transformed to something like 'xvda', 'xvdb', - # 'xvdx', etc. But 'userdevice' may also be 'autodetect', 'fake' or even - # unset. We should handle this in the future. - vbd_rec['device'] = vbd_rec.get('userdevice', '') - vbd_rec.setdefault('other_config', {}) - - vm_ref = vbd_rec['VM'] - vm_rec = _db_content['VM'][vm_ref] - vm_rec['VBDs'].append(vbd_ref) - - vm_name_label = _db_content['VM'][vm_ref]['name_label'] - vbd_rec['vm_name_label'] = vm_name_label - - vdi_ref = vbd_rec['VDI'] - if vdi_ref and vdi_ref != "OpaqueRef:NULL": - vdi_rec = _db_content['VDI'][vdi_ref] - vdi_rec['VBDs'].append(vbd_ref) - - -@add_to_dict(_after_create_functions) -def after_VIF_create(vif_ref, vif_rec): - """Create backref from VM to VIF when VIF is created. - """ - vm_ref = vif_rec['VM'] - vm_rec = _db_content['VM'][vm_ref] - vm_rec['VIFs'].append(vif_ref) - - -@add_to_dict(_after_create_functions) -def after_VM_create(vm_ref, vm_rec): - """Create read-only fields in the VM record.""" - vm_rec.setdefault('domid', "-1") - vm_rec.setdefault('is_control_domain', False) - vm_rec.setdefault('is_a_template', False) - vm_rec.setdefault('memory_static_max', str(8 * units.Gi)) - vm_rec.setdefault('memory_dynamic_max', str(8 * units.Gi)) - vm_rec.setdefault('VCPUs_max', str(4)) - vm_rec.setdefault('VBDs', []) - vm_rec.setdefault('VIFs', []) - vm_rec.setdefault('resident_on', '') - - -def create_pbd(host_ref, sr_ref, attached): - config = {'path': '/var/run/sr-mount/%s' % sr_ref} - return _create_object('PBD', - {'device_config': config, - 'host': host_ref, - 'SR': sr_ref, - 'currently_attached': attached}) - - -def create_task(name_label): - return _create_object('task', - {'name_label': name_label, - 'status': 'pending'}) - - -def _create_local_srs(host_ref): - """Create an SR that looks like the one created on the local disk by - default by the XenServer installer. Also, fake the installation of - an ISO SR. 
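- Note that only the ref of the 'Local storage' (ext) SR is returned; - the ISO SR is created purely for completeness. 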
- """ - create_sr(name_label='Local storage ISO', - type='iso', - other_config={'i18n-original-value-name_label': - 'Local storage ISO', - 'i18n-key': 'local-storage-iso'}, - physical_size=80000, - physical_utilisation=40000, - virtual_allocation=80000, - host_ref=host_ref) - return create_sr(name_label='Local storage', - type='ext', - other_config={'i18n-original-value-name_label': - 'Local storage', - 'i18n-key': 'local-storage'}, - physical_size=40000, - physical_utilisation=20000, - virtual_allocation=10000, - host_ref=host_ref) - - -def create_sr(**kwargs): - sr_ref = _create_object( - 'SR', - {'name_label': kwargs.get('name_label'), - 'type': kwargs.get('type'), - 'content_type': kwargs.get('type', 'user'), - 'shared': kwargs.get('shared', False), - 'physical_size': kwargs.get('physical_size', str(1 << 30)), - 'physical_utilisation': str( - kwargs.get('physical_utilisation', 0)), - 'virtual_allocation': str(kwargs.get('virtual_allocation', 0)), - 'other_config': kwargs.get('other_config', {}), - 'VDIs': kwargs.get('VDIs', [])}) - pbd_ref = create_pbd(kwargs.get('host_ref'), sr_ref, True) - _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref] - return sr_ref - - -def _create_local_pif(host_ref): - pif_ref = _create_object('PIF', - {'name-label': 'Fake PIF', - 'MAC': '00:11:22:33:44:55', - 'physical': True, - 'VLAN': -1, - 'device': 'fake0', - 'host_uuid': host_ref, - 'network': '', - 'IP': '10.1.1.1', - 'IPv6': '', - 'uuid': '', - 'management': 'true', - 'host': 'fake_host_ref'}) - _db_content['PIF'][pif_ref]['uuid'] = pif_ref - return pif_ref - - -def _create_object(table, obj): - ref = uuidutils.generate_uuid() - obj['uuid'] = uuidutils.generate_uuid() - obj['ref'] = ref - _db_content[table][ref] = obj - return ref - - -def _create_sr(table, obj): - sr_type = obj[6] - # Forces fake to support iscsi only - if sr_type != 'iscsi' and sr_type != 'nfs': - raise XenAPI.Failure(['SR_UNKNOWN_DRIVER', sr_type]) - host_ref = list(_db_content['host'])[0] - sr_ref = _create_object(table, obj[2]) - if sr_type == 'iscsi': - vdi_ref = create_vdi('', sr_ref) - pbd_ref = create_pbd(host_ref, sr_ref, True) - _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref] - _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref] - _db_content['VDI'][vdi_ref]['SR'] = sr_ref - _db_content['PBD'][pbd_ref]['SR'] = sr_ref - return sr_ref - - -def _create_vlan(pif_ref, vlan_num, network_ref): - pif_rec = get_record('PIF', pif_ref) - vlan_pif_ref = _create_object('PIF', - {'name-label': 'Fake VLAN PIF', - 'MAC': '00:11:22:33:44:55', - 'physical': True, - 'VLAN': vlan_num, - 'device': pif_rec['device'], - 'host_uuid': pif_rec['host_uuid']}) - return _create_object('VLAN', - {'tagged-pif': pif_ref, - 'untagged-pif': vlan_pif_ref, - 'tag': vlan_num}) - - -def get_all(table): - return list(_db_content[table].keys()) - - -def get_all_records(table): - return _db_content[table] - - -def _query_matches(record, query): - # Simple support for the XenServer query language: - # 'field "host"="" and field "SR"=""' - # Tested through existing tests (e.g. 
calls to find_network_with_bridge) - - and_clauses = query.split(" and ") - if len(and_clauses) > 1: - matches = True - for clause in and_clauses: - matches = matches and _query_matches(record, clause) - return matches - - or_clauses = query.split(" or ") - if len(or_clauses) > 1: - matches = False - for clause in or_clauses: - matches = matches or _query_matches(record, clause) - return matches - - if query.startswith('not '): - return not _query_matches(record, query[4:]) - - # Now it must be a single field - bad queries never match - if not query.startswith('field'): - return False - (field, value) = query[6:].split('=', 1) - - # Some fields (e.g. name_label, memory_overhead) have double - # underscores in the DB, but only single underscores when querying - - field = field.replace("__", "_").strip(" \"'") - value = value.strip(" \"'") - - # Strings should be directly compared - if isinstance(record[field], six.string_types): - return record[field] == value - - # But for all other value-checks, convert to a string first - # (Notably used for booleans - which can be lower or camel - # case and are interpreted/sanitised by XAPI) - return str(record[field]).lower() == value.lower() - - -def get_all_records_where(table_name, query): - matching_records = {} - table = _db_content[table_name] - for record in table: - if _query_matches(table[record], query): - matching_records[record] = table[record] - return matching_records - - -def get_record(table, ref): - if ref in _db_content[table]: - return _db_content[table].get(ref) - else: - raise XenAPI.Failure(['HANDLE_INVALID', table, ref]) - - -def check_for_session_leaks(): - if len(_db_content['session']) > 0: - raise exception.NovaException('Sessions have leaked: %s' % - _db_content['session']) - - -def as_value(s): - """Helper function for simulating XenAPI plugin responses. It - escapes and wraps the given argument. - """ - return '<value>%s</value>' % saxutils.escape(s) - - -def as_json(*args, **kwargs): - """Helper function for simulating XenAPI plugin responses that - return JSON. If this function is given plain arguments, - then these are rendered as a JSON list. If it's given keyword - arguments then these are rendered as a JSON dict. 
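- For example, as_json('a', 'b') renders as '["a", "b"]', while - as_json(x='y') renders as '{"x": "y"}'. 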
- """ - arg = args or kwargs - return jsonutils.dumps(arg) - - -class Failure(Exception): - def __init__(self, details): - self.details = details - - def __str__(self): - try: - return str(self.details) - except Exception: - return "XenAPI Fake Failure: %s" % str(self.details) - - -class SessionBase(object): - """Base class for Fake Sessions.""" - - def __init__(self, uri, user=None, passwd=None): - self._session = None - xenapi_session.apply_session_helpers(self) - if user is not None: - self.xenapi.login_with_password(user, passwd) - - def pool_get_default_SR(self, _1, pool_ref): - return list(_db_content['pool'].values())[0]['default-SR'] - - def VBD_insert(self, _1, vbd_ref, vdi_ref): - vbd_rec = get_record('VBD', vbd_ref) - get_record('VDI', vdi_ref) - vbd_rec['empty'] = False - vbd_rec['VDI'] = vdi_ref - - def VBD_plug(self, _1, ref): - rec = get_record('VBD', ref) - if rec['currently_attached']: - raise XenAPI.Failure(['DEVICE_ALREADY_ATTACHED', ref]) - rec['currently_attached'] = True - rec['device'] = 'fakedev' - - def VBD_unplug(self, _1, ref): - rec = get_record('VBD', ref) - if not rec['currently_attached']: - raise XenAPI.Failure(['DEVICE_ALREADY_DETACHED', ref]) - rec['currently_attached'] = False - rec['device'] = '' - - def VBD_add_to_other_config(self, _1, vbd_ref, key, value): - db_ref = _db_content['VBD'][vbd_ref] - if 'other_config' not in db_ref: - db_ref['other_config'] = {} - if key in db_ref['other_config']: - raise XenAPI.Failure( - ['MAP_DUPLICATE_KEY', 'VBD', 'other_config', vbd_ref, key]) - db_ref['other_config'][key] = value - - def VBD_get_other_config(self, _1, vbd_ref): - db_ref = _db_content['VBD'][vbd_ref] - if 'other_config' not in db_ref: - return {} - return db_ref['other_config'] - - def PBD_create(self, _1, pbd_rec): - pbd_ref = _create_object('PBD', pbd_rec) - _db_content['PBD'][pbd_ref]['currently_attached'] = False - return pbd_ref - - def PBD_plug(self, _1, pbd_ref): - rec = get_record('PBD', pbd_ref) - if rec['currently_attached']: - raise XenAPI.Failure(['DEVICE_ALREADY_ATTACHED', rec]) - rec['currently_attached'] = True - sr_ref = rec['SR'] - _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref] - - def PBD_unplug(self, _1, pbd_ref): - rec = get_record('PBD', pbd_ref) - if not rec['currently_attached']: - raise XenAPI.Failure(['DEVICE_ALREADY_DETACHED', rec]) - rec['currently_attached'] = False - sr_ref = rec['SR'] - _db_content['SR'][sr_ref]['PBDs'].remove(pbd_ref) - - def SR_introduce(self, _1, sr_uuid, label, desc, type, content_type, - shared, sm_config): - for ref, rec in _db_content['SR'].items(): - if rec.get('uuid') == sr_uuid: - # make forgotten = 0 and return ref - _db_content['SR'][ref]['forgotten'] = 0 - return ref - # SR not found in db, so we create one - params = {'sr_uuid': sr_uuid, - 'label': label, - 'desc': desc, - 'type': type, - 'content_type': content_type, - 'shared': shared, - 'sm_config': sm_config} - sr_ref = _create_object('SR', params) - _db_content['SR'][sr_ref]['uuid'] = sr_uuid - _db_content['SR'][sr_ref]['forgotten'] = 0 - vdi_per_lun = False - if type == 'iscsi': - # Just to be clear - vdi_per_lun = True - if vdi_per_lun: - # we need to create a vdi because this introduce - # is likely meant for a single vdi - vdi_ref = create_vdi('', sr_ref) - _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref] - _db_content['VDI'][vdi_ref]['SR'] = sr_ref - return sr_ref - - def SR_forget(self, _1, sr_ref): - _db_content['SR'][sr_ref]['forgotten'] = 1 - - def SR_scan(self, _1, sr_ref): - return - - def VM_get_xenstore_data(self, _1, vm_ref): 
- return _db_content['VM'][vm_ref].get('xenstore_data', {}) - - def VM_remove_from_xenstore_data(self, _1, vm_ref, key): - db_ref = _db_content['VM'][vm_ref] - if 'xenstore_data' not in db_ref: - return - if key in db_ref['xenstore_data']: - del db_ref['xenstore_data'][key] - - def VM_add_to_xenstore_data(self, _1, vm_ref, key, value): - db_ref = _db_content['VM'][vm_ref] - if 'xenstore_data' not in db_ref: - db_ref['xenstore_data'] = {} - db_ref['xenstore_data'][key] = value - - def VM_pool_migrate(self, _1, vm_ref, host_ref, options): - pass - - def VDI_remove_from_other_config(self, _1, vdi_ref, key): - db_ref = _db_content['VDI'][vdi_ref] - if 'other_config' not in db_ref: - return - if key in db_ref['other_config']: - del db_ref['other_config'][key] - - def VDI_add_to_other_config(self, _1, vdi_ref, key, value): - db_ref = _db_content['VDI'][vdi_ref] - if 'other_config' not in db_ref: - db_ref['other_config'] = {} - if key in db_ref['other_config']: - raise XenAPI.Failure( - ['MAP_DUPLICATE_KEY', 'VDI', 'other_config', vdi_ref, key]) - db_ref['other_config'][key] = value - - def VDI_copy(self, _1, vdi_to_copy_ref, sr_ref): - db_ref = _db_content['VDI'][vdi_to_copy_ref] - name_label = db_ref['name_label'] - read_only = db_ref['read_only'] - sharable = db_ref['sharable'] - other_config = db_ref['other_config'].copy() - return create_vdi(name_label, sr_ref, sharable=sharable, - read_only=read_only, other_config=other_config) - - def VDI_clone(self, _1, vdi_to_clone_ref): - db_ref = _db_content['VDI'][vdi_to_clone_ref] - sr_ref = db_ref['SR'] - return self.VDI_copy(_1, vdi_to_clone_ref, sr_ref) - - def host_compute_free_memory(self, _1, ref): - # Always return 12GB available - return 12 * units.Gi - - def _plugin_agent_version(self, method, args): - return as_json(returncode='0', message='1.0\\r\\n') - - def _plugin_agent_key_init(self, method, args): - return as_json(returncode='D0', message='1') - - def _plugin_agent_password(self, method, args): - return as_json(returncode='0', message='success') - - def _plugin_agent_inject_file(self, method, args): - return as_json(returncode='0', message='success') - - def _plugin_agent_resetnetwork(self, method, args): - return as_json(returncode='0', message='success') - - def _plugin_agent_agentupdate(self, method, args): - url = args["url"] - md5 = args["md5sum"] - message = "success with %(url)s and hash:%(md5)s" % dict(url=url, - md5=md5) - return as_json(returncode='0', message=message) - - def _plugin_noop(self, method, args): - return '' - - def _plugin_pickle_noop(self, method, args): - return pickle.dumps(None) - - def _plugin_migration_transfer_vhd(self, method, args): - kwargs = pickle.loads(args['params'])['kwargs'] - vdi_ref = self.xenapi_request('VDI.get_by_uuid', - (kwargs['vdi_uuid'], )) - assert vdi_ref - return pickle.dumps(None) - - _plugin_glance_upload_vhd2 = _plugin_pickle_noop - _plugin_kernel_copy_vdi = _plugin_noop - _plugin_kernel_create_kernel_ramdisk = _plugin_noop - _plugin_kernel_remove_kernel_ramdisk = _plugin_noop - _plugin_migration_move_vhds_into_sr = _plugin_noop - - def _plugin_xenhost_host_data(self, method, args): - return jsonutils.dumps({ - 'host_memory': {'total': 10, - 'overhead': 20, - 'free': 30, - 'free-computed': 40}, - 'host_uuid': 'fb97583b-baa1-452d-850e-819d95285def', - 'host_name-label': 'fake-xenhost', - 'host_name-description': 'Default install of XenServer', - 'host_hostname': 'fake-xenhost', - 'host_ip_address': '10.219.10.24', - 'enabled': 'true', - 'host_capabilities': ['xen-3.0-x86_64', - 
'xen-3.0-x86_32p', - 'hvm-3.0-x86_32', - 'hvm-3.0-x86_32p', - 'hvm-3.0-x86_64'], - 'host_other-config': { - 'agent_start_time': '1412774967.', - 'iscsi_iqn': 'iqn.2014-10.org.example:39fa9ee3', - 'boot_time': '1412774885.', - }, - 'host_cpu_info': { - 'physical_features': '0098e3fd-bfebfbff-00000001-28100800', - 'modelname': 'Intel(R) Xeon(R) CPU X3430 @ 2.40GHz', - 'vendor': 'GenuineIntel', - 'features': '0098e3fd-bfebfbff-00000001-28100800', - 'family': 6, - 'maskable': 'full', - 'cpu_count': 4, - 'socket_count': '1', - 'flags': 'fpu de tsc msr pae mce cx8 apic sep mtrr mca ' - 'cmov pat clflush acpi mmx fxsr sse sse2 ss ht ' - 'nx constant_tsc nonstop_tsc aperfmperf pni vmx ' - 'est ssse3 sse4_1 sse4_2 popcnt hypervisor ida ' - 'tpr_shadow vnmi flexpriority ept vpid', - 'stepping': 5, - 'model': 30, - 'features_after_reboot': '0098e3fd-bfebfbff-00000001-28100800', - 'speed': '2394.086' - }, - }) - - def _plugin_poweraction(self, method, args): - return jsonutils.dumps({"power_action": method[5:]}) - - _plugin_xenhost_host_reboot = _plugin_poweraction - _plugin_xenhost_host_startup = _plugin_poweraction - _plugin_xenhost_host_shutdown = _plugin_poweraction - - def _plugin_xenhost_set_host_enabled(self, method, args): - enabled = 'enabled' if args.get('enabled') == 'true' else 'disabled' - return jsonutils.dumps({"status": enabled}) - - def _plugin_xenhost_host_uptime(self, method, args): - return jsonutils.dumps({"uptime": "fake uptime"}) - - def _plugin_xenhost_network_config(self, method, args): - return pickle.dumps({"fake_network": "fake conf"}) - - def _plugin_xenhost_get_pci_device_details(self, method, args): - """Simulate the output of three PCI devices. - - Two of those devices use the pciback driver and are available for - PCI passthrough, but only one will match the PCI whitelist used in - the method test_pci_passthrough_devices_*(). - Return a single list. 
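- The devices are rendered as lspci-style 'Key:\tvalue' records and - joined with newlines, so consecutive records are separated by a - blank line; the joined string is returned pickled. 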
- - """ - # Driver is not pciback - dev_bad1 = ["Slot:\t0000:86:10.0", "Class:\t0604", "Vendor:\t10b5", - "Device:\t8747", "Rev:\tba", "Driver:\tpcieport", "\n"] - # Driver is pciback but vendor and device are bad - dev_bad2 = ["Slot:\t0000:88:00.0", "Class:\t0300", "Vendor:\t0bad", - "Device:\tcafe", "SVendor:\t10de", "SDevice:\t100d", - "Rev:\ta1", "Driver:\tpciback", "\n"] - # Driver is pciback and vendor, device are used for matching - dev_good = ["Slot:\t0000:87:00.0", "Class:\t0300", "Vendor:\t10de", - "Device:\t11bf", "SVendor:\t10de", "SDevice:\t100d", - "Rev:\ta1", "Driver:\tpciback", "\n"] - - lspci_output = "\n".join(dev_bad1 + dev_bad2 + dev_good) - return pickle.dumps(lspci_output) - - def _plugin_xenhost_get_pci_type(self, method, args): - return pickle.dumps("type-PCI") - - def _plugin_console_get_console_log(self, method, args): - dom_id = args["dom_id"] - if dom_id == 0: - raise XenAPI.Failure('Guest does not have a console') - return base64.b64encode( - zlib.compress(("dom_id: %s" % dom_id).encode('utf-8'))) - - def _plugin_dom0_plugin_version_get_version(self, method, args): - return pickle.dumps( - xenapi_session.XenAPISession.PLUGIN_REQUIRED_VERSION) - - def _plugin_xenhost_query_gc(self, method, args): - return pickle.dumps("False") - - def _plugin_partition_utils_make_partition(self, method, args): - return pickle.dumps(None) - - def host_call_plugin(self, _1, _2, plugin, method, args): - plugin = plugin.rstrip('.py') - - func = getattr(self, '_plugin_%s_%s' % (plugin, method), None) - if not func: - raise Exception('No simulation in host_call_plugin for %s,%s' % - (plugin, method)) - - return func(method, args) - - def VDI_get_virtual_size(self, *args): - return 1 * units.Gi - - def VDI_resize_online(self, *args): - return 'derp' - - VDI_resize = VDI_resize_online - - def _VM_reboot(self, session, vm_ref): - db_ref = _db_content['VM'][vm_ref] - if db_ref['power_state'] != 'Running': - raise XenAPI.Failure(['VM_BAD_POWER_STATE', 'fake-opaque-ref', - db_ref['power_state'].lower(), 'halted']) - db_ref['power_state'] = 'Running' - db_ref['domid'] = '%d' % (random.randrange(1, 1 << 16)) - - def VM_clean_reboot(self, session, vm_ref): - return self._VM_reboot(session, vm_ref) - - def VM_hard_reboot(self, session, vm_ref): - return self._VM_reboot(session, vm_ref) - - def VM_hard_shutdown(self, session, vm_ref): - db_ref = _db_content['VM'][vm_ref] - db_ref['power_state'] = 'Halted' - db_ref['domid'] = "-1" - VM_clean_shutdown = VM_hard_shutdown - - def VM_suspend(self, session, vm_ref): - db_ref = _db_content['VM'][vm_ref] - db_ref['power_state'] = 'Suspended' - - def VM_pause(self, session, vm_ref): - db_ref = _db_content['VM'][vm_ref] - db_ref['power_state'] = 'Paused' - - def VM_query_data_source(self, session, vm_ref, field): - vm = {'cpu0': 0.11, - 'cpu1': 0.22, - 'cpu2': 0.33, - 'cpu3': 0.44, - 'memory': 8 * units.Gi, # 8GB in bytes - 'memory_internal_free': 5 * units.Mi, # 5GB in kilobytes - 'vif_0_rx': 50, - 'vif_0_tx': 100, - 'vbd_0_read': 50, - 'vbd_0_write': 100} - return vm.get(field, 0) - - def pool_eject(self, session, host_ref): - pass - - def pool_join(self, session, hostname, username, password): - pass - - def pool_set_name_label(self, session, pool_ref, name): - pass - - def host_migrate_receive(self, session, destref, nwref, options): - return {"value": "fake_migrate_data"} - - def VM_assert_can_migrate(self, session, vmref, migrate_data, live, - vdi_map, vif_map, options): - pass - - def VM_migrate_send(self, session, mref, migrate_data, live, 
vdi_map, - vif_map, options): - pass - - def VM_remove_from_blocked_operations(self, session, vm_ref, key): - # operation is idempotent, XenServer doesn't care if the key exists - _db_content['VM'][vm_ref]['blocked_operations'].pop(key, None) - - def xenapi_request(self, methodname, params): - if methodname.startswith('login'): - self._login(methodname, params) - return None - elif methodname == 'logout' or methodname == 'session.logout': - self._logout() - return None - else: - full_params = (self._session,) + params - meth = getattr(self, methodname, None) - if meth is None: - LOG.debug('Raising NotImplemented') - raise NotImplementedError( - _('xenapi.fake does not have an implementation for %s') % - methodname) - return meth(*full_params) - - def call_xenapi(self, *args): - return self.xenapi_request(args[0], args[1:]) - - def get_all_refs_and_recs(self, cls): - return get_all_records(cls).items() - - def get_rec(self, cls, ref): - return _db_content[cls].get(ref, None) - - def _login(self, method, params): - self._session = uuidutils.generate_uuid() - _session_info = {'uuid': uuidutils.generate_uuid(), - 'this_host': list(_db_content['host'])[0]} - _db_content['session'][self._session] = _session_info - self.host_ref = list(_db_content['host'])[0] - - def _logout(self): - s = self._session - self._session = None - if s not in _db_content['session']: - raise exception.NovaException( - "Logging out a session that is invalid or already logged " - "out: %s" % s) - del _db_content['session'][s] - - def __getattr__(self, name): - if name == 'handle': - return self._session - elif name == 'xenapi': - return _Dispatcher(self.xenapi_request, None) - elif name.startswith('login') or name.startswith('slave_local'): - return lambda *params: self._login(name, params) - elif name.startswith('Async'): - return lambda *params: self._async(name, params) - elif '.' 
in name: - impl = getattr(self, name.replace('.', '_')) - if impl is not None: - - def callit(*params): - LOG.debug('Calling %(name)s %(impl)s', - {'name': name, 'impl': impl}) - self._check_session(params) - return impl(*params) - return callit - if self._is_gettersetter(name, True): - LOG.debug('Calling getter %s', name) - return lambda *params: self._getter(name, params) - elif self._is_gettersetter(name, False): - LOG.debug('Calling setter %s', name) - return lambda *params: self._setter(name, params) - elif self._is_create(name): - return lambda *params: self._create(name, params) - elif self._is_destroy(name): - return lambda *params: self._destroy(name, params) - elif name == 'XenAPI': - return FakeXenAPI() - else: - return None - - def _is_gettersetter(self, name, getter): - bits = name.split('.') - return (len(bits) == 2 and - bits[0] in _CLASSES and - bits[1].startswith(getter and 'get_' or 'set_')) - - def _is_create(self, name): - return self._is_method(name, 'create') - - def _is_destroy(self, name): - return self._is_method(name, 'destroy') - - def _is_method(self, name, meth): - bits = name.split('.') - return (len(bits) == 2 and - bits[0] in _CLASSES and - bits[1] == meth) - - def _getter(self, name, params): - self._check_session(params) - (cls, func) = name.split('.') - if func == 'get_all': - self._check_arg_count(params, 1) - return get_all(cls) - - if func == 'get_all_records': - self._check_arg_count(params, 1) - return get_all_records(cls) - - if func == 'get_all_records_where': - self._check_arg_count(params, 2) - return get_all_records_where(cls, params[1]) - - if func == 'get_record': - self._check_arg_count(params, 2) - return get_record(cls, params[1]) - - if func in ('get_by_name_label', 'get_by_uuid'): - self._check_arg_count(params, 2) - return_singleton = (func == 'get_by_uuid') - return self._get_by_field( - _db_content[cls], func[len('get_by_'):], params[1], - return_singleton=return_singleton) - - if func == 'get_VIFs': - self._check_arg_count(params, 2) - # FIXME(mriedem): figure out how to use _get_by_field for VIFs, - # or just stop relying on this fake DB and use mock - return _db_content['VIF'].keys() - - if func == 'get_bridge': - self._check_arg_count(params, 2) - # FIXME(mriedem): figure out how to use _get_by_field for bridge, - # or just stop relying on this fake DB and use mock - return 'fake_bridge' - - if len(params) == 2: - field = func[len('get_'):] - ref = params[1] - if (ref in _db_content[cls]): - if (field in _db_content[cls][ref]): - return _db_content[cls][ref][field] - else: - raise XenAPI.Failure(['HANDLE_INVALID', cls, ref]) - - LOG.debug('Raising NotImplemented') - raise NotImplementedError( - _('xenapi.fake does not have an implementation for %s or it has ' - 'been called with the wrong number of arguments') % name) - - def _setter(self, name, params): - self._check_session(params) - (cls, func) = name.split('.') - - if len(params) == 3: - field = func[len('set_'):] - ref = params[1] - val = params[2] - - if (ref in _db_content[cls] and - field in _db_content[cls][ref]): - _db_content[cls][ref][field] = val - return - - LOG.debug('Raising NotImplemented') - raise NotImplementedError( - 'xenapi.fake does not have an implementation for %s or it has ' - 'been called with the wrong number of arguments or the database ' - 'is missing that field' % name) - - def _create(self, name, params): - self._check_session(params) - is_sr_create = name == 'SR.create' - is_vlan_create = name == 'VLAN.create' - # Storage Repositories have a 
different API - expected = is_sr_create and 10 or is_vlan_create and 4 or 2 - self._check_arg_count(params, expected) - (cls, _) = name.split('.') - ref = (is_sr_create and - _create_sr(cls, params) or - is_vlan_create and - _create_vlan(params[1], params[2], params[3]) or - _create_object(cls, params[1])) - - # Call hook to provide any fixups needed (ex. creating backrefs) - after_hook = 'after_%s_create' % cls - try: - func = _after_create_functions[after_hook] - except KeyError: - pass - else: - func(ref, params[1]) - - obj = get_record(cls, ref) - - # Add RO fields - if cls == 'VM': - obj['power_state'] = 'Halted' - return ref - - def _destroy(self, name, params): - self._check_session(params) - self._check_arg_count(params, 2) - table = name.split('.')[0] - ref = params[1] - if ref not in _db_content[table]: - raise XenAPI.Failure(['HANDLE_INVALID', table, ref]) - - # Call destroy function (if exists) - destroy_func = _destroy_functions.get('destroy_%s' % table.lower()) - if destroy_func: - destroy_func(ref) - else: - del _db_content[table][ref] - - def _async(self, name, params): - task_ref = create_task(name) - task = _db_content['task'][task_ref] - func = name[len('Async.'):] - try: - result = self.xenapi_request(func, params[1:]) - if result: - result = as_value(result) - task['result'] = result - task['status'] = 'success' - except XenAPI.Failure as exc: - task['error_info'] = exc.details - task['status'] = 'failed' - task['finished'] = timeutils.utcnow() - return task_ref - - def _check_session(self, params): - if (self._session is None or - self._session not in _db_content['session']): - raise XenAPI.Failure( - ['HANDLE_INVALID', 'session', self._session]) - if len(params) == 0 or params[0] != self._session: - LOG.debug('Raising NotImplemented') - raise NotImplementedError('Call to XenAPI without using .xenapi') - - def _check_arg_count(self, params, expected): - actual = len(params) - if actual != expected: - raise XenAPI.Failure( - ['MESSAGE_PARAMETER_COUNT_MISMATCH', expected, actual]) - - def _get_by_field(self, recs, k, v, return_singleton): - result = [] - for ref, rec in recs.items(): - if rec.get(k) == v: - result.append(ref) - - if return_singleton: - try: - return result[0] - except IndexError: - raise XenAPI.Failure(['UUID_INVALID', v, result, recs, k]) - - return result - - -class FakeXenAPI(object): - def __init__(self): - self.Failure = XenAPI.Failure - - -# Based upon _Method from xmlrpclib. -class _Dispatcher(object): - def __init__(self, send, name): - self.__send = send - self.__name = name - - def __repr__(self): - if self.__name: - return '<xenapi.fake._Dispatcher for %s>' % self.__name - else: - return '<xenapi.fake._Dispatcher>' - - def __getattr__(self, name): - if self.__name is None: - return _Dispatcher(self.__send, name) - else: - return _Dispatcher(self.__send, "%s.%s" % (self.__name, name)) - - def __call__(self, *args): - return self.__send(self.__name, args) diff --git a/nova/virt/xenapi/host.py b/nova/virt/xenapi/host.py deleted file mode 100644 index 7bd5d6fa937b..000000000000 --- a/nova/virt/xenapi/host.py +++ /dev/null @@ -1,570 +0,0 @@ -# Copyright (c) 2012 Citrix Systems, Inc. -# Copyright 2010 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Management class for host-related functions (start, reboot, etc). -""" - -import re - -from os_xenapi.client import host_management -from os_xenapi.client import XenAPI -from oslo_config import cfg -from oslo_log import log as logging -from oslo_serialization import jsonutils - - -from nova.compute import task_states -from nova.compute import vm_states -from nova import context -from nova import exception -from nova.i18n import _ -from nova import objects -from nova.objects import fields as obj_fields -from nova.virt.xenapi import pool_states -from nova.virt.xenapi import vm_utils - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -class Host(object): - """Implements host related operations.""" - def __init__(self, session, virtapi): - self._session = session - self._virtapi = virtapi - - def host_power_action(self, action): - """Reboots or shuts down the host.""" - args = {"action": jsonutils.dumps(action)} - methods = {"reboot": "host_reboot", "shutdown": "host_shutdown"} - response = call_xenhost(self._session, methods[action], args) - return response.get("power_action", response) - - def host_maintenance_mode(self, host, mode): - """Start/Stop host maintenance window. On start, it triggers - guest VMs evacuation. - """ - if not mode: - return 'off_maintenance' - host_list = [host_ref for host_ref in - self._session.host.get_all() - if host_ref != self._session.host_ref] - migrations_counter = vm_counter = 0 - ctxt = context.get_admin_context() - for vm_ref, vm_rec in vm_utils.list_vms(self._session): - for host_ref in host_list: - try: - # Ensure only guest instances are migrated - uuid = vm_rec['other_config'].get('nova_uuid') - if not uuid: - name = vm_rec['name_label'] - uuid = _uuid_find(ctxt, host, name) - if not uuid: - LOG.info('Instance %(name)s running on ' - '%(host)s could not be found in ' - 'the database: assuming it is a ' - 'worker VM and skipping migration ' - 'to a new host', - {'name': name, 'host': host}) - continue - instance = objects.Instance.get_by_uuid(ctxt, uuid) - vm_counter = vm_counter + 1 - - aggregate = objects.AggregateList.get_by_host( - ctxt, host, key=pool_states.POOL_FLAG) - if not aggregate: - msg = _('Aggregate for host %(host)s could not be' - ' found.') % dict(host=host) - raise exception.NotFound(msg) - - dest = _host_find(ctxt, self._session, aggregate[0], - host_ref) - instance.host = dest - instance.task_state = task_states.MIGRATING - instance.save() - - self._session.VM.pool_migrate(vm_ref, host_ref, - {"live": "true"}) - migrations_counter = migrations_counter + 1 - - instance.vm_state = vm_states.ACTIVE - instance.save() - - break - except XenAPI.Failure: - LOG.exception( - 'Unable to migrate VM %(vm_ref)s from %(host)s', - {'vm_ref': vm_ref, 'host': host}, - ) - instance.host = host - instance.vm_state = vm_states.ACTIVE - instance.save() - - if vm_counter == migrations_counter: - return 'on_maintenance' - else: - raise exception.NoValidHost(reason=_('Unable to find suitable ' - 'host for VMs evacuation')) - - def set_host_enabled(self, enabled): - """Sets the compute host's ability to accept new instances.""" - # 
Since capabilities are gone, use the service table to disable a node - # in the scheduler - cntxt = context.get_admin_context() - service = objects.Service.get_by_args(cntxt, CONF.host, - 'nova-compute') - service.disabled = not enabled - service.disabled_reason = 'set by xenapi host_state' - service.save() - - response = _call_host_management(self._session, - host_management.set_host_enabled, - jsonutils.dumps(enabled)) - return response.get("status", response) - - def get_host_uptime(self): - """Returns the result of calling "uptime" on the target host.""" - response = _call_host_management(self._session, - host_management.get_host_uptime) - return response.get("uptime", response) - - -class HostState(object): - """Manages information about the XenServer host this compute - node is running on. - """ - def __init__(self, session): - super(HostState, self).__init__() - self._session = session - self._stats = {} - self.update_status() - - def _get_passthrough_devices(self): - """Get a list of PCI devices that are available for PCI passthrough. - - We use a plugin to get the output of the lspci command run on dom0. - From this list we extract the PCI devices that are using the pciback - kernel driver. - - :returns: a list of pci devices on the node - """ - def _compile_hex(pattern): - r"""Return a compiled regular expression pattern into which we have - replaced occurrences of hex by [\da-fA-F]. - """ - return re.compile(pattern.replace("hex", r"[\da-fA-F]")) - - def _parse_pci_device_string(dev_string): - """Extract information from the device string about the slot, the - vendor and the product ID. The string is as follows: - "Slot:\tBDF\nClass:\txxxx\nVendor:\txxxx\nDevice:\txxxx\n..." - Return a dictionary with information about the device. - """ - slot_regex = _compile_hex(r"Slot:\t" - r"((?:hex{4}:)?" # Domain: (optional) - r"hex{2}:" # Bus: - r"hex{2}\." # Device. - r"hex{1})") # Function - vendor_regex = _compile_hex(r"\nVendor:\t(hex+)") - product_regex = _compile_hex(r"\nDevice:\t(hex+)") - - slot_id = slot_regex.findall(dev_string) - vendor_id = vendor_regex.findall(dev_string) - product_id = product_regex.findall(dev_string) - - if not slot_id or not vendor_id or not product_id: - raise exception.NovaException( - _("Failed to parse information about" - " a pci device for passthrough")) - - type_pci = host_management.get_pci_type(self._session, slot_id[0]) - - return {'label': '_'.join(['label', - vendor_id[0], - product_id[0]]), - 'vendor_id': vendor_id[0], - 'product_id': product_id[0], - 'address': slot_id[0], - 'dev_id': '_'.join(['pci', slot_id[0]]), - 'dev_type': type_pci, - 'status': 'available'} - - # Devices are separated by a blank line. That is why we - # use "\n\n" as separator. - lspci_out = host_management.get_pci_device_details(self._session) - - pci_list = lspci_out.split("\n\n") - - # For each device of the list, check if it uses the pciback - # kernel driver and if it does, get information and add it - # to the list of passthrough_devices. Ignore it if the driver - # is not pciback. - passthrough_devices = [] - - for dev_string_info in pci_list: - if "Driver:\tpciback" in dev_string_info: - new_dev = _parse_pci_device_string(dev_string_info) - passthrough_devices.append(new_dev) - - return passthrough_devices - - def _get_vgpu_stats(self): - """Invoke XenAPI to get the stats for VGPUs. 
- - The return value is a dict which has GPU groups' uuid as - the keys: - dict(grp_uuid_1=dict_vgpu_stats_in_grp_1, - grp_uuid_2=dict_vgpu_stats_in_grp_2, - ..., - grp_uuid_n=dict_vgpu_stats_in_grp_n) - The `dict_vgpu_stats_in_grp_x` is a dict representing the - vGPU stats in GPU group x. For details, refer to - the return value of _get_vgpu_stats_in_group(). - """ - if not CONF.devices.enabled_vgpu_types: - return {} - - vgpu_stats = {} - - # NOTE(jianghuaw): If there are multiple vGPU types enabled in - # the config option, we only choose the first one so that - # we support only one vGPU type per compute node at the moment. - # Once we switch to use the nested resource providers, we will - # remove these lines to allow multiple vGPU types within multiple - # GPU groups (each group has a different vGPU type enabled). - if len(CONF.devices.enabled_vgpu_types) > 1: - LOG.warning('XenAPI only supports one GPU type per compute node,' - ' only first type will be used.') - cfg_enabled_types = CONF.devices.enabled_vgpu_types[:1] - - vgpu_grp_refs = self._session.call_xenapi('GPU_group.get_all') - for ref in vgpu_grp_refs: - grp_uuid = self._session.call_xenapi('GPU_group.get_uuid', ref) - stat = self._get_vgpu_stats_in_group(ref, cfg_enabled_types) - if stat: - vgpu_stats[grp_uuid] = stat - - LOG.debug("Returning vGPU stats: %s", vgpu_stats) - - return vgpu_stats - - def _get_vgpu_stats_in_group(self, grp_ref, vgpu_types): - """Get stats for the specified vGPU types in a GPU group. - - NOTE(Jianghuaw): In XenAPI, a GPU group is the minimal unit - from which to create a vGPU for an instance. So here, we - report vGPU resources for a particular GPU group. When we use - nested resource providers to represent the vGPU resources, - each GPU group will be a child resource provider under the - compute node. - - The return value is a dict. For example: - {'uuid': '6444c6ee-3a49-42f5-bebb-606b52175e67', - 'type_name': 'Intel GVT-g', - 'max_heads': 1, - 'total': 7, - 'remaining': 7, - } - """ - type_refs_in_grp = self._session.call_xenapi( - 'GPU_group.get_enabled_VGPU_types', grp_ref) - - type_names_in_grp = {self._session.call_xenapi( - 'VGPU_type.get_model_name', - type_ref): type_ref - for type_ref in type_refs_in_grp} - # Get the vGPU types enabled both in this GPU group and in the - # nova conf. - enabled_types = set(vgpu_types) & set(type_names_in_grp) - if not enabled_types: - return - - stat = {} - # Get the sorted enabled types, so that we can always choose the same - # type when there are multiple enabled vGPU types. - sorted_types = sorted(enabled_types) - chosen_type = sorted_types[0] - if len(sorted_types) > 1: - LOG.warning('XenAPI only supports one vGPU type per GPU group,' - ' but enabled multiple vGPU types: %(available)s.' 
- ' Choosing the first one: %(chosen)s.', - dict(available=sorted_types, - chosen=chosen_type)) - type_ref = type_names_in_grp[chosen_type] - type_uuid = self._session.call_xenapi('VGPU_type.get_uuid', type_ref) - stat['uuid'] = type_uuid - stat['type_name'] = chosen_type - stat['max_heads'] = int(self._session.call_xenapi( - 'VGPU_type.get_max_heads', type_ref)) - - stat['total'] = self._get_total_vgpu_in_grp(grp_ref, type_ref) - stat['remaining'] = int(self._session.call_xenapi( - 'GPU_group.get_remaining_capacity', - grp_ref, - type_ref)) - return stat - - def _get_total_vgpu_in_grp(self, grp_ref, type_ref): - """Get the total capacity of vGPUs in the group.""" - pgpu_recs = self._session.call_xenapi( - 'PGPU.get_all_records_where', 'field "GPU_group" = "%s"' % grp_ref) - - total = 0 - for pgpu_ref in pgpu_recs: - pgpu_rec = pgpu_recs[pgpu_ref] - if type_ref in pgpu_rec['enabled_VGPU_types']: - cap = pgpu_rec['supported_VGPU_max_capacities'][type_ref] - total += int(cap) - return total - - def get_host_stats(self, refresh=False): - """Return the current state of the host. If 'refresh' is - True, run the update first. - """ - if refresh or not self._stats: - self.update_status() - return self._stats - - def get_disk_used(self, sr_ref): - """Since glance images are downloaded and snapshotted before they are - used, only a small proportion of each image's VDI will be in use and - it will never grow. We only need to count the virtual size for disks - that are attached to a VM; every other disk can be counted by its - physical utilisation. - """ - - def _vdi_attached(vdi_ref): - try: - vbds = self._session.VDI.get_VBDs(vdi_ref) - for vbd in vbds: - if self._session.VBD.get_currently_attached(vbd): - return True - except self._session.XenAPI.Failure: - # VDI or VBD may no longer exist - in which case, it's - # not attached - pass - return False - - allocated = 0 - physical_used = 0 - - all_vdis = self._session.SR.get_VDIs(sr_ref) - for vdi_ref in all_vdis: - try: - vdi_physical = \ - int(self._session.VDI.get_physical_utilisation(vdi_ref)) - if _vdi_attached(vdi_ref): - allocated += \ - int(self._session.VDI.get_virtual_size(vdi_ref)) - else: - allocated += vdi_physical - physical_used += vdi_physical - except (ValueError, self._session.XenAPI.Failure): - LOG.exception('Unable to get size for vdi %s', vdi_ref) - - return (allocated, physical_used) - - def update_status(self): - """Since under XenServer, a compute node runs on a given host, - we can get host status information using xenapi. 
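- The refreshed stats are cached in self._stats and served from - there by get_host_stats() until the next refresh is requested. 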
- """ - LOG.debug("Updating host stats") - data = _call_host_management(self._session, - host_management.get_host_data) - if data: - sr_ref = vm_utils.scan_default_sr(self._session) - sr_rec = self._session.SR.get_record(sr_ref) - total = int(sr_rec["physical_size"]) - (allocated, used) = self.get_disk_used(sr_ref) - data["disk_total"] = total - data["disk_used"] = used - data["disk_allocated"] = allocated - data["disk_available"] = total - used - data["supported_instances"] = to_supported_instances( - data.get("host_capabilities") - ) - data["cpu_model"] = to_cpu_model( - data.get("host_cpu_info") - ) - host_memory = data.get('host_memory', None) - if host_memory: - data["host_memory_total"] = host_memory.get('total', 0) - data["host_memory_overhead"] = host_memory.get('overhead', 0) - data["host_memory_free"] = host_memory.get('free', 0) - data["host_memory_free_computed"] = host_memory.get( - 'free-computed', 0) - del data['host_memory'] - if (data['host_hostname'] != - self._stats.get('host_hostname', data['host_hostname'])): - LOG.error('Hostname has changed from %(old)s to %(new)s. ' - 'A restart is required to take effect.', - {'old': self._stats['host_hostname'], - 'new': data['host_hostname']}) - data['host_hostname'] = self._stats['host_hostname'] - data['hypervisor_hostname'] = data['host_hostname'] - vcpus_used = 0 - for vm_ref, vm_rec in vm_utils.list_vms(self._session): - vcpus_used = vcpus_used + int(vm_rec['VCPUs_max']) - data['vcpus_used'] = vcpus_used - data['pci_passthrough_devices'] = self._get_passthrough_devices() - data['vgpu_stats'] = self._get_vgpu_stats() - self._stats = data - - -def to_supported_instances(host_capabilities): - if not host_capabilities: - return [] - - result = [] - for capability in host_capabilities: - try: - # 'capability'is unicode but we want arch/ostype - # to be strings to match the standard constants - capability = str(capability) - - ostype, _version, guestarch = capability.split("-") - - guestarch = obj_fields.Architecture.canonicalize(guestarch) - ostype = obj_fields.VMMode.canonicalize(ostype) - - result.append((guestarch, obj_fields.HVType.XEN, ostype)) - except ValueError: - LOG.warning("Failed to extract instance support from %s", - capability) - - return result - - -def to_cpu_model(host_cpu_info): - # The XenAPI driver returns data in the format - # - # {"physical_features": "0098e3fd-bfebfbff-00000001-28100800", - # "modelname": "Intel(R) Xeon(R) CPU X3430 @ 2.40GHz", - # "vendor": "GenuineIntel", - # "features": "0098e3fd-bfebfbff-00000001-28100800", - # "family": 6, - # "maskable": "full", - # "cpu_count": 4, - # "socket_count": "1", - # "flags": "fpu de tsc msr pae mce cx8 apic sep mtrr mca cmov - # pat clflush acpi mmx fxsr sse sse2 ss ht nx - # constant_tsc nonstop_tsc aperfmperf pni vmx est - # ssse3 sse4_1 sse4_2 popcnt hypervisor ida - # tpr_shadow vnmi flexpriority ept vpid", - # "stepping": 5, - # "model": 30, - # "features_after_reboot": "0098e3fd-bfebfbff-00000001-28100800", - # "speed": "2394.086"} - - if host_cpu_info is None: - return None - - cpu_info = dict() - # TODO(berrange) the data we're putting in model is not - # exactly comparable to what libvirt puts in model. The - # libvirt model names are a well defined short string - # which is really an aliass for a particular set of - # feature flags. 
-
-def to_cpu_model(host_cpu_info):
-    # The XenAPI driver returns data in the format
-    #
-    # {"physical_features": "0098e3fd-bfebfbff-00000001-28100800",
-    #  "modelname": "Intel(R) Xeon(R) CPU X3430 @ 2.40GHz",
-    #  "vendor": "GenuineIntel",
-    #  "features": "0098e3fd-bfebfbff-00000001-28100800",
-    #  "family": 6,
-    #  "maskable": "full",
-    #  "cpu_count": 4,
-    #  "socket_count": "1",
-    #  "flags": "fpu de tsc msr pae mce cx8 apic sep mtrr mca cmov
-    #            pat clflush acpi mmx fxsr sse sse2 ss ht nx
-    #            constant_tsc nonstop_tsc aperfmperf pni vmx est
-    #            ssse3 sse4_1 sse4_2 popcnt hypervisor ida
-    #            tpr_shadow vnmi flexpriority ept vpid",
-    #  "stepping": 5,
-    #  "model": 30,
-    #  "features_after_reboot": "0098e3fd-bfebfbff-00000001-28100800",
-    #  "speed": "2394.086"}
-
-    if host_cpu_info is None:
-        return None
-
-    cpu_info = dict()
-    # TODO(berrange) the data we're putting in model is not
-    # exactly comparable to what libvirt puts in model. The
-    # libvirt model names are a well defined short string
-    # which is really an alias for a particular set of
-    # feature flags. The Xen model names are raw printable
-    # strings from the kernel with no specific semantics
-    cpu_info["model"] = host_cpu_info["modelname"]
-    cpu_info["vendor"] = host_cpu_info["vendor"]
-    # TODO(berrange) perhaps we could fill in 'arch' field too
-    # by looking at 'host_capabilities' for the Xen host ?
-
-    topology = dict()
-    topology["sockets"] = int(host_cpu_info["socket_count"])
-    # use floor division so 'cores' stays an integer under Python 3
-    topology["cores"] = (int(host_cpu_info["cpu_count"]) //
-                         int(host_cpu_info["socket_count"]))
-    # TODO(berrange): if 'ht' is present in the 'flags' list
-    # is it possible to infer that the 'cpu_count' is in fact
-    # sockets * cores * threads ? Unclear if 'ht' would remain
-    # visible when threads are disabled in BIOS ?
-    topology["threads"] = 1
-
-    cpu_info["topology"] = topology
-
-    cpu_info["features"] = host_cpu_info["flags"].split(" ")
-
-    return cpu_info
-
-
-def call_xenhost(session, method, arg_dict):
-    """Several methods need this general handling when interacting with the
-    xenhost plugin, so this abstracts out that behavior.
-    """
-    try:
-        result = session.call_plugin('xenhost.py', method, args=arg_dict)
-        if not result:
-            return ''
-        return jsonutils.loads(result)
-    except ValueError:
-        LOG.exception("Unable to get updated status")
-        return None
-    except session.XenAPI.Failure as e:
-        LOG.error("The call to %(method)s returned "
-                  "an error: %(e)s.", {'method': method, 'e': e})
-        return e.details[1]
-
-
-def _call_host_management(session, method, *args):
-    """Several methods need this general handling when interacting with the
-    dom0 plugin, so this abstracts out that behavior. call_xenhost will be
-    removed once the functions that are no longer needed have been
-    deprecated and dropped.
-    """
-    try:
-        result = method(session, *args)
-        if not result:
-            return ''
-        return jsonutils.loads(result)
-    except ValueError:
-        LOG.exception("Unable to get updated status")
-        return None
-    except session.XenAPI.Failure as e:
-        LOG.error("The call to %(method)s returned an error: %(e)s.",
-                  {'method': method.__name__, 'e': e})
-        return e.details[1]
-
-
-def _uuid_find(context, host, name_label):
-    """Return instance uuid by name_label."""
-    for i in objects.InstanceList.get_by_host(context, host):
-        if i.name == name_label:
-            return i.uuid
-    return None
-
-
-def _host_find(context, session, src_aggregate, host_ref):
-    """Return the host from the xenapi host reference.
-
-    :param src_aggregate: the aggregate that the compute host being put in
-        maintenance (source of VMs) belongs to
-    :param host_ref: the hypervisor host reference (destination of VMs)
-
-    :return: the compute host that manages host_ref
-    """
-    # NOTE: this would be a lot simpler if nova-compute stored
-    # CONF.host in the XenServer host's other-config map.
-    # TODO(armando-migliaccio): improve according to the note above
-    uuid = session.host.get_uuid(host_ref)
-    for compute_host, host_uuid in src_aggregate.metadetails.items():
-        if host_uuid == uuid:
-            return compute_host
-    raise exception.NoValidHost(reason='Host %(host_uuid)s could not be found '
-                                'from aggregate metadata: %(metadata)s.' %
-                                {'host_uuid': uuid,
-                                 'metadata': src_aggregate.metadetails})
diff --git a/nova/virt/xenapi/image/__init__.py b/nova/virt/xenapi/image/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/nova/virt/xenapi/image/glance.py b/nova/virt/xenapi/image/glance.py
deleted file mode 100644
index 96550a4bd65b..000000000000
--- a/nova/virt/xenapi/image/glance.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import functools
-import sys
-
-from os_xenapi.client import exception as xenapi_exception
-from os_xenapi.client import host_glance
-from oslo_log import log as logging
-import six
-
-from nova.compute import utils as compute_utils
-import nova.conf
-from nova import exception
-from nova.image import glance
-from nova import utils
-from nova.virt.xenapi import vm_utils
-
-CONF = nova.conf.CONF
-LOG = logging.getLogger(__name__)
-
-
-class GlanceStore(object):
-    def _call_glance_plugin(self, context, instance, session, fn, image_id,
-                            params):
-        glance_api_servers = glance.get_api_servers(context)
-        sr_path = vm_utils.get_sr_path(session)
-        extra_headers = glance.generate_identity_headers(context)
-
-        def pick_glance(kwargs):
-            server = next(glance_api_servers)
-            kwargs['endpoint'] = server
-            kwargs['api_version'] = 2
-            # NOTE(sdague): is the return significant here at all?
- return server - - def retry_cb(context, instance, exc=None): - if exc: - exc_info = sys.exc_info() - LOG.debug(six.text_type(exc), exc_info=exc_info) - compute_utils.add_instance_fault_from_exc( - context, instance, exc, exc_info) - - cb = functools.partial(retry_cb, context, instance) - - return fn(session, CONF.glance.num_retries, pick_glance, cb, image_id, - sr_path, extra_headers, **params) - - def download_image(self, context, session, instance, image_id): - params = {'uuid_stack': vm_utils._make_uuid_stack()} - try: - vdis = self._call_glance_plugin(context, instance, session, - host_glance.download_vhd, image_id, - params) - except xenapi_exception.PluginRetriesExceeded: - raise exception.CouldNotFetchImage(image_id=image_id) - - return vdis - - def upload_image(self, context, session, instance, image_id, vdi_uuids): - params = {'vdi_uuids': vdi_uuids} - props = params['properties'] = {} - props['auto_disk_config'] = instance['auto_disk_config'] - props['os_type'] = instance.get('os_type', None) or ( - CONF.xenserver.default_os_type) - - compression_level = vm_utils.get_compression_level() - if compression_level: - props['xenapi_image_compression_level'] = compression_level - - auto_disk_config = utils.get_auto_disk_config_from_instance(instance) - if utils.is_auto_disk_config_disabled(auto_disk_config): - props["auto_disk_config"] = "disabled" - - try: - self._call_glance_plugin(context, instance, session, - host_glance.upload_vhd, image_id, params) - except xenapi_exception.PluginRetriesExceeded: - raise exception.CouldNotUploadImage(image_id=image_id) - except xenapi_exception.PluginImageNotFound: - raise exception.ImageNotFound(image_id=image_id) diff --git a/nova/virt/xenapi/image/utils.py b/nova/virt/xenapi/image/utils.py deleted file mode 100644 index 684eaea685b4..000000000000 --- a/nova/virt/xenapi/image/utils.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import shutil -import tarfile - -from oslo_utils import importutils - -from nova import exception -from nova.image import glance - -_VDI_FORMAT_RAW = 1 - -IMAGE_API = glance.API() -IMAGE_HANDLERS = {'direct_vhd': 'glance.GlanceStore', - 'vdi_local_dev': 'vdi_through_dev.VdiThroughDevStore', - 'vdi_remote_stream': 'vdi_stream.VdiStreamStore'} - - -def get_image_handler(handler_name): - if handler_name not in IMAGE_HANDLERS: - raise exception.ImageHandlerUnsupported(image_handler=handler_name) - return importutils.import_object('nova.virt.xenapi.image.' 
- '%s' % IMAGE_HANDLERS[handler_name]) - - -class GlanceImage(object): - def __init__(self, context, image_href_or_id): - self._context = context - self._image_id = image_href_or_id - self._cached_meta = None - - @property - def meta(self): - if self._cached_meta is None: - self._cached_meta = IMAGE_API.get(self._context, self._image_id) - return self._cached_meta - - def download_to(self, fileobj): - return IMAGE_API.download(self._context, self._image_id, fileobj) - - def is_raw_tgz(self): - return ['raw', 'tgz'] == [ - self.meta.get(key) for key in ('disk_format', 'container_format')] - - def data(self): - return IMAGE_API.download(self._context, self._image_id) - - -class RawImage(object): - def __init__(self, glance_image): - self.glance_image = glance_image - - def get_size(self): - return int(self.glance_image.meta['size']) - - def stream_to(self, fileobj): - return self.glance_image.download_to(fileobj) - - -class IterableToFileAdapter(object): - """A degenerate file-like so that an iterable could be read like a file. - - As Glance client returns an iterable, but tarfile requires a file like, - this is the adapter between the two. This allows tarfile to access the - glance stream. - """ - - def __init__(self, iterable): - self.iterator = iterable.__iter__() - self.remaining_data = '' - - def read(self, size): - chunk = self.remaining_data - try: - while not chunk: - chunk = next(self.iterator) - except StopIteration: - return '' - return_value = chunk[0:size] - self.remaining_data = chunk[size:] - return return_value - - -class RawTGZImage(object): - def __init__(self, glance_image): - self.glance_image = glance_image - self._tar_info = None - self._tar_file = None - - def _as_file(self): - return IterableToFileAdapter(self.glance_image.data()) - - def _as_tarfile(self): - return tarfile.open(mode='r|gz', fileobj=self._as_file()) - - def get_size(self): - if self._tar_file is None: - self._tar_file = self._as_tarfile() - self._tar_info = self._tar_file.next() - return self._tar_info.size - - def stream_to(self, target_file): - if self._tar_file is None: - self._tar_file = self._as_tarfile() - self._tar_info = self._tar_file.next() - source_file = self._tar_file.extractfile(self._tar_info) - shutil.copyfileobj(source_file, target_file) - self._tar_file.close() diff --git a/nova/virt/xenapi/image/vdi_stream.py b/nova/virt/xenapi/image/vdi_stream.py deleted file mode 100644 index 2c8a7f1f894d..000000000000 --- a/nova/virt/xenapi/image/vdi_stream.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2017 Citrix Systems -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" This class will stream image data directly between glance and VDI. 
-"""
-
-from os_xenapi.client import exception as xenapi_exception
-from os_xenapi.client import image as xenapi_image
-from oslo_log import log as logging
-
-import nova.conf
-from nova import exception
-from nova.image import glance
-from nova import utils as nova_utils
-from nova.virt.xenapi.image import utils
-from nova.virt.xenapi import vm_utils
-
-CONF = nova.conf.CONF
-LOG = logging.getLogger(__name__)
-
-IMAGE_API = glance.API()
-
-
-class VdiStreamStore(object):
-    def download_image(self, context, session, instance, image_id):
-        try:
-            host_url = CONF.xenserver.connection_url
-            image_data = IMAGE_API.download(context, image_id)
-            image_stream = utils.IterableToFileAdapter(image_data)
-            sr_ref = vm_utils.safe_find_sr(session)
-            vdis = xenapi_image.stream_to_vdis(context, session,
-                                               instance, host_url,
-                                               sr_ref, image_stream)
-        except xenapi_exception.OsXenApiException as e:
-            LOG.error("Image download failed with exception: %s", e)
-            raise exception.CouldNotFetchImage(image_id=image_id)
-        return vdis
-
-    def _get_metadata(self, context, instance, image_id):
-        metadata = IMAGE_API.get(context, image_id)
-        metadata['disk_format'] = 'vhd'
-        metadata['container_format'] = 'ovf'
-        metadata['auto_disk_config'] = str(instance['auto_disk_config'])
-        metadata['os_type'] = instance.get('os_type') or (
-            CONF.xenserver.default_os_type)
-        # Set size to zero so that glance will update it once the image
-        # data has been uploaded.
-        metadata['size'] = 0
-
-        # Adjust the auto_disk_config value based on the instance's
-        # system metadata.
-        # TODO(mriedem): Consider adding an abstract base class for the
-        # various image handlers to contain common code like this.
-        auto_disk = nova_utils.get_auto_disk_config_from_instance(instance)
-        if nova_utils.is_auto_disk_config_disabled(auto_disk):
-            metadata['auto_disk_config'] = "disabled"
-
-        return metadata
-
-    def upload_image(self, context, session, instance, image_id, vdi_uuids):
-        try:
-            host_url = CONF.xenserver.connection_url
-            level = vm_utils.get_compression_level()
-            metadata = self._get_metadata(context, instance, image_id)
-            image_chunks = xenapi_image.stream_from_vdis(
-                context, session, instance, host_url, vdi_uuids,
-                compresslevel=level)
-            image_stream = utils.IterableToFileAdapter(image_chunks)
-            IMAGE_API.update(context, image_id, metadata,
-                             data=image_stream)
-        except xenapi_exception.OsXenApiException as e:
-            LOG.error("Image upload failed with exception: %s", e)
-            raise exception.CouldNotUploadImage(image_id=image_id)
diff --git a/nova/virt/xenapi/image/vdi_through_dev.py b/nova/virt/xenapi/image/vdi_through_dev.py
deleted file mode 100644
index 0a5aba99ac9a..000000000000
--- a/nova/virt/xenapi/image/vdi_through_dev.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
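Both stores above funnel glance's chunk iterator through utils.IterableToFileAdapter (defined in image/utils.py earlier) so the streaming helpers can consume it with file-style read(size) calls; note that read() may return fewer than size bytes, since the adapter hands back at most one buffered chunk at a time. A sketch of how a consumer drains such an adapter (illustrative helper, not part of the driver):

def drain(adapter, bufsize=4096):
    # Keep reading until the adapter signals EOF with an empty string.
    parts = []
    while True:
        piece = adapter.read(bufsize)
        if not piece:
            return ''.join(parts)
        parts.append(piece)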
- -import contextlib -import os -import tarfile - -import eventlet -from eventlet import greenio - -from nova.image import glance -from nova import utils -from nova.virt.xenapi import vm_utils - - -class VdiThroughDevStore(object): - """Deal with virtual disks by attaching them to the OS domU. - - At the moment it supports upload to Glance, and the upload format is a raw - disk inside a tgz. - """ - - def upload_image(self, context, session, instance, image_id, vdi_uuids): - command = UploadToGlanceAsRawTgz( - context, session, instance, image_id, vdi_uuids) - return command.upload_image() - - def download_image(self, context, session, instance, image_id): - # TODO(matelakat) Move through-dev image download functionality to this - # method. - raise NotImplementedError() - - -class UploadToGlanceAsRawTgz(object): - def __init__(self, context, session, instance, image_id, vdi_uuids): - self.context = context - self.image_id = image_id - self.session = session - self.vdi_uuids = vdi_uuids - - def _get_virtual_size(self): - return self.session.call_xenapi( - 'VDI.get_virtual_size', self._get_vdi_ref()) - - def _get_vdi_ref(self): - return self.session.call_xenapi('VDI.get_by_uuid', self.vdi_uuids[0]) - - def _perform_upload(self, devpath): - readfile, writefile = self._create_pipe() - size = self._get_virtual_size() - producer = TarGzProducer(devpath, writefile, size, 'disk.raw') - consumer = glance.UpdateGlanceImage( - self.context, self.image_id, producer.get_metadata(), readfile) - pool = eventlet.GreenPool() - pool.spawn(producer.start) - pool.spawn(consumer.start) - pool.waitall() - - def _create_pipe(self): - rpipe, wpipe = os.pipe() - rfile = greenio.GreenPipe(rpipe, 'rb', 0) - wfile = greenio.GreenPipe(wpipe, 'wb', 0) - return rfile, wfile - - def upload_image(self): - vdi_ref = self._get_vdi_ref() - with vm_utils.vdi_attached(self.session, vdi_ref, - read_only=True) as dev: - devpath = utils.make_dev_path(dev) - with utils.temporary_chown(devpath): - self._perform_upload(devpath) - - -class TarGzProducer(object): - def __init__(self, devpath, writefile, size, fname): - self.fpath = devpath - self.output = writefile - self.size = size - self.fname = fname - - def get_metadata(self): - return { - 'disk_format': 'raw', - 'container_format': 'tgz' - } - - def start(self): - with contextlib.closing(self.output): - tinfo = tarfile.TarInfo(name=self.fname) - tinfo.size = int(self.size) - with tarfile.open(fileobj=self.output, mode='w|gz') as tfile: - with self._open_file(self.fpath, 'rb') as input_file: - tfile.addfile(tinfo, fileobj=input_file) - - def _open_file(self, *args): - return open(*args) diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py deleted file mode 100644 index 28b857c08f36..000000000000 --- a/nova/virt/xenapi/network_utils.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) 2010 Citrix Systems, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
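UploadToGlanceAsRawTgz above wires a TarGzProducer and a glance consumer to the two ends of an os.pipe(), running each side in an eventlet green thread so the tarball is streamed rather than staged on disk. The same pattern can be sketched with plain OS threads (a sketch under that substitution, not the driver's code):

import os
import tarfile
import threading


def stream_tgz(src_path, consume):
    """Tar+gzip src_path through a pipe while consume() reads the far end."""
    rfd, wfd = os.pipe()
    rfile = os.fdopen(rfd, 'rb')
    wfile = os.fdopen(wfd, 'wb')

    def produce():
        # Closing the write end is what signals EOF to the consumer.
        with wfile:
            tinfo = tarfile.TarInfo(name='disk.raw')
            tinfo.size = os.path.getsize(src_path)
            with tarfile.open(fileobj=wfile, mode='w|gz') as tfile:
                with open(src_path, 'rb') as src:
                    tfile.addfile(tinfo, fileobj=src)

    producer = threading.Thread(target=produce)
    producer.start()
    try:
        consume(rfile)  # e.g. upload rfile to an image service
    finally:
        producer.join()
        rfile.close()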
- -""" -Helper methods for operations related to the management of network -records and their attributes like bridges, PIFs, QoS, as well as -their lookup functions. -""" - -from nova import exception -from nova.i18n import _ - - -def find_network_with_name_label(session, name_label): - networks = session.network.get_by_name_label(name_label) - if len(networks) == 1: - return networks[0] - elif len(networks) > 1: - raise exception.NovaException( - _('Found non-unique network for name_label %s') % - name_label) - else: - return None - - -def find_network_with_bridge(session, bridge): - """Return the network on which the bridge is attached, if found. - The bridge is defined in the nova db and can be found either in the - 'bridge' or 'name_label' fields of the XenAPI network record. - """ - expr = ('field "name__label" = "%s" or field "bridge" = "%s"' % - (bridge, bridge)) - networks = session.network.get_all_records_where(expr) - if len(networks) == 1: - return list(networks.keys())[0] - elif len(networks) > 1: - raise exception.NovaException( - _('Found non-unique network for bridge %s') % bridge) - else: - raise exception.NovaException( - _('Found no network for bridge %s') % bridge) diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py deleted file mode 100644 index 1265ec9975fb..000000000000 --- a/nova/virt/xenapi/pool.py +++ /dev/null @@ -1,240 +0,0 @@ -# Copyright (c) 2012 Citrix Systems, Inc. -# Copyright 2010 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Management class for Pool-related functions (join, eject, etc). 
-""" - -from oslo_log import log as logging -from oslo_serialization import jsonutils -import six -import six.moves.urllib.parse as urlparse - -from nova.compute import rpcapi as compute_rpcapi -import nova.conf -from nova import exception -from nova.i18n import _ -from nova.virt.xenapi import pool_states -from nova.virt.xenapi import vm_utils - -LOG = logging.getLogger(__name__) - -CONF = nova.conf.CONF - - -class ResourcePool(object): - """Implements resource pool operations.""" - def __init__(self, session, virtapi): - host_rec = session.host.get_record(session.host_ref) - self._host_name = host_rec['hostname'] - self._host_addr = host_rec['address'] - self._host_uuid = host_rec['uuid'] - self._session = session - self._virtapi = virtapi - self.compute_rpcapi = compute_rpcapi.ComputeAPI() - - def undo_aggregate_operation(self, context, op, aggregate, - host, set_error): - """Undo aggregate operation when pool error raised.""" - try: - if set_error: - metadata = {pool_states.KEY: pool_states.ERROR} - aggregate.update_metadata(metadata) - op(host) - except Exception: - LOG.exception('Aggregate %(aggregate_id)s: unrecoverable ' - 'state during operation on %(host)s', - {'aggregate_id': aggregate.id, 'host': host}) - - def add_to_aggregate(self, context, aggregate, host, slave_info=None): - """Add a compute host to an aggregate.""" - if not pool_states.is_hv_pool(aggregate.metadata): - return - - if CONF.xenserver.independent_compute: - raise exception.NotSupportedWithOption( - operation='adding to a XenServer pool', - option='CONF.xenserver.independent_compute') - - invalid = {pool_states.CHANGING: _('setup in progress'), - pool_states.DISMISSED: _('aggregate deleted'), - pool_states.ERROR: _('aggregate in error')} - - if (aggregate.metadata[pool_states.KEY] in invalid.keys()): - raise exception.InvalidAggregateActionAdd( - aggregate_id=aggregate.id, - reason=invalid[aggregate.metadata[pool_states.KEY]]) - - if (aggregate.metadata[pool_states.KEY] == pool_states.CREATED): - aggregate.update_metadata({pool_states.KEY: pool_states.CHANGING}) - if len(aggregate.hosts) == 1: - # this is the first host of the pool -> make it master - self._init_pool(aggregate.id, aggregate.name) - # save metadata so that we can find the master again - metadata = {'master_compute': host, - host: self._host_uuid, - pool_states.KEY: pool_states.ACTIVE} - aggregate.update_metadata(metadata) - else: - # the pool is already up and running, we need to figure out - # whether we can serve the request from this host or not. - master_compute = aggregate.metadata['master_compute'] - if master_compute == CONF.host and master_compute != host: - # this is the master -> do a pool-join - # To this aim, nova compute on the slave has to go down. - # NOTE: it is assumed that ONLY nova compute is running now - self._join_slave(aggregate.id, host, - slave_info.get('compute_uuid'), - slave_info.get('url'), slave_info.get('user'), - slave_info.get('passwd')) - metadata = {host: slave_info.get('xenhost_uuid'), } - aggregate.update_metadata(metadata) - elif master_compute and master_compute != host: - # send rpc cast to master, asking to add the following - # host with specified credentials. 
- slave_info = self._create_slave_info() - - self.compute_rpcapi.add_aggregate_host( - context, host, aggregate, master_compute, slave_info) - - def remove_from_aggregate(self, context, aggregate, host, slave_info=None): - """Remove a compute host from an aggregate.""" - slave_info = slave_info or dict() - if not pool_states.is_hv_pool(aggregate.metadata): - return - - invalid = {pool_states.CREATED: _('no hosts to remove'), - pool_states.CHANGING: _('setup in progress'), - pool_states.DISMISSED: _('aggregate deleted')} - if aggregate.metadata[pool_states.KEY] in invalid.keys(): - raise exception.InvalidAggregateActionDelete( - aggregate_id=aggregate.id, - reason=invalid[aggregate.metadata[pool_states.KEY]]) - - master_compute = aggregate.metadata['master_compute'] - if master_compute == CONF.host and master_compute != host: - # this is the master -> instruct it to eject a host from the pool - host_uuid = aggregate.metadata[host] - self._eject_slave(aggregate.id, - slave_info.get('compute_uuid'), host_uuid) - aggregate.update_metadata({host: None}) - elif master_compute == host: - # Remove master from its own pool -> destroy pool only if the - # master is on its own, otherwise raise fault. Destroying a - # pool made only by master is fictional - if len(aggregate.hosts) > 1: - # NOTE: this could be avoided by doing a master - # re-election, but this is simpler for now. - raise exception.InvalidAggregateActionDelete( - aggregate_id=aggregate.id, - reason=_('Unable to eject %s ' - 'from the pool; pool not empty') - % host) - self._clear_pool(aggregate.id) - aggregate.update_metadata({'master_compute': None, host: None}) - elif master_compute and master_compute != host: - # A master exists -> forward pool-eject request to master - slave_info = self._create_slave_info() - - self.compute_rpcapi.remove_aggregate_host( - context, host, aggregate.id, master_compute, slave_info) - else: - # this shouldn't have happened - raise exception.AggregateError(aggregate_id=aggregate.id, - action='remove_from_aggregate', - reason=_('Unable to eject %s ' - 'from the pool; No master found') - % host) - - def _join_slave(self, aggregate_id, host, compute_uuid, url, user, passwd): - """Joins a slave into a XenServer resource pool.""" - try: - args = {'compute_uuid': compute_uuid, - 'url': url, - 'user': user, - 'password': passwd, - 'force': jsonutils.dumps(CONF.xenserver.use_join_force), - 'master_addr': self._host_addr, - 'master_user': CONF.xenserver.connection_username, - 'master_pass': CONF.xenserver.connection_password, } - self._session.call_plugin('xenhost.py', 'host_join', args) - except self._session.XenAPI.Failure as e: - LOG.error("Pool-Join failed: %s", e) - raise exception.AggregateError(aggregate_id=aggregate_id, - action='add_to_aggregate', - reason=_('Unable to join %s ' - 'in the pool') % host) - - def _eject_slave(self, aggregate_id, compute_uuid, host_uuid): - """Eject a slave from a XenServer resource pool.""" - try: - # shutdown nova-compute; if there are other VMs running, e.g. - # guest instances, the eject will fail. That's a precaution - # to deal with the fact that the admin should evacuate the host - # first. The eject wipes out the host completely. 
-            vm_ref = self._session.VM.get_by_uuid(compute_uuid)
-            self._session.VM.clean_shutdown(vm_ref)
-
-            host_ref = self._session.host.get_by_uuid(host_uuid)
-            self._session.pool.eject(host_ref)
-        except self._session.XenAPI.Failure as e:
-            LOG.error("Pool-eject failed: %s", e)
-            raise exception.AggregateError(aggregate_id=aggregate_id,
-                                           action='remove_from_aggregate',
-                                           reason=six.text_type(e.details))
-
-    def _init_pool(self, aggregate_id, aggregate_name):
-        """Set the name label of a XenServer pool."""
-        try:
-            pool_ref = self._session.pool.get_all()[0]
-            self._session.pool.set_name_label(pool_ref, aggregate_name)
-        except self._session.XenAPI.Failure as e:
-            LOG.error("Unable to set up pool: %s.", e)
-            raise exception.AggregateError(aggregate_id=aggregate_id,
-                                           action='add_to_aggregate',
-                                           reason=six.text_type(e.details))
-
-    def _clear_pool(self, aggregate_id):
-        """Clear the name label of a XenServer pool."""
-        try:
-            pool_ref = self._session.pool.get_all()[0]
-            self._session.pool.set_name_label(pool_ref, '')
-        except self._session.XenAPI.Failure as e:
-            LOG.error("Pool-set_name_label failed: %s", e)
-            raise exception.AggregateError(aggregate_id=aggregate_id,
-                                           action='remove_from_aggregate',
-                                           reason=six.text_type(e.details))
-
-    def _create_slave_info(self):
-        """XenServer specific info needed to join the hypervisor pool."""
-        # replace the address in the xenapi connection url,
-        # because this might be 169.254.0.1, i.e. xenapi
-        # NOTE: password in clear is not great, but it'll do for now
-        sender_url = swap_xapi_host(
-            CONF.xenserver.connection_url, self._host_addr)
-
-        return {
-            "url": sender_url,
-            "user": CONF.xenserver.connection_username,
-            "passwd": CONF.xenserver.connection_password,
-            "compute_uuid": vm_utils.get_this_vm_uuid(None),
-            "xenhost_uuid": self._host_uuid,
-        }
-
-
-def swap_xapi_host(url, host_addr):
-    """Replace the XenServer address present in 'url' with 'host_addr'."""
-    temp_url = urlparse.urlparse(url)
-    return url.replace(temp_url.hostname, '%s' % host_addr)
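A self-contained check of the substitution swap_xapi_host performs (addresses hypothetical): only the host part of the URL is swapped, while scheme, port and path are preserved.

import six.moves.urllib.parse as urlparse

url = 'http://169.254.0.1:443/path'
host = urlparse.urlparse(url).hostname          # '169.254.0.1'
assert url.replace(host, '192.168.1.5') == 'http://192.168.1.5:443/path'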
diff --git a/nova/virt/xenapi/pool_states.py b/nova/virt/xenapi/pool_states.py
deleted file mode 100644
index ae431ddecb4d..000000000000
--- a/nova/virt/xenapi/pool_states.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2010 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Possible states for xen resource pools.
-
-A pool may be 'created', in which case the admin has triggered its
-creation, but the underlying hypervisor pool has not actually been set up
-yet. A pool may be 'changing', meaning that the underlying hypervisor
-pool is being set up. A pool may be 'active', in which case the underlying
-hypervisor pool is up and running. A pool may be 'dismissed' when it has
-no hosts and it has been deleted. A pool may be in 'error' in all other
-cases.
-A 'created' pool becomes 'changing' during the first request to
-add a host. During a 'changing' status no other requests will be accepted;
-this is to allow the hypervisor layer to instantiate the underlying pool
-without any potential race condition that may occur in master/slave-based
-configurations. The pool goes into the 'active' state when the underlying
-pool has been correctly instantiated.
-All other operations (e.g. add/remove hosts) that succeed will keep the
-pool in the 'active' state. If a number of consecutive requests fail,
-an 'active' pool goes into an 'error' state. To recover from such a state,
-admin intervention is required. Currently an error state is irreversible,
-that is, in order to recover from it a pool must be deleted.
-"""
-
-CREATED = 'created'
-CHANGING = 'changing'
-ACTIVE = 'active'
-ERROR = 'error'
-DISMISSED = 'dismissed'
-
-# Metadata keys
-KEY = 'operational_state'
-POOL_FLAG = 'hypervisor_pool'
-
-
-def is_hv_pool(metadata):
-    """Checks if aggregate is a hypervisor_pool."""
-    return POOL_FLAG in metadata.keys()
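The life cycle described in the module docstring can be read as a small state machine. Spelled out as an explicit transition table over the constants above (a hypothetical helper, not part of the original module):

ALLOWED_TRANSITIONS = {
    CREATED: {CHANGING},                 # first add-host request
    CHANGING: {ACTIVE, ERROR},           # setup finishes, or fails
    ACTIVE: {ACTIVE, ERROR, DISMISSED},  # normal operation
    ERROR: {DISMISSED},                  # irreversible: delete the pool
    DISMISSED: set(),
}


def check_transition(current, new):
    if new not in ALLOWED_TRANSITIONS.get(current, set()):
        raise ValueError('invalid pool state transition: %s -> %s'
                         % (current, new))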
diff --git a/nova/virt/xenapi/vif.py b/nova/virt/xenapi/vif.py
deleted file mode 100644
index 8b6377e95525..000000000000
--- a/nova/virt/xenapi/vif.py
+++ /dev/null
@@ -1,443 +0,0 @@
-# Copyright (c) 2011 Citrix Systems, Inc.
-# Copyright 2011 OpenStack Foundation
-# Copyright (C) 2011 Nicira, Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""VIF drivers for XenAPI."""
-
-from os_xenapi.client import host_network
-from oslo_log import log as logging
-
-from nova.compute import power_state
-import nova.conf
-from nova import exception
-from nova.i18n import _
-from nova.network import model as network_model
-from nova.virt.xenapi import network_utils
-from nova.virt.xenapi import vm_utils
-
-
-CONF = nova.conf.CONF
-
-LOG = logging.getLogger(__name__)
-
-
-class XenVIFDriver(object):
-    def __init__(self, xenapi_session):
-        self._session = xenapi_session
-
-    def _get_vif_ref(self, vif, vm_ref):
-        vif_refs = self._session.call_xenapi("VM.get_VIFs", vm_ref)
-        for vif_ref in vif_refs:
-            try:
-                vif_rec = self._session.call_xenapi('VIF.get_record', vif_ref)
-                if vif_rec['MAC'] == vif['address']:
-                    return vif_ref
-            except Exception:
-                # If we get an exception here, the vif may have been removed
-                # during the loop; ignore this vif and continue
-                continue
-        return None
-
-    def _create_vif(self, vif, vif_rec, vm_ref):
-        try:
-            vif_ref = self._session.call_xenapi('VIF.create', vif_rec)
-        except Exception as e:
-            LOG.warning("Failed to create vif, exception:%(exception)s, "
-                        "vif:%(vif)s", {'exception': e, 'vif': vif})
-            raise exception.NovaException(
-                reason=_("Failed to create vif %s") % vif)
-
-        LOG.debug("created vif %(vif)s for vm %(vm_ref)s successfully",
-                  {'vif': vif, 'vm_ref': vm_ref})
-        return vif_ref
-
-    def unplug(self, instance, vif, vm_ref):
-        try:
-            LOG.debug("unplug vif, vif:%(vif)s, vm_ref:%(vm_ref)s",
-                      {'vif': vif, 'vm_ref': vm_ref}, instance=instance)
-            vif_ref = self._get_vif_ref(vif, vm_ref)
-            if not vif_ref:
-                LOG.debug("vif didn't exist, no need to unplug vif %s",
-                          vif, instance=instance)
-                return
-            # hot unplug the VIF first
-            self.hot_unplug(vif, instance, vm_ref, vif_ref)
-            self._session.call_xenapi('VIF.destroy', vif_ref)
-        except Exception as e:
-            LOG.warning(
-                "Failed to unplug vif:%(vif)s, exception:%(exception)s",
-                {'vif': vif, 'exception': e}, instance=instance)
-            raise exception.NovaException(
-                reason=_("Failed to unplug vif %s") % vif)
-
-    def get_vif_interim_net_name(self, vif_id):
-        return ("net-" + vif_id)[:network_model.NIC_NAME_LEN]
-
-    def hot_plug(self, vif, instance, vm_ref, vif_ref):
-        """Hot-plug a virtual interface into a running instance.
-        :param nova.network.model.VIF vif:
-            The object which has the information about the interface to attach.
-        :param nova.objects.instance.Instance instance:
-            The instance which will get an additional network interface.
-        :param string vm_ref:
-            The instance's reference from hypervisor's point of view.
-        :param string vif_ref:
-            The interface's reference from hypervisor's point of view.
-        :return: None
-        """
-        pass
-
-    def hot_unplug(self, vif, instance, vm_ref, vif_ref):
-        """Hot-unplug a virtual interface from a running instance.
-        :param nova.network.model.VIF vif:
-            The object which has the information about the interface to detach.
-        :param nova.objects.instance.Instance instance:
-            The instance from which an additional network interface will be
-            removed.
-        :param string vm_ref:
-            The instance's reference from hypervisor's point of view.
-        :param string vif_ref:
-            The interface's reference from hypervisor's point of view.
-        :return: None
-        """
-        pass
-
-    def post_start_actions(self, instance, vif_ref):
-        """Post actions when the instance is powered on.
-        :param nova.objects.instance.Instance instance:
-            The instance which will execute extra actions after power on
-        :param string vif_ref:
-            The interface's reference from hypervisor's point of view.
-        :return: None
-        """
-        pass
-
-    def create_vif_interim_network(self, vif):
-        pass
-
-    def delete_network_and_bridge(self, instance, vif_id):
-        pass
-
-
-class XenAPIOpenVswitchDriver(XenVIFDriver):
-    """VIF driver for Open vSwitch with XenAPI."""
-
-    def plug(self, instance, vif, vm_ref=None, device=None):
-        """Create an interim network for this vif, and build the vif_rec
-        which will be used by xapi to create the VM vif.
-        """
-        if not vm_ref:
-            vm_ref = vm_utils.lookup(self._session, instance['name'])
-        if not vm_ref:
-            raise exception.VirtualInterfacePlugException(
-                "Cannot find instance %s, discarding vif plug"
-                % instance['name'])
-
-        # if VIF already exists, return this vif_ref directly
-        vif_ref = self._get_vif_ref(vif, vm_ref)
-        if vif_ref:
-            LOG.debug("VIF %s already exists when plug vif",
-                      vif_ref, instance=instance)
-            return vif_ref
-
-        if not device:
-            device = 0
-
-        # Create an interim network for each VIF, so dom0 has a single
-        # bridge for each device (the emulated and PV ethernet devices
-        # will both be on this bridge).
-        network_ref = self.create_vif_interim_network(vif)
-        vif_rec = {}
-        vif_rec['device'] = str(device)
-        vif_rec['network'] = network_ref
-        vif_rec['VM'] = vm_ref
-        vif_rec['MAC'] = vif['address']
-        vif_rec['MTU'] = '1500'
-        vif_rec['qos_algorithm_type'] = ''
-        vif_rec['qos_algorithm_params'] = {}
-        vif_rec['other_config'] = {'neutron-port-id': vif['id']}
-        vif_ref = self._create_vif(vif, vif_rec, vm_ref)
-
-        # call XenAPI to plug vif
-        self.hot_plug(vif, instance, vm_ref, vif_ref)
-
-        return vif_ref
-
-    def unplug(self, instance, vif, vm_ref):
-        super(XenAPIOpenVswitchDriver, self).unplug(instance, vif, vm_ref)
-        self.delete_network_and_bridge(instance, vif['id'])
-
-    def delete_network_and_bridge(self, instance, vif_id):
-        """Delete the network and the bridge:
-        1. delete the patch port pair between the integration bridge and
-           the qbr linux bridge (if it exists) and the interim network.
-        2. destroy the interim network
-        3. delete the OVS bridge service for the interim network
-        4. delete the linux bridge qbr and related ports if they exist
-        """
-        network = self._get_network_by_vif(vif_id)
-        if not network:
-            return
-        vifs = self._session.network.get_VIFs(network)
-        bridge_name = self._session.network.get_bridge(network)
-        if vifs:
-            # Still has vifs attached to this network
-            for remain_vif in vifs:
-                # if the remaining vifs are on the local server, give up all
-                # the operations. If the remaining vifs are on remote hosts,
-                # keep the network and delete the bridge
-                if self._get_host_by_vif(remain_vif) == self._session.host_ref:
-                    return
-        else:
-            # No vif left, delete the network
-            try:
-                self._session.network.destroy(network)
-            except Exception as e:
-                LOG.warning("Failed to destroy network for vif (id=%(if)s), "
-                            "exception:%(exception)s",
-                            {'if': vif_id, 'exception': e}, instance=instance)
-                raise exception.VirtualInterfaceUnplugException(
-                    reason=_("Failed to destroy network"))
-        # Two cases:
-        # 1) No vif left, just delete the bridge
-        # 2) For resize/intra-pool migrate, vifs on both of the
-        #    source and target VM will be connected to the same
-        #    interim network. If the VM is resident on a remote host,
-        #    the linux bridge on the current host will be deleted.
-        self.delete_bridge(instance, vif_id, bridge_name)
-
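All of the helper devices in this driver derive their names from the neutron port id, truncated to network_model.NIC_NAME_LEN characters so they fit the kernel's interface-name limit. A quick illustration with a made-up port id and the nova value NIC_NAME_LEN = 14:

NIC_NAME_LEN = 14
vif_id = 'a1b2c3d4-e5f6-7890'  # hypothetical neutron port id
names = dict((prefix, (prefix + vif_id)[:NIC_NAME_LEN])
             for prefix in ('net-', 'qbr', 'qvb', 'qvo', 'tap', 'vif'))
# -> {'qbr': 'qbra1b2c3d4-e5', 'tap': 'tapa1b2c3d4-e5', ...}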
-    def delete_bridge(self, instance, vif_id, bridge_name):
-        LOG.debug('destroying patch port pair for vif id: vif_id=%(vif_id)s',
-                  {'vif_id': vif_id})
-        patch_port1, tap_name = self._get_patch_port_pair_names(vif_id)
-        try:
-            # delete the patch port pair
-            host_network.ovs_del_port(self._session, bridge_name, patch_port1)
-        except Exception as e:
-            LOG.warning("Failed to delete patch port pair for vif id %(if)s,"
-                        " exception:%(exception)s",
-                        {'if': vif_id, 'exception': e}, instance=instance)
-            raise exception.VirtualInterfaceUnplugException(
-                reason=_("Failed to delete patch port pair"))
-
-        LOG.debug('destroying bridge: bridge=%(br)s', {'br': bridge_name})
-        try:
-            # Delete the bridge if it still exists. Because patch ports
-            # still exist on the bridge, XAPI will not destroy it
-            # automatically, so destroy it here.
-            host_network.ovs_del_br(self._session, bridge_name)
-            qbr_name = self._get_qbr_name(vif_id)
-            qvb_name, qvo_name = self._get_veth_pair_names(vif_id)
-            if self._device_exists(qbr_name):
-                # delete tap port, qvb port and qbr
-                LOG.debug(
-                    "destroy linux bridge %(qbr)s when unplugging vif id"
-                    " %(vif_id)s", {'qbr': qbr_name, 'vif_id': vif_id})
-                self._delete_linux_port(qbr_name, tap_name)
-                self._delete_linux_port(qbr_name, qvb_name)
-                self._delete_linux_bridge(qbr_name)
-            host_network.ovs_del_port(self._session,
-                                      CONF.xenserver.ovs_integration_bridge,
-                                      qvo_name)
-        except Exception as e:
-            LOG.warning("Failed to delete bridge for vif id %(if)s, "
-                        "exception:%(exception)s",
-                        {'if': vif_id, 'exception': e}, instance=instance)
-            raise exception.VirtualInterfaceUnplugException(
-                reason=_("Failed to delete bridge"))
-
-    def _get_network_by_vif(self, vif_id):
-        net_name = self.get_vif_interim_net_name(vif_id)
-        network = network_utils.find_network_with_name_label(
-            self._session, net_name)
-        if network is None:
-            LOG.debug("Failed to find network for vif id %(if)s",
-                      {'if': vif_id})
-            return
-        return network
-
-    def _get_host_by_vif(self, vif_id):
-        network = self._get_network_by_vif(vif_id)
-        if not network:
-            return
-        vif_info = self._session.VIF.get_all_records_where(
-            'field "network" = "%s"' % network)
-        if not vif_info or len(vif_info) != 1:
-            raise exception.NovaException(
-                "Couldn't find vif id information in network %s"
-                % network)
-        vm_ref = self._session.VIF.get_VM(list(vif_info.keys())[0])
-        return self._session.VM.get_resident_on(vm_ref)
-
-    def hot_plug(self, vif, instance, vm_ref, vif_ref):
-        # hot plug vif only when VM's power state is running
-        LOG.debug("Hot plug vif, vif: %s", vif, instance=instance)
-        state = vm_utils.get_power_state(self._session, vm_ref)
-        if state != power_state.RUNNING:
-            LOG.debug("Skip hot plug VIF, VM is not running, vif: %s", vif,
-                      instance=instance)
-            return
-
-        self._session.VIF.plug(vif_ref)
-        self.post_start_actions(instance, vif_ref)
-
-    def hot_unplug(self, vif, instance, vm_ref, vif_ref):
-        # hot unplug vif only when VM's power state is running
-        LOG.debug("Hot unplug vif, vif: %s", vif, instance=instance)
-        state = vm_utils.get_power_state(self._session, vm_ref)
-        if state != power_state.RUNNING:
-            LOG.debug("Skip hot unplug VIF, VM is not running, vif: %s", vif,
-                      instance=instance)
-            return
-
-        self._session.VIF.unplug(vif_ref)
-
-    def _get_qbr_name(self, iface_id):
-        return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN]
-
("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN]) - - def _device_exists(self, device): - """Check if ethernet device exists.""" - try: - host_network.ip_link_get_dev(self._session, device) - return True - except Exception: - # Swallow exception from plugin, since this indicates the device - # doesn't exist - return False - - def _delete_net_dev(self, dev): - """Delete a network device only if it exists.""" - if self._device_exists(dev): - LOG.debug("delete network device '%s'", dev) - host_network.ip_link_del_dev(self._session, dev) - - def _create_veth_pair(self, dev1_name, dev2_name): - """Create a pair of veth devices with the specified names, - deleting any previous devices with those names. - """ - LOG.debug("Create veth pair, port1:%(qvb)s, port2:%(qvo)s", - {'qvb': dev1_name, 'qvo': dev2_name}) - for dev in [dev1_name, dev2_name]: - self._delete_net_dev(dev) - host_network.ip_link_add_veth_pair(self._session, dev1_name, dev2_name) - for dev in [dev1_name, dev2_name]: - host_network.ip_link_set_dev(self._session, dev, 'up') - host_network.ip_link_set_promisc(self._session, dev, 'on') - - def _create_linux_bridge(self, vif_rec): - """create a qbr linux bridge for neutron security group - """ - iface_id = vif_rec['other_config']['neutron-port-id'] - linux_br_name = self._get_qbr_name(iface_id) - if not self._device_exists(linux_br_name): - LOG.debug("Create linux bridge %s", linux_br_name) - host_network.brctl_add_br(self._session, linux_br_name) - host_network.brctl_set_fd(self._session, linux_br_name, '0') - host_network.brctl_set_stp(self._session, linux_br_name, 'off') - host_network.ip_link_set_dev(self._session, linux_br_name, 'up') - - qvb_name, qvo_name = self._get_veth_pair_names(iface_id) - if not self._device_exists(qvo_name): - self._create_veth_pair(qvb_name, qvo_name) - host_network.brctl_add_if(self._session, linux_br_name, qvb_name) - host_network.ovs_create_port( - self._session, CONF.xenserver.ovs_integration_bridge, - qvo_name, iface_id, vif_rec['MAC'], 'active') - return linux_br_name - - def _delete_linux_port(self, qbr_name, port_name): - try: - # delete port in linux bridge - host_network.brctl_del_if(self._session, qbr_name, port_name) - self._delete_net_dev(port_name) - except Exception: - LOG.debug("Fail to delete linux port %(port_name)s on bridge " - "%(qbr_name)s", - {'port_name': port_name, 'qbr_name': qbr_name}) - - def _delete_linux_bridge(self, qbr_name): - try: - # delete linux bridge qbrxxx - host_network.ip_link_set_dev(self._session, qbr_name, 'down') - host_network.brctl_del_br(self._session, qbr_name) - except Exception: - LOG.debug("Fail to delete linux bridge %s", qbr_name) - - def post_start_actions(self, instance, vif_ref): - """Do needed actions post vif start: - plug the interim ovs bridge to the integration bridge; - set external_ids to the int-br port which will service - for this vif. 
-        """
-        vif_rec = self._session.VIF.get_record(vif_ref)
-        network_ref = vif_rec['network']
-        bridge_name = self._session.network.get_bridge(network_ref)
-        network_uuid = self._session.network.get_uuid(network_ref)
-        iface_id = vif_rec['other_config']['neutron-port-id']
-        patch_port1, tap_name = self._get_patch_port_pair_names(iface_id)
-        LOG.debug('plug_ovs_bridge: port1=%(port1)s, port2=%(port2)s, '
-                  'network_uuid=%(uuid)s, bridge_name=%(bridge_name)s',
-                  {'port1': patch_port1, 'port2': tap_name,
-                   'uuid': network_uuid, 'bridge_name': bridge_name})
-        if bridge_name is None:
-            raise exception.VirtualInterfacePlugException(
-                _("Failed to find bridge for vif"))
-
-        # Create Linux bridge qbrXXX
-        linux_br_name = self._create_linux_bridge(vif_rec)
-        if not self._device_exists(tap_name):
-            LOG.debug("create veth pair for interim bridge %(interim_bridge)s "
-                      "and linux bridge %(linux_bridge)s",
-                      {'interim_bridge': bridge_name,
-                       'linux_bridge': linux_br_name})
-            self._create_veth_pair(tap_name, patch_port1)
-            host_network.brctl_add_if(self._session, linux_br_name, tap_name)
-            # Add port to interim bridge
-            host_network.ovs_add_port(self._session, bridge_name, patch_port1)
-
-    def create_vif_interim_network(self, vif):
-        net_name = self.get_vif_interim_net_name(vif['id'])
-        # In a pooled environment, make the network shared in order to ensure
-        # it can also be used on the target host while live migrating.
-        # The "assume_network_is_shared" flag does not affect environments
-        # where storage pools are not used.
-        network_rec = {'name_label': net_name,
-                       'name_description': "interim network for vif[%s]"
-                       % vif['id'],
-                       'other_config': {'assume_network_is_shared': 'true'}}
-        network_ref = network_utils.find_network_with_name_label(
-            self._session, net_name)
-        if network_ref:
-            # The network already exists, so just return it. In some
-            # scenarios, e.g. resize/migrate, we won't create a new
-            # interim network.
-            return network_ref
-        try:
-            network_ref = self._session.network.create(network_rec)
-        except Exception as e:
-            LOG.warning("Failed to create interim network for vif %(if)s, "
                        "exception:%(exception)s",
-                        {'if': vif, 'exception': e})
-            raise exception.VirtualInterfacePlugException(
-                _("Failed to create the interim network for vif"))
-        return network_ref
-
-    def _get_patch_port_pair_names(self, iface_id):
-        return (("vif%s" % iface_id)[:network_model.NIC_NAME_LEN],
-                ("tap%s" % iface_id)[:network_model.NIC_NAME_LEN])
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
deleted file mode 100644
index 79594af5a913..000000000000
--- a/nova/virt/xenapi/vm_utils.py
+++ /dev/null
@@ -1,2607 +0,0 @@
-# Copyright (c) 2010 Citrix Systems, Inc.
-# Copyright 2011 Piston Cloud Computing, Inc.
-# Copyright 2012 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Helper methods for operations related to the management of VM records and
-their attributes like VDIs, VIFs, as well as their lookup functions.
-""" - -import contextlib -import math -import os -import time -from xml.dom import minidom -from xml.parsers import expat - -from eventlet import greenthread -from os_xenapi.client import disk_management -from os_xenapi.client import host_network -from os_xenapi.client import vm_management -from oslo_concurrency import processutils -from oslo_log import log as logging -from oslo_utils import excutils -from oslo_utils import strutils -from oslo_utils import timeutils -from oslo_utils import units -from oslo_utils import uuidutils -from oslo_utils import versionutils -import six -from six.moves import range -import six.moves.urllib.parse as urlparse -import six.moves.urllib.request as urlrequest - -from nova.api.metadata import base as instance_metadata -from nova.compute import power_state -from nova.compute import task_states -from nova.compute import utils as compute_utils -import nova.conf -from nova import exception -from nova.i18n import _ -from nova.network import model as network_model -from nova.objects import diagnostics -from nova.objects import fields as obj_fields -import nova.privsep.fs -import nova.privsep.xenapi -from nova import utils -from nova.virt import configdrive -from nova.virt.disk import api as disk -from nova.virt.disk.vfs import localfs as vfsimpl -from nova.virt import hardware -from nova.virt.image import model as imgmodel -from nova.virt import netutils -from nova.virt.xenapi import agent -from nova.virt.xenapi.image import utils as image_utils -from nova.virt.xenapi import volume_utils - -LOG = logging.getLogger(__name__) - -CONF = nova.conf.CONF - -XENAPI_POWER_STATE = { - 'Halted': power_state.SHUTDOWN, - 'Running': power_state.RUNNING, - 'Paused': power_state.PAUSED, - 'Suspended': power_state.SUSPENDED, - 'Crashed': power_state.CRASHED} - - -SECTOR_SIZE = 512 -MBR_SIZE_SECTORS = 63 -MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE -MAX_VDI_CHAIN_SIZE = 16 -PROGRESS_INTERVAL_SECONDS = 300 -DD_BLOCKSIZE = 65536 - -# Fudge factor to allow for the VHD chain to be slightly larger than -# the partitioned space. Otherwise, legitimate images near their -# maximum allowed size can fail on build with FlavorDiskSmallerThanImage. 
-VHD_SIZE_CHECK_FUDGE_FACTOR_GB = 10 - - -class ImageType(object): - """Enumeration class for distinguishing different image types - - | 0 - kernel image (goes on dom0's filesystem) - | 1 - ramdisk image (goes on dom0's filesystem) - | 2 - disk image (local SR, partitioned by objectstore plugin) - | 3 - raw disk image (local SR, NOT partitioned by plugin) - | 4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for - | linux, HVM assumed for Windows) - | 5 - ISO disk image (local SR, NOT partitioned by plugin) - | 6 - config drive - """ - - KERNEL = 0 - RAMDISK = 1 - DISK = 2 - DISK_RAW = 3 - DISK_VHD = 4 - DISK_ISO = 5 - DISK_CONFIGDRIVE = 6 - _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO, - DISK_CONFIGDRIVE) - - KERNEL_STR = "kernel" - RAMDISK_STR = "ramdisk" - DISK_STR = "root" - DISK_RAW_STR = "os_raw" - DISK_VHD_STR = "vhd" - DISK_ISO_STR = "iso" - DISK_CONFIGDRIVE_STR = "configdrive" - _strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR, - DISK_ISO_STR, DISK_CONFIGDRIVE_STR) - - @classmethod - def to_string(cls, image_type): - return dict(zip(cls._ids, ImageType._strs)).get(image_type) - - @classmethod - def get_role(cls, image_type_id): - """Get the role played by the image, based on its type.""" - return { - cls.KERNEL: 'kernel', - cls.RAMDISK: 'ramdisk', - cls.DISK: 'root', - cls.DISK_RAW: 'root', - cls.DISK_VHD: 'root', - cls.DISK_ISO: 'iso', - cls.DISK_CONFIGDRIVE: 'configdrive' - }.get(image_type_id) - - -def get_vm_device_id(session, image_meta): - # NOTE: device_id should be 2 for windows VMs which run new xentools - # (>=6.1). Refer to http://support.citrix.com/article/CTX135099 for more - # information. - device_id = image_meta.properties.get('hw_device_id') - - # The device_id is required to be set for hypervisor version 6.1 and above - if device_id: - hypervisor_version = session.product_version - if _hypervisor_supports_device_id(hypervisor_version): - return device_id - else: - msg = _("Device id %(id)s specified is not supported by " - "hypervisor version %(version)s") % {'id': device_id, - 'version': hypervisor_version} - raise exception.NovaException(msg) - - -def _hypervisor_supports_device_id(version): - version_as_string = '.'.join(str(v) for v in version) - return versionutils.is_compatible('6.1', version_as_string) - - -def create_vm(session, instance, name_label, kernel, ramdisk, - use_pv_kernel=False, device_id=None): - """Create a VM record. Returns new VM reference. - the use_pv_kernel flag indicates whether the guest is HVM or PV - - There are 3 scenarios: - - 1. Using paravirtualization, kernel passed in - - 2. Using paravirtualization, kernel within the image - - 3. 
Using hardware virtualization - """ - flavor = instance.get_flavor() - mem = str(int(flavor.memory_mb) * units.Mi) - vcpus = str(flavor.vcpus) - - vcpu_weight = flavor.vcpu_weight - vcpu_params = {} - if vcpu_weight is not None: - # NOTE(johngarbutt) bug in XenServer 6.1 and 6.2 means - # we need to specify both weight and cap for either to apply - vcpu_params = {"weight": str(vcpu_weight), "cap": "0"} - - cpu_mask_list = hardware.get_vcpu_pin_set() - if cpu_mask_list: - cpu_mask = hardware.format_cpu_spec(cpu_mask_list, - allow_ranges=False) - vcpu_params["mask"] = cpu_mask - - viridian = 'true' if instance['os_type'] == 'windows' else 'false' - - rec = { - 'actions_after_crash': 'destroy', - 'actions_after_reboot': 'restart', - 'actions_after_shutdown': 'destroy', - 'affinity': '', - 'blocked_operations': {}, - 'ha_always_run': False, - 'ha_restart_priority': '', - 'HVM_boot_params': {}, - 'HVM_boot_policy': '', - 'is_a_template': False, - 'memory_dynamic_min': mem, - 'memory_dynamic_max': mem, - 'memory_static_min': '0', - 'memory_static_max': mem, - 'memory_target': mem, - 'name_description': '', - 'name_label': name_label, - 'other_config': {'nova_uuid': str(instance['uuid'])}, - 'PCI_bus': '', - 'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true', - 'viridian': viridian, 'timeoffset': '0'}, - 'PV_args': '', - 'PV_bootloader': '', - 'PV_bootloader_args': '', - 'PV_kernel': '', - 'PV_legacy_args': '', - 'PV_ramdisk': '', - 'recommendations': '', - 'tags': [], - 'user_version': '0', - 'VCPUs_at_startup': vcpus, - 'VCPUs_max': vcpus, - 'VCPUs_params': vcpu_params, - 'xenstore_data': {'vm-data/allowvssprovider': 'false'}} - - # Complete VM configuration record according to the image type - # non-raw/raw with PV kernel/raw in HVM mode - if use_pv_kernel: - rec['platform']['nx'] = 'false' - if instance['kernel_id']: - # 1. Kernel explicitly passed in, use that - rec['PV_args'] = 'root=/dev/xvda1' - rec['PV_kernel'] = kernel - rec['PV_ramdisk'] = ramdisk - else: - # 2. Use kernel within the image - rec['PV_bootloader'] = 'pygrub' - else: - # 3. 
Using hardware virtualization
-        rec['platform']['nx'] = 'true'
-        rec['HVM_boot_params'] = {'order': 'dc'}
-        rec['HVM_boot_policy'] = 'BIOS order'
-
-    if device_id:
-        rec['platform']['device_id'] = str(device_id).zfill(4)
-
-    vm_ref = session.VM.create(rec)
-    LOG.debug('Created VM', instance=instance)
-    return vm_ref
-
-
-def destroy_vm(session, instance, vm_ref):
-    """Destroys a VM record."""
-    try:
-        session.VM.destroy(vm_ref)
-    except session.XenAPI.Failure:
-        LOG.exception('Destroy VM failed')
-        return
-
-    LOG.debug("VM destroyed", instance=instance)
-
-
-def clean_shutdown_vm(session, instance, vm_ref):
-    if is_vm_shutdown(session, vm_ref):
-        LOG.warning("VM already halted, skipping shutdown...",
-                    instance=instance)
-        return True
-
-    LOG.debug("Shutting down VM (cleanly)", instance=instance)
-    try:
-        session.call_xenapi('VM.clean_shutdown', vm_ref)
-    except session.XenAPI.Failure:
-        LOG.exception('Shutting down VM (cleanly) failed.')
-        return False
-    return True
-
-
-def hard_shutdown_vm(session, instance, vm_ref):
-    if is_vm_shutdown(session, vm_ref):
-        LOG.warning("VM already halted, skipping shutdown...",
-                    instance=instance)
-        return True
-
-    LOG.debug("Shutting down VM (hard)", instance=instance)
-    try:
-        session.call_xenapi('VM.hard_shutdown', vm_ref)
-    except session.XenAPI.Failure:
-        LOG.exception('Shutting down VM (hard) failed')
-        return False
-    return True
-
-
-def is_vm_shutdown(session, vm_ref):
-    state = get_power_state(session, vm_ref)
-    return state == power_state.SHUTDOWN
-
-
-def is_enough_free_mem(session, instance):
-    flavor = instance.get_flavor()
-    mem = int(flavor.memory_mb) * units.Mi
-    host_free_mem = int(session.call_xenapi("host.compute_free_memory",
-                                            session.host_ref))
-    return host_free_mem >= mem
-
-
-def _should_retry_unplug_vbd(err):
-    """Retry if failed with some specific errors.
-
-    The retryable errors include:
-    1. DEVICE_DETACH_REJECTED
-       For reasons which we don't understand, we're seeing the device
-       still in use, even when all processes using the device should
-       be dead.
-    2. INTERNAL_ERROR
-       Since XenServer 6.2, we also need to retry if we get INTERNAL_ERROR,
-       as that error goes away when you retry.
-    3. VM_MISSING_PV_DRIVERS
-       NOTE(jianghuaw): It takes some time for the PV (paravirtualization)
-       driver to be connected at VM boot, so retry if the unplug failed
-       with VM_MISSING_PV_DRIVERS.
- """ - - can_retry_errs = ( - 'DEVICE_DETACH_REJECTED', - 'INTERNAL_ERROR', - 'VM_MISSING_PV_DRIVERS', - ) - - return err in can_retry_errs - - -def unplug_vbd(session, vbd_ref, this_vm_ref): - # make sure that perform at least once - max_attempts = max(0, CONF.xenserver.num_vbd_unplug_retries) + 1 - for num_attempt in range(1, max_attempts + 1): - try: - if num_attempt > 1: - greenthread.sleep(1) - - session.VBD.unplug(vbd_ref, this_vm_ref) - return - except session.XenAPI.Failure as exc: - err = len(exc.details) > 0 and exc.details[0] - if err == 'DEVICE_ALREADY_DETACHED': - LOG.info('VBD %s already detached', vbd_ref) - return - elif _should_retry_unplug_vbd(err): - LOG.info('VBD %(vbd_ref)s unplug failed with "%(err)s", ' - 'attempt %(num_attempt)d/%(max_attempts)d', - {'vbd_ref': vbd_ref, 'num_attempt': num_attempt, - 'max_attempts': max_attempts, 'err': err}) - else: - LOG.exception('Unable to unplug VBD') - raise exception.StorageError( - reason=_('Unable to unplug VBD %s') % vbd_ref) - - raise exception.StorageError( - reason=_('Reached maximum number of retries ' - 'trying to unplug VBD %s') - % vbd_ref) - - -def destroy_vbd(session, vbd_ref): - """Destroy VBD from host database.""" - try: - session.call_xenapi('VBD.destroy', vbd_ref) - except session.XenAPI.Failure: - LOG.exception('Unable to destroy VBD') - raise exception.StorageError( - reason=_('Unable to destroy VBD %s') % vbd_ref) - - -def create_vbd(session, vm_ref, vdi_ref, userdevice, vbd_type='disk', - read_only=False, bootable=False, osvol=False, - empty=False, unpluggable=True): - """Create a VBD record and returns its reference.""" - vbd_rec = {} - vbd_rec['VM'] = vm_ref - if vdi_ref is None: - vdi_ref = 'OpaqueRef:NULL' - vbd_rec['VDI'] = vdi_ref - vbd_rec['userdevice'] = str(userdevice) - vbd_rec['bootable'] = bootable - vbd_rec['mode'] = read_only and 'RO' or 'RW' - vbd_rec['type'] = vbd_type - vbd_rec['unpluggable'] = unpluggable - vbd_rec['empty'] = empty - vbd_rec['other_config'] = {} - vbd_rec['qos_algorithm_type'] = '' - vbd_rec['qos_algorithm_params'] = {} - vbd_rec['qos_supported_algorithms'] = [] - LOG.debug('Creating %(vbd_type)s-type VBD for VM %(vm_ref)s,' - ' VDI %(vdi_ref)s ... 
', - {'vbd_type': vbd_type, 'vm_ref': vm_ref, 'vdi_ref': vdi_ref}) - vbd_ref = session.call_xenapi('VBD.create', vbd_rec) - LOG.debug('Created VBD %(vbd_ref)s for VM %(vm_ref)s,' - ' VDI %(vdi_ref)s.', - {'vbd_ref': vbd_ref, 'vm_ref': vm_ref, 'vdi_ref': vdi_ref}) - if osvol: - # set osvol=True in other-config to indicate this is an - # attached nova (or cinder) volume - session.call_xenapi('VBD.add_to_other_config', - vbd_ref, 'osvol', 'True') - return vbd_ref - - - def attach_cd(session, vm_ref, vdi_ref, userdevice): - """Create an empty VBD, then insert the CD.""" - vbd_ref = create_vbd(session, vm_ref, None, userdevice, - vbd_type='cd', read_only=True, - bootable=True, empty=True, - unpluggable=False) - session.call_xenapi('VBD.insert', vbd_ref, vdi_ref) - return vbd_ref - - - def destroy_vdi(session, vdi_ref): - try: - session.call_xenapi('VDI.destroy', vdi_ref) - except session.XenAPI.Failure: - LOG.debug("Unable to destroy VDI %s", vdi_ref, exc_info=True) - msg = _("Unable to destroy VDI %s") % vdi_ref - LOG.error(msg) - raise exception.StorageError(reason=msg) - - - def safe_destroy_vdis(session, vdi_refs): - """Tries to destroy the requested VDIs, but ignores any errors.""" - for vdi_ref in vdi_refs: - try: - destroy_vdi(session, vdi_ref) - except exception.StorageError: - LOG.debug("Ignoring error while destroying VDI: %s", vdi_ref) - - - def create_vdi(session, sr_ref, instance, name_label, disk_type, virtual_size, - read_only=False): - """Create a VDI record and return its reference.""" - vdi_ref = session.call_xenapi("VDI.create", - {'name_label': name_label, - 'name_description': disk_type, - 'SR': sr_ref, - 'virtual_size': str(virtual_size), - 'type': 'User', - 'sharable': False, - 'read_only': read_only, - 'xenstore_data': {}, - 'other_config': _get_vdi_other_config(disk_type, instance=instance), - 'sm_config': {}, - 'tags': []}) - LOG.debug('Created VDI %(vdi_ref)s (%(name_label)s,' - ' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.', - {'vdi_ref': vdi_ref, 'name_label': name_label, - 'virtual_size': virtual_size, 'read_only': read_only, - 'sr_ref': sr_ref}) - return vdi_ref - - - @contextlib.contextmanager - def _dummy_vm(session, instance, vdi_ref): - """This creates a temporary VM so that we can snapshot a VDI. - - VDIs can't be snapshotted directly since the API expects a `vm_ref`. To - work around this, we need to create a temporary VM and then map the VDI to - the VM using a temporary VBD. - """ - name_label = "dummy" - vm_ref = create_vm(session, instance, name_label, None, None) - try: - vbd_ref = create_vbd(session, vm_ref, vdi_ref, 'autodetect', - read_only=True) - try: - yield vm_ref - finally: - try: - destroy_vbd(session, vbd_ref) - except exception.StorageError: - # destroy_vbd() will log error - pass - finally: - destroy_vm(session, instance, vm_ref) - - - def _safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref): - """Copy a VDI and return the new VDI's reference. - - This function differs from the XenAPI `VDI.copy` call in that the copy is - atomic and isolated, meaning we don't see half-downloaded images. It - accomplishes this by copying the VDIs into a temporary directory and then - atomically renaming them into the SR when the copy is completed. - - The correct long term solution is to fix `VDI.copy` so that it is atomic - and isolated.
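A usage sketch, assuming a live XenAPI `session` and valid opaque
references for the SR and the VDI to copy:

    new_vdi_ref = _safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref)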
- """ - with _dummy_vm(session, instance, vdi_to_copy_ref) as vm_ref: - label = "snapshot" - with snapshot_attached_here( - session, instance, vm_ref, label) as vdi_uuids: - sr_path = get_sr_path(session, sr_ref=sr_ref) - uuid_stack = _make_uuid_stack() - imported_vhds = disk_management.safe_copy_vdis( - session, sr_path, vdi_uuids, uuid_stack) - root_uuid = imported_vhds['root']['uuid'] - - # rescan to discover new VHDs - scan_default_sr(session) - vdi_ref = session.call_xenapi('VDI.get_by_uuid', root_uuid) - return vdi_ref - - -def _clone_vdi(session, vdi_to_clone_ref): - """Clones a VDI and return the new VDIs reference.""" - vdi_ref = session.call_xenapi('VDI.clone', vdi_to_clone_ref) - LOG.debug('Cloned VDI %(vdi_ref)s from VDI ' - '%(vdi_to_clone_ref)s', - {'vdi_ref': vdi_ref, 'vdi_to_clone_ref': vdi_to_clone_ref}) - return vdi_ref - - -def _get_vdi_other_config(disk_type, instance=None): - """Return metadata to store in VDI's other_config attribute. - - `nova_instance_uuid` is used to associate a VDI with a particular instance - so that, if it becomes orphaned from an unclean shutdown of a - compute-worker, we can safely detach it. - """ - other_config = {'nova_disk_type': disk_type} - - # create_vdi may be called simply while creating a volume - # hence information about instance may or may not be present - if instance: - other_config['nova_instance_uuid'] = instance['uuid'] - - return other_config - - -def _set_vdi_info(session, vdi_ref, vdi_type, name_label, description, - instance): - existing_other_config = session.call_xenapi('VDI.get_other_config', - vdi_ref) - - session.call_xenapi('VDI.set_name_label', vdi_ref, name_label) - session.call_xenapi('VDI.set_name_description', vdi_ref, description) - - other_config = _get_vdi_other_config(vdi_type, instance=instance) - for key, value in other_config.items(): - if key not in existing_other_config: - session.call_xenapi( - "VDI.add_to_other_config", vdi_ref, key, value) - - -def _vm_get_vbd_refs(session, vm_ref): - return session.call_xenapi("VM.get_VBDs", vm_ref) - - -def _vbd_get_rec(session, vbd_ref): - return session.call_xenapi("VBD.get_record", vbd_ref) - - -def _vdi_get_rec(session, vdi_ref): - return session.call_xenapi("VDI.get_record", vdi_ref) - - -def _vdi_get_uuid(session, vdi_ref): - return session.call_xenapi("VDI.get_uuid", vdi_ref) - - -def _vdi_snapshot(session, vdi_ref): - return session.call_xenapi("VDI.snapshot", vdi_ref, {}) - - -def get_vdi_for_vm_safely(session, vm_ref, userdevice='0'): - """Retrieves the primary VDI for a VM.""" - vbd_refs = _vm_get_vbd_refs(session, vm_ref) - for vbd_ref in vbd_refs: - vbd_rec = _vbd_get_rec(session, vbd_ref) - # Convention dictates the primary VDI will be userdevice 0 - if vbd_rec['userdevice'] == userdevice: - vdi_ref = vbd_rec['VDI'] - vdi_rec = _vdi_get_rec(session, vdi_ref) - return vdi_ref, vdi_rec - raise exception.NovaException(_("No primary VDI found for %s") % vm_ref) - - -def get_all_vdi_uuids_for_vm(session, vm_ref, min_userdevice=0): - vbd_refs = _vm_get_vbd_refs(session, vm_ref) - for vbd_ref in vbd_refs: - vbd_rec = _vbd_get_rec(session, vbd_ref) - if int(vbd_rec['userdevice']) >= min_userdevice: - vdi_ref = vbd_rec['VDI'] - yield _vdi_get_uuid(session, vdi_ref) - - -def _try_strip_base_mirror_from_vdi(session, vdi_ref): - try: - session.call_xenapi("VDI.remove_from_sm_config", vdi_ref, - "base_mirror") - except session.XenAPI.Failure: - LOG.debug("Error while removing sm_config", exc_info=True) - - -def strip_base_mirror_from_vdis(session, vm_ref): - # 
NOTE(johngarbutt) part of workaround for XenServer bug CA-98606 - vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref) - for vbd_ref in vbd_refs: - vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref) - _try_strip_base_mirror_from_vdi(session, vdi_ref) - - - def _delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain, sr_ref): - possible_snapshot_parents = vdi_uuid_chain[1:] - - if len(possible_snapshot_parents) == 0: - LOG.debug("No VHD chain.", instance=instance) - return - - snapshot_uuids = _child_vhds(session, sr_ref, possible_snapshot_parents, - old_snapshots_only=True) - number_of_snapshots = len(snapshot_uuids) - - if number_of_snapshots <= 0: - LOG.debug("No snapshots to remove.", instance=instance) - return - - vdi_refs = [session.VDI.get_by_uuid(vdi_uuid) - for vdi_uuid in snapshot_uuids] - safe_destroy_vdis(session, vdi_refs) - - # ensure garbage collector has been run - _scan_sr(session, sr_ref) - - LOG.info("Deleted %s snapshots.", number_of_snapshots, instance=instance) - - - def remove_old_snapshots(session, instance, vm_ref): - """See if there is a snapshot present that should be removed.""" - LOG.debug("Starting remove_old_snapshots for VM", instance=instance) - vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) - chain = _walk_vdi_chain(session, vm_vdi_rec['uuid']) - vdi_uuid_chain = [vdi_rec['uuid'] for vdi_rec in chain] - sr_ref = vm_vdi_rec["SR"] - _delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain, sr_ref) - - - @contextlib.contextmanager - def snapshot_attached_here(session, instance, vm_ref, label, userdevice='0', - post_snapshot_callback=None): - # impl method allows easier patching for tests - return _snapshot_attached_here_impl(session, instance, vm_ref, label, - userdevice, post_snapshot_callback) - - - def _snapshot_attached_here_impl(session, instance, vm_ref, label, userdevice, - post_snapshot_callback): - """Snapshot the root disk only. Return a list of uuids for the vhds - in the chain. - """ - LOG.debug("Starting snapshot for VM", instance=instance) - - # Memorize the VDI chain so we can poll for coalesce - vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref, - userdevice) - chain = _walk_vdi_chain(session, vm_vdi_rec['uuid']) - vdi_uuid_chain = [vdi_rec['uuid'] for vdi_rec in chain] - sr_ref = vm_vdi_rec["SR"] - - # clean up after any interrupted snapshot attempts - _delete_snapshots_in_vdi_chain(session, instance, vdi_uuid_chain, sr_ref) - - snapshot_ref = _vdi_snapshot(session, vm_vdi_ref) - if post_snapshot_callback is not None: - post_snapshot_callback(task_state=task_states.IMAGE_PENDING_UPLOAD) - try: - # When the VDI snapshot is taken a new parent is introduced. - # If we have taken a snapshot before, the new parent can be coalesced. - # We need to wait for this to happen before trying to copy the chain. - _wait_for_vhd_coalesce(session, instance, sr_ref, vm_vdi_ref, - vdi_uuid_chain) - - snapshot_uuid = _vdi_get_uuid(session, snapshot_ref) - chain = _walk_vdi_chain(session, snapshot_uuid) - vdi_uuids = [vdi_rec['uuid'] for vdi_rec in chain] - yield vdi_uuids - finally: - safe_destroy_vdis(session, [snapshot_ref]) - # TODO(johngarbutt) we need to check the snapshot has been coalesced - # now that its associated VDI has been deleted. - - - def get_sr_path(session, sr_ref=None): - """Return the path to our storage repository. - - This is used when we're dealing with VHDs directly, either by taking - snapshots or by restoring an image in the DISK_VHD format.
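For a file-based SR the result is the SR mount point; illustrative only,
assuming the default `sr_base_path` of `/var/run/sr-mount`:

    >>> get_sr_path(session)  # doctest: +SKIP
    '/var/run/sr-mount/<sr_uuid>'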
- """ - if sr_ref is None: - sr_ref = safe_find_sr(session) - pbd_rec = session.call_xenapi("PBD.get_all_records_where", - 'field "host"="%s" and ' - 'field "SR"="%s"' % - (session.host_ref, sr_ref)) - - # NOTE(bobball): There can only be one PBD for a host/SR pair, but path is - # not always present - older versions of XS do not set it. - pbd_ref = list(pbd_rec.keys())[0] - device_config = pbd_rec[pbd_ref]['device_config'] - if 'path' in device_config: - return device_config['path'] - - sr_rec = session.call_xenapi("SR.get_record", sr_ref) - sr_uuid = sr_rec["uuid"] - if sr_rec["type"] not in ["ext", "nfs"]: - raise exception.NovaException( - _("Only file-based SRs (ext/NFS) are supported by this feature." - " SR %(uuid)s is of type %(type)s") % - {"uuid": sr_uuid, "type": sr_rec["type"]}) - - return os.path.join(CONF.xenserver.sr_base_path, sr_uuid) - - -def destroy_cached_images(session, sr_ref, all_cached=False, dry_run=False, - keep_days=0): - """Destroy used or unused cached images. - - A cached image that is being used by at least one VM is said to be 'used'. - - In the case of an 'unused' image, the cached image will be the only - descendent of the base-copy. So when we delete the cached-image, the - refcount will drop to zero and XenServer will automatically destroy the - base-copy for us. - - The default behavior of this function is to destroy only 'unused' cached - images. To destroy all cached images, use the `all_cached=True` kwarg. - - `keep_days` is used to destroy images based on when they were created. - Only the images which were created `keep_days` ago will be deleted if the - argument has been set. - """ - cached_images = _find_cached_images(session, sr_ref) - destroyed = set() - - def destroy_cached_vdi(vdi_uuid, vdi_ref): - LOG.debug("Destroying cached VDI '%s'", vdi_uuid) - if not dry_run: - destroy_vdi(session, vdi_ref) - destroyed.add(vdi_uuid) - - for vdi_dict in cached_images.values(): - vdi_ref = vdi_dict['vdi_ref'] - vdi_uuid = session.call_xenapi('VDI.get_uuid', vdi_ref) - - if all_cached: - destroy_cached_vdi(vdi_uuid, vdi_ref) - continue - - # Unused-Only: Search for siblings - - # Chain length greater than two implies a VM must be holding a ref to - # the base-copy (otherwise it would have coalesced), so consider this - # cached image used. - chain = list(_walk_vdi_chain(session, vdi_uuid)) - if len(chain) > 2: - continue - elif len(chain) == 2: - # Siblings imply cached image is used - root_vdi_rec = chain[-1] - children = _child_vhds(session, sr_ref, [root_vdi_rec['uuid']]) - if len(children) > 1: - continue - - cached_time = vdi_dict.get('cached_time') - if cached_time is not None: - if (int(time.time()) - int(cached_time)) / (3600 * 24) \ - >= keep_days: - destroy_cached_vdi(vdi_uuid, vdi_ref) - else: - LOG.debug("vdi %s can't be destroyed because the cached time is" - " not specified", vdi_uuid) - - return destroyed - - -def _find_cached_images(session, sr_ref): - """Return a dict {image_id: {'vdi_ref': vdi_ref, 'cached_time': - cached_time}} representing all cached images. 
- """ - cached_images = {} - for vdi_ref, vdi_rec in _get_all_vdis_in_sr(session, sr_ref): - try: - image_id = vdi_rec['other_config']['image-id'] - except KeyError: - continue - - cached_time = vdi_rec['other_config'].get('cached-time') - cached_images[image_id] = {'vdi_ref': vdi_ref, - 'cached_time': cached_time} - - return cached_images - - -def _find_cached_image(session, image_id, sr_ref): - """Returns the vdi-ref of the cached image.""" - name_label = _get_image_vdi_label(image_id) - # For not pooled hosts, only name_lable is enough to get a cached image. - # When in a xapi pool, each host may have a cached image using the - # same name while xapi api will search all of them. Add SR to the filter - # to ensure only one image returns. - expr = ('field "name__label"="%(name_label)s" and field "SR" = "%(SR)s"' - % {'name_label': name_label, 'SR': sr_ref}) - recs = session.call_xenapi("VDI.get_all_records_where", expr) - - number_found = len(recs) - if number_found > 0: - if number_found > 1: - LOG.warning("Multiple base images for image: %s", image_id) - return list(recs.keys())[0] - - -def _get_resize_func_name(session): - brand = session.product_brand - version = session.product_version - - # To maintain backwards compatibility. All recent versions - # should use VDI.resize - if version and brand: - xcp = brand == 'XCP' - r1_2_or_above = (version[0] == 1 and version[1] > 1) or version[0] > 1 - - xenserver = brand == 'XenServer' - r6_or_above = version[0] > 5 - - if (xcp and not r1_2_or_above) or (xenserver and not r6_or_above): - return 'VDI.resize_online' - - return 'VDI.resize' - - -def _vdi_get_virtual_size(session, vdi_ref): - size = session.call_xenapi('VDI.get_virtual_size', vdi_ref) - return int(size) - - -def _vdi_resize(session, vdi_ref, new_size): - resize_func_name = _get_resize_func_name(session) - session.call_xenapi(resize_func_name, vdi_ref, str(new_size)) - - -def update_vdi_virtual_size(session, instance, vdi_ref, new_gb): - virtual_size = _vdi_get_virtual_size(session, vdi_ref) - new_disk_size = new_gb * units.Gi - - msg = ("Resizing up VDI %(vdi_ref)s from %(virtual_size)d " - "to %(new_disk_size)d") - LOG.debug(msg, {'vdi_ref': vdi_ref, 'virtual_size': virtual_size, - 'new_disk_size': new_disk_size}, - instance=instance) - - if virtual_size < new_disk_size: - # For resize up. 
Simple VDI resize will do the trick - _vdi_resize(session, vdi_ref, new_disk_size) - - elif virtual_size == new_disk_size: - LOG.debug("No need to change vdi virtual size.", - instance=instance) - - else: - # NOTE(johngarbutt): we should never get here - # but if we don't raise an exception, a user might be able to use - # more storage than allowed by their chosen instance flavor - msg = _("VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger " - "than flavor size of %(new_disk_size)d bytes.") - msg = msg % {'vdi_ref': vdi_ref, 'virtual_size': virtual_size, - 'new_disk_size': new_disk_size} - LOG.debug(msg, instance=instance) - raise exception.ResizeError(reason=msg) - - -def resize_disk(session, instance, vdi_ref, flavor): - size_gb = flavor.root_gb - if size_gb == 0: - reason = _("Can't resize a disk to 0 GB.") - raise exception.ResizeError(reason=reason) - - sr_ref = safe_find_sr(session) - clone_ref = _clone_vdi(session, vdi_ref) - - try: - # Resize partition and filesystem down - _auto_configure_disk(session, clone_ref, size_gb) - - # Create new VDI - vdi_size = size_gb * units.Gi - # NOTE(johannes): No resizing allowed for rescue instances, so - # using instance['name'] is safe here - new_ref = create_vdi(session, sr_ref, instance, instance['name'], - 'root', vdi_size) - - new_uuid = session.call_xenapi('VDI.get_uuid', new_ref) - - # Manually copy contents over - virtual_size = size_gb * units.Gi - _copy_partition(session, clone_ref, new_ref, 1, virtual_size) - - return new_ref, new_uuid - finally: - destroy_vdi(session, clone_ref) - - -def _auto_configure_disk(session, vdi_ref, new_gb): - """Partition and resize FS to match the size specified by - flavors.root_gb. - - This is a fail-safe to prevent accidentally destroying data on a disk - erroneously marked as auto_disk_config=True. - - The criteria for allowing resize are: - - 1. 'auto_disk_config' must be true for the instance (and image). - (If we've made it here, then auto_disk_config=True.) - - 2. The disk must have only one partition. - - 3. The file-system on the one partition must be ext3 or ext4. - - 4. 
We are not running in independent_compute mode (checked by - vdi_attached) - """ - if new_gb == 0: - LOG.debug("Skipping auto_config_disk as destination size is 0GB") - return - - with vdi_attached(session, vdi_ref, read_only=False) as dev: - partitions = _get_partitions(dev) - - if len(partitions) != 1: - reason = _('Disk must have only one partition.') - raise exception.CannotResizeDisk(reason=reason) - - num, start, old_sectors, fstype, name, flags = partitions[0] - if fstype not in ('ext3', 'ext4'): - reason = _('Disk contains a filesystem ' - 'we are unable to resize: %s') - raise exception.CannotResizeDisk(reason=(reason % fstype)) - - if num != 1: - reason = _('The only partition should be partition 1.') - raise exception.CannotResizeDisk(reason=reason) - - new_sectors = new_gb * units.Gi / SECTOR_SIZE - _resize_part_and_fs(dev, start, old_sectors, new_sectors, flags) - - -def try_auto_configure_disk(session, vdi_ref, new_gb): - if CONF.xenserver.independent_compute: - raise exception.NotSupportedWithOption( - operation='auto_configure_disk', - option='CONF.xenserver.independent_compute') - try: - _auto_configure_disk(session, vdi_ref, new_gb) - except exception.CannotResizeDisk as e: - LOG.warning('Attempted auto_configure_disk failed because: %s', e) - - -def _make_partition(session, dev, partition_start, partition_end): - dev_path = utils.make_dev_path(dev) - - # NOTE(bobball) If this runs in Dom0, parted will error trying - # to re-read the partition table and return a generic error - nova.privsep.fs.create_partition_table( - dev_path, 'msdos', check_exit_code=not session.is_local_connection) - nova.privsep.fs.create_partition( - dev_path, 'primary', partition_start, partition_end, - check_exit_code=not session.is_local_connection) - - partition_path = utils.make_dev_path(dev, partition=1) - if session.is_local_connection: - # Need to refresh the partitions - nova.privsep.fs.create_device_maps(dev_path) - - # Sometimes the partition gets created under /dev/mapper, depending - # on the setup in dom0. - mapper_path = '/dev/mapper/%s' % os.path.basename(partition_path) - if os.path.exists(mapper_path): - return mapper_path - - return partition_path - - -def _generate_disk(session, instance, vm_ref, userdevice, name_label, - disk_type, size_mb, fs_type, fs_label=None): - """Steps to programmatically generate a disk: - - 1. Create VDI of desired size - - 2. Attach VDI to Dom0 - - 3. Create partition - 3.a. If the partition type is supported by dom0 (currently ext3, - swap) then create it while the VDI is attached to dom0. - 3.b. If the partition type is not supported by dom0, attach the - VDI to the domU and create there. - This split between DomU/Dom0 ensures that we can create most - VM types in the "isolated compute" case. - - 4. Create VBD between instance VM and VDI - - """ - # 1. Create VDI - sr_ref = safe_find_sr(session) - ONE_MEG = units.Mi - virtual_size = size_mb * ONE_MEG - vdi_ref = create_vdi(session, sr_ref, instance, name_label, disk_type, - virtual_size) - - try: - # 2. Attach VDI to Dom0 (VBD hotplug) - mkfs_in_dom0 = fs_type in ('ext3', 'swap') - with vdi_attached(session, vdi_ref, read_only=False, - dom0=True) as dev: - # 3. Create partition - partition_start = "2048" - partition_end = "-" - - disk_management.make_partition(session, dev, partition_start, - partition_end) - - if mkfs_in_dom0: - disk_management.mkfs(session, dev, '1', fs_type, fs_label) - - # 3.a. 
dom0 does not support nfs/ext4, so may have to mkfs in domU - if fs_type is not None and not mkfs_in_dom0: - with vdi_attached(session, vdi_ref, read_only=False) as dev: - partition_path = utils.make_dev_path(dev, partition=1) - nova.privsep.fs.mkfs(fs_type, partition_path, fs_label) - - # 4. Create VBD between instance VM and VDI - if vm_ref: - create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False) - except Exception: - with excutils.save_and_reraise_exception(): - msg = "Error while generating disk number: %s" % userdevice - LOG.debug(msg, instance=instance, exc_info=True) - safe_destroy_vdis(session, [vdi_ref]) - - return vdi_ref - - -def generate_swap(session, instance, vm_ref, userdevice, name_label, swap_mb): - # NOTE(jk0): We use a FAT32 filesystem for the Windows swap - # partition because that is what parted supports. - is_windows = instance['os_type'] == "windows" - fs_type = "vfat" if is_windows else "swap" - - if CONF.xenserver.independent_compute and fs_type != "swap": - raise exception.NotSupportedWithOption( - operation='swap drives for Windows', - option='CONF.xenserver.independent_compute') - - _generate_disk(session, instance, vm_ref, userdevice, name_label, - 'swap', swap_mb, fs_type) - - -def get_ephemeral_disk_sizes(total_size_gb): - if not total_size_gb: - return - - max_size_gb = 2000 - if total_size_gb % 1024 == 0: - max_size_gb = 1024 - - left_to_allocate = total_size_gb - while left_to_allocate > 0: - size_gb = min(max_size_gb, left_to_allocate) - yield size_gb - left_to_allocate -= size_gb - - -def generate_single_ephemeral(session, instance, vm_ref, userdevice, - size_gb, instance_name_label=None): - if instance_name_label is None: - instance_name_label = instance["name"] - - name_label = "%s ephemeral" % instance_name_label - fs_label = "ephemeral" - # TODO(johngarbutt) need to move DEVICE_EPHEMERAL from vmops to use it here - label_number = int(userdevice) - 4 - if label_number > 0: - name_label = "%s (%d)" % (name_label, label_number) - fs_label = "ephemeral%d" % label_number - - return _generate_disk(session, instance, vm_ref, str(userdevice), - name_label, 'ephemeral', size_gb * 1024, - CONF.default_ephemeral_format, fs_label) - - -def generate_ephemeral(session, instance, vm_ref, first_userdevice, - instance_name_label, total_size_gb): - # NOTE(johngarbutt): max possible size of a VHD disk is 2043GB - sizes = get_ephemeral_disk_sizes(total_size_gb) - first_userdevice = int(first_userdevice) - - vdi_refs = [] - try: - for userdevice, size_gb in enumerate(sizes, start=first_userdevice): - ref = generate_single_ephemeral(session, instance, vm_ref, - userdevice, size_gb, - instance_name_label) - vdi_refs.append(ref) - except Exception as exc: - with excutils.save_and_reraise_exception(): - LOG.debug("Error when generating ephemeral disk. 
" - "Device: %(userdevice)s Size GB: %(size_gb)s " - "Error: %(exc)s", { - 'userdevice': userdevice, - 'size_gb': size_gb, - 'exc': exc}) - safe_destroy_vdis(session, vdi_refs) - - -def generate_iso_blank_root_disk(session, instance, vm_ref, userdevice, - name_label, size_gb): - _generate_disk(session, instance, vm_ref, userdevice, name_label, - 'user', size_gb * 1024, CONF.default_ephemeral_format) - - -def generate_configdrive(session, context, instance, vm_ref, userdevice, - network_info, admin_password=None, files=None): - sr_ref = safe_find_sr(session) - vdi_ref = create_vdi(session, sr_ref, instance, 'config-2', - 'configdrive', configdrive.CONFIGDRIVESIZE_BYTES) - try: - extra_md = {} - if admin_password: - extra_md['admin_pass'] = admin_password - inst_md = instance_metadata.InstanceMetadata( - instance, content=files, extra_md=extra_md, - network_info=network_info, request_context=context) - with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb: - with utils.tempdir() as tmp_path: - tmp_file = os.path.join(tmp_path, 'configdrive') - cdb.make_drive(tmp_file) - # XAPI can only import a VHD file, so convert to vhd format - vhd_file = '%s.vhd' % tmp_file - with compute_utils.disk_ops_semaphore: - processutils.execute('qemu-img', 'convert', '-Ovpc', - tmp_file, vhd_file) - vhd_file_size = os.path.getsize(vhd_file) - with open(vhd_file) as file_obj: - volume_utils.stream_to_vdi( - session, instance, 'vhd', file_obj, - vhd_file_size, vdi_ref) - create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False, - read_only=True) - except Exception: - with excutils.save_and_reraise_exception(): - msg = "Error while generating config drive" - LOG.debug(msg, instance=instance, exc_info=True) - safe_destroy_vdis(session, [vdi_ref]) - - -def _create_kernel_image(context, session, instance, name_label, image_id, - image_type): - """Creates kernel/ramdisk file from the image stored in the cache. - If the image is not present in the cache, fetch it from glance. 
- - Returns: A list of dictionaries that describe VDIs - """ - if CONF.xenserver.independent_compute: - raise exception.NotSupportedWithOption( - operation='Non-VHD images', - option='CONF.xenserver.independent_compute') - - filename = "" - if CONF.xenserver.cache_images != 'none': - new_image_uuid = uuidutils.generate_uuid() - filename = disk_management.create_kernel_ramdisk( - session, image_id, new_image_uuid) - - if filename == "": - return _fetch_disk_image(context, session, instance, name_label, - image_id, image_type) - else: - vdi_type = ImageType.to_string(image_type) - return {vdi_type: dict(uuid=None, file=filename)} - - -def create_kernel_and_ramdisk(context, session, instance, name_label): - kernel_file = None - ramdisk_file = None - if instance['kernel_id']: - vdis = _create_kernel_image(context, session, - instance, name_label, instance['kernel_id'], - ImageType.KERNEL) - kernel_file = vdis['kernel'].get('file') - - if instance['ramdisk_id']: - vdis = _create_kernel_image(context, session, - instance, name_label, instance['ramdisk_id'], - ImageType.RAMDISK) - ramdisk_file = vdis['ramdisk'].get('file') - - return kernel_file, ramdisk_file - - -def destroy_kernel_ramdisk(session, instance, kernel, ramdisk): - if kernel or ramdisk: - LOG.debug("Removing kernel/ramdisk files from dom0", - instance=instance) - disk_management.remove_kernel_ramdisk( - session, kernel_file=kernel, ramdisk_file=ramdisk) - - -def _get_image_vdi_label(image_id): - return 'Glance Image %s' % image_id - - -def _create_cached_image(context, session, instance, name_label, - image_id, image_type, image_handler): - sr_ref = safe_find_sr(session) - sr_type = session.call_xenapi('SR.get_type', sr_ref) - - if CONF.use_cow_images and sr_type != "ext": - LOG.warning("Fast cloning is only supported on default local SR " - "of type ext. SR on this system was found to be of " - "type %s. 
Ignoring the cow flag.", sr_type) - - @utils.synchronized('xenapi-image-cache' + image_id) - def _create_cached_image_impl(context, session, instance, name_label, - image_id, image_type, sr_ref): - cache_vdi_ref = _find_cached_image(session, image_id, sr_ref) - downloaded = False - if cache_vdi_ref is None: - downloaded = True - vdis = _fetch_image(context, session, instance, name_label, - image_id, image_type, image_handler) - - cache_vdi_ref = session.call_xenapi( - 'VDI.get_by_uuid', vdis['root']['uuid']) - - session.call_xenapi('VDI.set_name_label', cache_vdi_ref, - _get_image_vdi_label(image_id)) - session.call_xenapi('VDI.set_name_description', cache_vdi_ref, - 'root') - session.call_xenapi('VDI.add_to_other_config', - cache_vdi_ref, 'image-id', str(image_id)) - session.call_xenapi('VDI.add_to_other_config', - cache_vdi_ref, - 'cached-time', - str(int(time.time()))) - - if CONF.use_cow_images: - new_vdi_ref = _clone_vdi(session, cache_vdi_ref) - elif sr_type == 'ext': - new_vdi_ref = _safe_copy_vdi(session, sr_ref, instance, - cache_vdi_ref) - else: - new_vdi_ref = session.call_xenapi("VDI.copy", cache_vdi_ref, - sr_ref) - - session.call_xenapi('VDI.set_name_label', new_vdi_ref, '') - session.call_xenapi('VDI.set_name_description', new_vdi_ref, '') - session.call_xenapi('VDI.remove_from_other_config', - new_vdi_ref, 'image-id') - - vdi_uuid = session.call_xenapi('VDI.get_uuid', new_vdi_ref) - return downloaded, vdi_uuid - - downloaded, vdi_uuid = _create_cached_image_impl(context, session, - instance, name_label, - image_id, image_type, - sr_ref) - - vdis = {} - vdi_type = ImageType.get_role(image_type) - vdis[vdi_type] = dict(uuid=vdi_uuid, file=None) - return downloaded, vdis - - -def create_image(context, session, instance, name_label, image_id, - image_type, image_handler): - """Creates VDI from the image stored in the local cache. If the image - is not present in the cache, it streams it from glance. 
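With `cache_images = 'some'`, the per-instance opt-in is read from system
metadata; a sketch that mirrors the logic below:

    sys_meta = utils.instance_sys_meta(instance)
    cache = strutils.bool_from_string(
        sys_meta.get('image_cache_in_nova', 'false'))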
- - Returns: A list of dictionaries that describe VDIs - """ - cache_images = CONF.xenserver.cache_images.lower() - - # Determine if the image is cacheable - if image_type == ImageType.DISK_ISO: - cache = False - elif cache_images == 'all': - cache = True - elif cache_images == 'some': - sys_meta = utils.instance_sys_meta(instance) - try: - cache = strutils.bool_from_string(sys_meta['image_cache_in_nova']) - except KeyError: - cache = False - elif cache_images == 'none': - cache = False - else: - LOG.warning("Unrecognized cache_images value '%s', defaulting to True", - CONF.xenserver.cache_images) - cache = True - - # Fetch (and cache) the image - start_time = timeutils.utcnow() - if cache: - downloaded, vdis = _create_cached_image(context, session, instance, - name_label, image_id, - image_type, image_handler) - else: - vdis = _fetch_image(context, session, instance, name_label, - image_id, image_type, image_handler) - downloaded = True - duration = timeutils.delta_seconds(start_time, timeutils.utcnow()) - - LOG.info("Image creation data, cacheable: %(cache)s, " - "downloaded: %(downloaded)s duration: %(duration).2f secs " - "for image %(image_id)s", - {'image_id': image_id, 'cache': cache, 'downloaded': downloaded, - 'duration': duration}) - - for vdi_type, vdi in vdis.items(): - vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi['uuid']) - _set_vdi_info(session, vdi_ref, vdi_type, name_label, vdi_type, - instance) - - return vdis - - -def _fetch_image(context, session, instance, name_label, image_id, image_type, - image_handler): - """Fetch image from glance based on image type. - - Returns: A single filename if image_type is KERNEL or RAMDISK - A list of dictionaries that describe VDIs, otherwise - """ - if image_type == ImageType.DISK_VHD: - vdis = _fetch_vhd_image(context, session, instance, image_id, - image_handler) - else: - if CONF.xenserver.independent_compute: - raise exception.NotSupportedWithOption( - operation='Non-VHD images', - option='CONF.xenserver.independent_compute') - vdis = _fetch_disk_image(context, session, instance, name_label, - image_id, image_type) - - for vdi_type, vdi in vdis.items(): - vdi_uuid = vdi['uuid'] - LOG.debug("Fetched VDIs of type '%(vdi_type)s' with UUID" - " '%(vdi_uuid)s'", - {'vdi_type': vdi_type, 'vdi_uuid': vdi_uuid}, - instance=instance) - - return vdis - - -def _make_uuid_stack(): - # NOTE(sirp): The XenAPI plugins run under Python 2.4 - # which does not have the `uuid` module. 
To work around this, - # we generate the uuids here (under Python 2.6+) and - # pass them as arguments - return [uuidutils.generate_uuid() for i in range(MAX_VDI_CHAIN_SIZE)] - - -def get_compression_level(): - level = CONF.xenserver.image_compression_level - if level is not None and (level < 1 or level > 9): - LOG.warning("Invalid value '%d' for image_compression_level", level) - return None - return level - - -def _fetch_vhd_image(context, session, instance, image_id, image_handler): - """Tell glance to download an image and put the VHDs into the SR - - Returns: A list of dictionaries that describe VDIs - """ - LOG.debug("Asking xapi to fetch vhd image %s", image_id, - instance=instance) - vdis = image_handler.download_image( - context, session, instance, image_id) - - # Ensure we can see the import VHDs as VDIs - scan_default_sr(session) - - vdi_uuid = vdis['root']['uuid'] - try: - _check_vdi_size(context, session, instance, vdi_uuid) - except Exception: - with excutils.save_and_reraise_exception(): - msg = "Error while checking vdi size" - LOG.debug(msg, instance=instance, exc_info=True) - for vdi in vdis.values(): - vdi_uuid = vdi['uuid'] - vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi_uuid) - safe_destroy_vdis(session, [vdi_ref]) - - return vdis - - -def _get_vdi_chain_size(session, vdi_uuid): - """Compute the total size of a VDI chain, starting with the specified - VDI UUID. - - This will walk the VDI chain to the root, add the size of each VDI into - the total. - """ - size_bytes = 0 - for vdi_rec in _walk_vdi_chain(session, vdi_uuid): - cur_vdi_uuid = vdi_rec['uuid'] - vdi_size_bytes = int(vdi_rec['physical_utilisation']) - LOG.debug('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=' - '%(vdi_size_bytes)d', - {'cur_vdi_uuid': cur_vdi_uuid, - 'vdi_size_bytes': vdi_size_bytes}) - size_bytes += vdi_size_bytes - return size_bytes - - -def _check_vdi_size(context, session, instance, vdi_uuid): - flavor = instance.get_flavor() - allowed_size = (flavor.root_gb + - VHD_SIZE_CHECK_FUDGE_FACTOR_GB) * units.Gi - if not flavor.root_gb: - # root_gb=0 indicates that we're disabling size checks - return - - size = _get_vdi_chain_size(session, vdi_uuid) - if size > allowed_size: - LOG.error("Image size %(size)d exceeded flavor " - "allowed size %(allowed_size)d", - {'size': size, 'allowed_size': allowed_size}, - instance=instance) - - raise exception.FlavorDiskSmallerThanImage( - flavor_size=(flavor.root_gb * units.Gi), - image_size=(size * units.Gi)) - - -def _fetch_disk_image(context, session, instance, name_label, image_id, - image_type): - """Fetch the image from Glance - - NOTE: - Unlike _fetch_vhd_image, this method does not use the Glance - plugin; instead, it streams the disks through domU to the VDI - directly. 
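The two return shapes, sketched with placeholder values:

    {'kernel': {'uuid': None, 'file': filename}}   # KERNEL/RAMDISK case
    {'root': {'uuid': vdi_uuid, 'file': None}}     # disk case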
- - Returns: A single filename if image_type is KERNEL_RAMDISK - A list of dictionaries that describe VDIs, otherwise - """ - # FIXME(sirp): Since the Glance plugin seems to be required for the - # VHD disk, it may be worth using the plugin for both VHD and RAW and - # DISK restores - image_type_str = ImageType.to_string(image_type) - LOG.debug("Fetching image %(image_id)s, type %(image_type_str)s", - {'image_id': image_id, 'image_type_str': image_type_str}, - instance=instance) - - if image_type == ImageType.DISK_ISO: - sr_ref = _safe_find_iso_sr(session) - else: - sr_ref = safe_find_sr(session) - - glance_image = image_utils.GlanceImage(context, image_id) - if glance_image.is_raw_tgz(): - image = image_utils.RawTGZImage(glance_image) - else: - image = image_utils.RawImage(glance_image) - - virtual_size = image.get_size() - vdi_size = virtual_size - LOG.debug("Size for image %(image_id)s: %(virtual_size)d", - {'image_id': image_id, 'virtual_size': virtual_size}, - instance=instance) - if image_type == ImageType.DISK: - # Make room for MBR. - vdi_size += MBR_SIZE_BYTES - elif (image_type in (ImageType.KERNEL, ImageType.RAMDISK) and - vdi_size > CONF.xenserver.max_kernel_ramdisk_size): - max_size = CONF.xenserver.max_kernel_ramdisk_size - raise exception.NovaException( - _("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, " - "max %(max_size)d bytes") % - {'vdi_size': vdi_size, 'max_size': max_size}) - - vdi_ref = create_vdi(session, sr_ref, instance, name_label, - image_type_str, vdi_size) - # From this point we have a VDI on Xen host; - # If anything goes wrong, we need to remember its uuid. - try: - filename = None - vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref) - - with vdi_attached(session, vdi_ref, read_only=False) as dev: - _stream_disk( - session, image.stream_to, image_type, virtual_size, dev) - - if image_type in (ImageType.KERNEL, ImageType.RAMDISK): - # We need to invoke a plugin for copying the - # content of the VDI into the proper path. - LOG.debug("Copying VDI %s to /boot/guest on dom0", - vdi_ref, instance=instance) - - cache_image = None - if CONF.xenserver.cache_images != 'none': - cache_image = image_id - filename = disk_management.copy_vdi(session, vdi_ref, vdi_size, - image_id=cache_image) - - # Remove the VDI as it is not needed anymore. - destroy_vdi(session, vdi_ref) - LOG.debug("Kernel/Ramdisk VDI %s destroyed", vdi_ref, - instance=instance) - vdi_role = ImageType.get_role(image_type) - return {vdi_role: dict(uuid=None, file=filename)} - else: - vdi_role = ImageType.get_role(image_type) - return {vdi_role: dict(uuid=vdi_uuid, file=None)} - except (session.XenAPI.Failure, IOError, OSError) as e: - # We look for XenAPI and OS failures. - LOG.exception("Failed to fetch glance image", instance=instance) - e.args = e.args + ([dict(type=ImageType.to_string(image_type), - uuid=vdi_uuid, - file=filename)],) - raise - - -def determine_disk_image_type(image_meta): - """Disk Image Types are used to determine where the kernel will reside - within an image. To figure out which type we're dealing with, we use - the following rules: - - 1. If we're using Glance, we can use the image_type field to - determine the image_type - - 2. If we're not using Glance, then we need to deduce this based on - whether a kernel_id is specified. 
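For example, under rule 1 (doctest-style sketch, not runnable without
Glance image metadata):

    >>> image_meta.disk_format = 'vhd'
    >>> determine_disk_image_type(image_meta) == ImageType.DISK_VHD
    True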
- """ - if not image_meta.obj_attr_is_set("disk_format"): - return None - - disk_format_map = { - 'ami': ImageType.DISK, - 'aki': ImageType.KERNEL, - 'ari': ImageType.RAMDISK, - 'raw': ImageType.DISK_RAW, - 'vhd': ImageType.DISK_VHD, - 'iso': ImageType.DISK_ISO, - } - - try: - image_type = disk_format_map[image_meta.disk_format] - except KeyError: - raise exception.InvalidDiskFormat(disk_format=image_meta.disk_format) - - LOG.debug("Detected %(type)s format for image %(image)s", - {'type': ImageType.to_string(image_type), - 'image': image_meta}) - - return image_type - - -def determine_vm_mode(instance, disk_image_type): - current_mode = obj_fields.VMMode.get_from_instance(instance) - if (current_mode == obj_fields.VMMode.XEN or - current_mode == obj_fields.VMMode.HVM): - return current_mode - - os_type = instance['os_type'] - if os_type == "linux": - return obj_fields.VMMode.XEN - if os_type == "windows": - return obj_fields.VMMode.HVM - - # disk_image_type specific default for backwards compatibility - if disk_image_type == ImageType.DISK_VHD or \ - disk_image_type == ImageType.DISK: - return obj_fields.VMMode.XEN - - # most images run OK as HVM - return obj_fields.VMMode.HVM - - -def set_vm_name_label(session, vm_ref, name_label): - session.call_xenapi("VM.set_name_label", vm_ref, name_label) - - -def list_vms(session): - vms = session.call_xenapi("VM.get_all_records_where", - 'field "is_control_domain"="false" and ' - 'field "is_a_template"="false" and ' - 'field "resident_on"="%s"' % session.host_ref) - for vm_ref in vms.keys(): - yield vm_ref, vms[vm_ref] - - -def lookup_vm_vdis(session, vm_ref): - """Look for the VDIs that are attached to the VM.""" - # Firstly we get the VBDs, then the VDIs. - # TODO(Armando): do we leave the read-only devices? - vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref) - vdi_refs = [] - if vbd_refs: - for vbd_ref in vbd_refs: - try: - vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref) - # Test valid VDI - vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref) - LOG.debug('VDI %s is still available', vdi_uuid) - vbd_other_config = session.call_xenapi("VBD.get_other_config", - vbd_ref) - if not vbd_other_config.get('osvol'): - # This is not an attached volume - vdi_refs.append(vdi_ref) - except session.XenAPI.Failure: - LOG.exception('Look for the VDIs failed') - return vdi_refs - - -def lookup(session, name_label, check_rescue=False): - """Look the instance up and return it if available. - :param:check_rescue: if True will return the 'name'-rescue vm if it - exists, instead of just 'name' - """ - if check_rescue: - result = lookup(session, name_label + '-rescue', False) - if result: - return result - vm_refs = session.call_xenapi("VM.get_by_name_label", name_label) - n = len(vm_refs) - if n == 0: - return None - elif n > 1: - raise exception.InstanceExists(name=name_label) - else: - return vm_refs[0] - - -def preconfigure_instance(session, instance, vdi_ref, network_info): - """Makes alterations to the image before launching as part of spawn. 
- """ - key = str(instance['key_data']) - net = netutils.get_injected_network_template(network_info) - metadata = instance['metadata'] - - # As mounting the image VDI is expensive, we only want do it once, - # if at all, so determine whether it's required first, and then do - # everything - mount_required = key or net or metadata - if not mount_required: - return - - with vdi_attached(session, vdi_ref, read_only=False) as dev: - _mounted_processing(dev, key, net, metadata) - - -def lookup_kernel_ramdisk(session, vm): - vm_rec = session.call_xenapi("VM.get_record", vm) - if 'PV_kernel' in vm_rec and 'PV_ramdisk' in vm_rec: - return (vm_rec['PV_kernel'], vm_rec['PV_ramdisk']) - else: - return (None, None) - - -def is_snapshot(session, vm): - vm_rec = session.call_xenapi("VM.get_record", vm) - if 'is_a_template' in vm_rec and 'is_a_snapshot' in vm_rec: - return vm_rec['is_a_template'] and vm_rec['is_a_snapshot'] - else: - return False - - -def get_power_state(session, vm_ref): - xapi_state = session.call_xenapi("VM.get_power_state", vm_ref) - return XENAPI_POWER_STATE[xapi_state] - - -def _vm_query_data_source(session, *args): - """We're getting diagnostics stats from the RRDs which are updated every - 5 seconds. It means that diagnostics information may be incomplete during - first 5 seconds of VM life. In such cases method ``query_data_source()`` - may raise a ``XenAPI.Failure`` exception or may return a `NaN` value. - """ - - try: - value = session.VM.query_data_source(*args) - except session.XenAPI.Failure: - return None - - if math.isnan(value): - return None - return value - - -def compile_info(session, vm_ref): - """Fill record with VM status information.""" - return hardware.InstanceInfo(state=get_power_state(session, vm_ref)) - - -def compile_instance_diagnostics(session, instance, vm_ref): - xen_power_state = session.VM.get_power_state(vm_ref) - vm_power_state = power_state.STATE_MAP[XENAPI_POWER_STATE[xen_power_state]] - config_drive = configdrive.required_by(instance) - - diags = diagnostics.Diagnostics(state=vm_power_state, - driver='xenapi', - config_drive=config_drive) - _add_cpu_usage(session, vm_ref, diags) - _add_nic_usage(session, vm_ref, diags) - _add_disk_usage(session, vm_ref, diags) - _add_memory_usage(session, vm_ref, diags) - - return diags - - -def _add_cpu_usage(session, vm_ref, diag_obj): - cpu_num = int(session.VM.get_VCPUs_max(vm_ref)) - for cpu_num in range(0, cpu_num): - utilisation = _vm_query_data_source(session, vm_ref, "cpu%d" % cpu_num) - if utilisation is not None: - utilisation *= 100 - diag_obj.add_cpu(id=cpu_num, utilisation=utilisation) - - -def _add_nic_usage(session, vm_ref, diag_obj): - vif_refs = session.VM.get_VIFs(vm_ref) - for vif_ref in vif_refs: - vif_rec = session.VIF.get_record(vif_ref) - rx_rate = _vm_query_data_source(session, vm_ref, - "vif_%s_rx" % vif_rec['device']) - tx_rate = _vm_query_data_source(session, vm_ref, - "vif_%s_tx" % vif_rec['device']) - diag_obj.add_nic(mac_address=vif_rec['MAC'], - rx_rate=rx_rate, - tx_rate=tx_rate) - - -def _add_disk_usage(session, vm_ref, diag_obj): - vbd_refs = session.VM.get_VBDs(vm_ref) - for vbd_ref in vbd_refs: - vbd_rec = session.VBD.get_record(vbd_ref) - read_bytes = _vm_query_data_source(session, vm_ref, - "vbd_%s_read" % vbd_rec['device']) - write_bytes = _vm_query_data_source(session, vm_ref, - "vbd_%s_write" % vbd_rec['device']) - diag_obj.add_disk(read_bytes=read_bytes, write_bytes=write_bytes) - - -def _add_memory_usage(session, vm_ref, diag_obj): - total_mem = 
_vm_query_data_source(session, vm_ref, "memory") - free_mem = _vm_query_data_source(session, vm_ref, "memory_internal_free") - used_mem = None - if total_mem is not None: - # total_mem provided from XenServer is in Bytes. Converting it to MB. - total_mem /= units.Mi - - if free_mem is not None: - # free_mem provided from XenServer is in KB. Converting it to MB. - used_mem = total_mem - free_mem / units.Ki - - diag_obj.memory_details = diagnostics.MemoryDiagnostics( - maximum=total_mem, used=used_mem) - - -def compile_diagnostics(vm_rec): - """Compile VM diagnostics data.""" - try: - keys = [] - diags = {} - vm_uuid = vm_rec["uuid"] - xml = _get_rrd(_get_rrd_server(), vm_uuid) - if xml: - rrd = minidom.parseString(xml) - for i, node in enumerate(rrd.firstChild.childNodes): - # Provide the last update of the information - if node.localName == 'lastupdate': - diags['last_update'] = node.firstChild.data - - # Create a list of the diagnostic keys (in their order) - if node.localName == 'ds': - ref = node.childNodes - # Name and Value - if len(ref) > 6: - keys.append(ref[0].firstChild.data) - - # Read the last row of the first RRA to get the latest info - if node.localName == 'rra': - rows = node.childNodes[4].childNodes - last_row = rows[rows.length - 1].childNodes - for j, value in enumerate(last_row): - diags[keys[j]] = value.firstChild.data - break - - return diags - except expat.ExpatError as e: - LOG.exception('Unable to parse rrd of %s', e) - return {"Unable to retrieve diagnostics": e} - - -def fetch_bandwidth(session): - bw = host_network.fetch_all_bandwidth(session) - return bw - - -def _scan_sr(session, sr_ref=None, max_attempts=4): - if sr_ref: - # NOTE(johngarbutt) xenapi will collapse any duplicate requests - # for SR.scan if there is already a scan in progress. - # However, we don't want that, because the scan may have started - # before we modified the underlying VHDs on disk through a plugin. - # Using our own mutex will reduce cases where our periodic SR scan - # in host.update_status starts racing the sr.scan after a plugin call. 
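# For illustration (hypothetical refs): two concurrent calls for the same
# SR, _scan_sr(session, 'OpaqueRef:sr1'), contend for the lock
# 'sr-scan-OpaqueRef:sr1', so the second scan only starts once the first
# has returned.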
- @utils.synchronized('sr-scan-' + sr_ref) - def do_scan(sr_ref): - LOG.debug("Scanning SR %s", sr_ref) - - attempt = 1 - while True: - try: - return session.call_xenapi('SR.scan', sr_ref) - except session.XenAPI.Failure as exc: - with excutils.save_and_reraise_exception() as ctxt: - if exc.details[0] == 'SR_BACKEND_FAILURE_40': - if attempt < max_attempts: - ctxt.reraise = False - LOG.warning("Retry SR scan due to error: %s", - exc) - greenthread.sleep(2 ** attempt) - attempt += 1 - do_scan(sr_ref) - - -def scan_default_sr(session): - """Looks for the system default SR and triggers a re-scan.""" - sr_ref = safe_find_sr(session) - _scan_sr(session, sr_ref) - return sr_ref - - -def safe_find_sr(session): - """Same as _find_sr except raises a NotFound exception if SR cannot be - determined - """ - sr_ref = _find_sr(session) - if sr_ref is None: - raise exception.StorageRepositoryNotFound() - return sr_ref - - -def _find_sr(session): - """Return the storage repository to hold VM images.""" - host = session.host_ref - try: - tokens = CONF.xenserver.sr_matching_filter.split(':') - filter_criteria = tokens[0] - filter_pattern = tokens[1] - except IndexError: - # oops, flag is invalid - LOG.warning("Flag sr_matching_filter '%s' does not respect " - "formatting convention", - CONF.xenserver.sr_matching_filter) - return None - - if filter_criteria == 'other-config': - key, value = filter_pattern.split('=', 1) - for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'): - if not (key in sr_rec['other_config'] and - sr_rec['other_config'][key] == value): - continue - for pbd_ref in sr_rec['PBDs']: - pbd_rec = session.get_rec('PBD', pbd_ref) - if pbd_rec and pbd_rec['host'] == host: - return sr_ref - elif filter_criteria == 'default-sr' and filter_pattern == 'true': - pool_ref = session.call_xenapi('pool.get_all')[0] - sr_ref = session.call_xenapi('pool.get_default_SR', pool_ref) - if sr_ref: - return sr_ref - # No SR found! - LOG.error("XenAPI is unable to find a Storage Repository to " - "install guest instances on. Please check your " - "configuration (e.g. 
set a default SR for the pool) " - "and/or configure the flag 'sr_matching_filter'.") - return None - - -def _safe_find_iso_sr(session): - """Same as _find_iso_sr except raises a NotFound exception if SR - cannot be determined - """ - sr_ref = _find_iso_sr(session) - if sr_ref is None: - raise exception.NotFound(_('Cannot find SR of content-type ISO')) - return sr_ref - - -def _find_iso_sr(session): - """Return the storage repository to hold ISO images.""" - host = session.host_ref - for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'): - LOG.debug("ISO: looking at SR %s", sr_rec) - if not sr_rec['content_type'] == 'iso': - LOG.debug("ISO: not iso content") - continue - if 'i18n-key' not in sr_rec['other_config']: - LOG.debug("ISO: iso content_type, no 'i18n-key' key") - continue - if not sr_rec['other_config']['i18n-key'] == 'local-storage-iso': - LOG.debug("ISO: iso content_type, i18n-key value not " - "'local-storage-iso'") - continue - - LOG.debug("ISO: SR MATCHing our criteria") - for pbd_ref in sr_rec['PBDs']: - LOG.debug("ISO: ISO, looking to see if it is host local") - pbd_rec = session.get_rec('PBD', pbd_ref) - if not pbd_rec: - LOG.debug("ISO: PBD %s disappeared", pbd_ref) - continue - pbd_rec_host = pbd_rec['host'] - LOG.debug("ISO: PBD matching, want %(pbd_rec)s, have %(host)s", - {'pbd_rec': pbd_rec, 'host': host}) - if pbd_rec_host == host: - LOG.debug("ISO: SR with local PBD") - return sr_ref - return None - - -def _get_rrd_server(): - """Return server's scheme and address to use for retrieving RRD XMLs.""" - xs_url = urlparse.urlparse(CONF.xenserver.connection_url) - return [xs_url.scheme, xs_url.netloc] - - -def _get_rrd(server, vm_uuid): - """Return the VM RRD XML as a string.""" - try: - xml = urlrequest.urlopen("%s://%s:%s@%s/vm_rrd?uuid=%s" % ( - server[0], - CONF.xenserver.connection_username, - CONF.xenserver.connection_password, - server[1], - vm_uuid)) - return xml.read() - except IOError: - LOG.exception('Unable to obtain RRD XML for VM %(vm_uuid)s with ' - 'server details: %(server)s.', - {'vm_uuid': vm_uuid, 'server': server}) - return None - - -def _get_all_vdis_in_sr(session, sr_ref): - for vdi_ref in session.call_xenapi('SR.get_VDIs', sr_ref): - vdi_rec = session.get_rec('VDI', vdi_ref) - # Check to make sure the record still exists. 
It may have - # been deleted between the get_all call and get_rec call - if vdi_rec: - yield vdi_ref, vdi_rec - - -def get_instance_vdis_for_sr(session, vm_ref, sr_ref): - """Return opaqueRef for all the vdis which live on sr.""" - for vbd_ref in session.call_xenapi('VM.get_VBDs', vm_ref): - try: - vdi_ref = session.call_xenapi('VBD.get_VDI', vbd_ref) - if sr_ref == session.call_xenapi('VDI.get_SR', vdi_ref): - yield vdi_ref - except session.XenAPI.Failure: - continue - - -def _get_vhd_parent_uuid(session, vdi_ref, vdi_rec=None): - if vdi_rec is None: - vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref) - - if 'vhd-parent' not in vdi_rec['sm_config']: - return None - - parent_uuid = vdi_rec['sm_config']['vhd-parent'] - vdi_uuid = vdi_rec['uuid'] - LOG.debug('VHD %(vdi_uuid)s has parent %(parent_uuid)s', - {'vdi_uuid': vdi_uuid, 'parent_uuid': parent_uuid}) - return parent_uuid - - -def _walk_vdi_chain(session, vdi_uuid): - """Yield vdi_recs for each element in a VDI chain.""" - scan_default_sr(session) - while True: - vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid) - vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref) - yield vdi_rec - - parent_uuid = _get_vhd_parent_uuid(session, vdi_ref, vdi_rec) - if not parent_uuid: - break - - vdi_uuid = parent_uuid - - -def _is_vdi_a_snapshot(vdi_rec): - """Ensure VDI is a snapshot, and not cached image.""" - is_a_snapshot = vdi_rec['is_a_snapshot'] - image_id = vdi_rec['other_config'].get('image-id') - return is_a_snapshot and not image_id - - -def _child_vhds(session, sr_ref, vdi_uuid_list, old_snapshots_only=False): - """Return the immediate children of a given VHD. - - This is not recursive, only the immediate children are returned. - """ - children = set() - for ref, rec in _get_all_vdis_in_sr(session, sr_ref): - rec_uuid = rec['uuid'] - - if rec_uuid in vdi_uuid_list: - continue - - parent_uuid = _get_vhd_parent_uuid(session, ref, rec) - if parent_uuid not in vdi_uuid_list: - continue - - if old_snapshots_only and not _is_vdi_a_snapshot(rec): - continue - - children.add(rec_uuid) - - return list(children) - - -def _count_children(session, parent_vdi_uuid, sr_ref): - # Search for any other vdi which has the same parent as us to work out - # whether we have siblings and therefore if coalesce is possible - children = 0 - for _ref, rec in _get_all_vdis_in_sr(session, sr_ref): - if (rec['sm_config'].get('vhd-parent') == parent_vdi_uuid): - children = children + 1 - return children - - -def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref, - vdi_uuid_list): - """Spin until the parent VHD is coalesced into one of the VDIs in the list - - vdi_uuid_list is a list of acceptable final parent VDIs for vdi_ref; once - the parent of vdi_ref is in vdi_uuid_chain we consider the coalesce over. - - The use case is there are any number of VDIs between those in - vdi_uuid_list and vdi_ref that we expect to be coalesced, but any of those - in vdi_uuid_list may also be coalesced (except the base UUID - which is - guaranteed to remain) - """ - # If the base disk was a leaf node, there will be no coalescing - # after a VDI snapshot. - if len(vdi_uuid_list) == 1: - LOG.debug("Old chain is single VHD, coalesce not possible.", - instance=instance) - return - - # If the parent of the original disk has other children, - # there will be no coalesce because of the VDI snapshot. - # For example, the first snapshot for an instance that has been - # spawned from a cached image, will not coalesce, because of this rule. 
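# An illustrative chain (hypothetical UUIDs): vdi_uuid_list = [leaf,
# parent, base]. If any other VDI in the SR also reports 'parent' as its
# vhd-parent, that sibling blocks the coalesce and we return early.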
- parent_vdi_uuid = vdi_uuid_list[1] - if _count_children(session, parent_vdi_uuid, sr_ref) > 1: - LOG.debug("Parent has other children, coalesce is unlikely.", - instance=instance) - return - - # When the VDI snapshot is taken, a new parent is created. - # Assuming it is not one of the above cases, that new parent - # can be coalesced, so we need to wait for that to happen. - max_attempts = CONF.xenserver.vhd_coalesce_max_attempts - # Remove the leaf node from list, to get possible good parents - # when the coalesce has completed. - # Its possible that other coalesce operation happen, so we need - # to consider the full chain, rather than just the most recent parent. - good_parent_uuids = vdi_uuid_list[1:] - for i in range(max_attempts): - # NOTE(sirp): This rescan is necessary to ensure the VM's `sm_config` - # matches the underlying VHDs. - # This can also kick XenServer into performing a pending coalesce. - _scan_sr(session, sr_ref) - parent_uuid = _get_vhd_parent_uuid(session, vdi_ref) - if parent_uuid and (parent_uuid not in good_parent_uuids): - LOG.debug("Parent %(parent_uuid)s not yet in parent list" - " %(good_parent_uuids)s, waiting for coalesce...", - {'parent_uuid': parent_uuid, - 'good_parent_uuids': good_parent_uuids}, - instance=instance) - else: - LOG.debug("Coalesce detected, because parent is: %s", parent_uuid, - instance=instance) - return - - greenthread.sleep(CONF.xenserver.vhd_coalesce_poll_interval) - - msg = (_("VHD coalesce attempts exceeded (%d)" - ", giving up...") % max_attempts) - raise exception.NovaException(msg) - - -def _wait_for_device(session, dev, dom0, max_seconds): - """Wait for device node to appear.""" - dev_path = utils.make_dev_path(dev) - found_path = None - if dom0: - found_path = disk_management.wait_for_dev(session, dev_path, - max_seconds) - else: - for i in range(0, max_seconds): - if os.path.exists(dev_path): - found_path = dev_path - break - time.sleep(1) - - if found_path is None: - raise exception.StorageError( - reason=_('Timeout waiting for device %s to be created') % dev) - - -def cleanup_attached_vdis(session): - """Unplug any instance VDIs left after an unclean restart.""" - this_vm_ref = _get_this_vm_ref(session) - - vbd_refs = session.call_xenapi('VM.get_VBDs', this_vm_ref) - for vbd_ref in vbd_refs: - try: - vdi_ref = session.call_xenapi('VBD.get_VDI', vbd_ref) - vdi_rec = session.call_xenapi('VDI.get_record', vdi_ref) - except session.XenAPI.Failure as e: - if e.details[0] != 'HANDLE_INVALID': - raise - continue - - if 'nova_instance_uuid' in vdi_rec['other_config']: - # Belongs to an instance and probably left over after an - # unclean restart - LOG.info('Disconnecting stale VDI %s from compute domU', - vdi_rec['uuid']) - unplug_vbd(session, vbd_ref, this_vm_ref) - destroy_vbd(session, vbd_ref) - - -@contextlib.contextmanager -def vdi_attached(session, vdi_ref, read_only=False, dom0=False): - if dom0: - this_vm_ref = _get_dom0_ref(session) - else: - # Make sure we are running as a domU. - ensure_correct_host(session) - this_vm_ref = _get_this_vm_ref(session) - - vbd_ref = create_vbd(session, this_vm_ref, vdi_ref, 'autodetect', - read_only=read_only, bootable=False) - try: - LOG.debug('Plugging VBD %s ... 
', vbd_ref)
-        session.VBD.plug(vbd_ref, this_vm_ref)
-        try:
-            LOG.debug('Plugging VBD %s done.', vbd_ref)
-            dev = session.call_xenapi("VBD.get_device", vbd_ref)
-            LOG.debug('VBD %(vbd_ref)s plugged as %(dev)s',
-                      {'vbd_ref': vbd_ref, 'dev': dev})
-            _wait_for_device(session, dev, dom0,
-                             CONF.xenserver.block_device_creation_timeout)
-            yield dev
-        finally:
-            # As we cannot have filesystems mounted here (we cannot
-            # destroy the VBD with filesystems mounted), it is not
-            # useful to call sync.
-            LOG.debug('Destroying VBD for VDI %s ... ', vdi_ref)
-            unplug_vbd(session, vbd_ref, this_vm_ref)
-    finally:
-        try:
-            destroy_vbd(session, vbd_ref)
-        except exception.StorageError:
-            # destroy_vbd() will log error
-            pass
-        LOG.debug('Destroying VBD for VDI %s done.', vdi_ref)
-
-
-def _get_sys_hypervisor_uuid():
-    with open('/sys/hypervisor/uuid') as f:
-        return f.readline().strip()
-
-
-def _get_dom0_ref(session):
-    vms = session.call_xenapi("VM.get_all_records_where",
-                              'field "domid"="0" and '
-                              'field "resident_on"="%s"' %
-                              session.host_ref)
-    return list(vms.keys())[0]
-
-
-def get_this_vm_uuid(session):
-    if CONF.xenserver.independent_compute:
-        LOG.error("This host has been configured with the independent "
-                  "compute flag. An operation has been attempted which is "
-                  "incompatible with this flag, but should have been "
-                  "caught earlier. Please raise a bug against the "
-                  "OpenStack Nova project")
-        raise exception.NotSupportedWithOption(
-            operation='uncaught operation',
-            option='CONF.xenserver.independent_compute')
-    if session and session.is_local_connection:
-        # UUID is the control domain running on this host
-        vms = session.call_xenapi("VM.get_all_records_where",
-                                  'field "domid"="0" and '
-                                  'field "resident_on"="%s"' %
-                                  session.host_ref)
-        return vms[list(vms.keys())[0]]['uuid']
-    try:
-        return _get_sys_hypervisor_uuid()
-    except IOError:
-        # Some guest kernels (without 5c13f8067745efc15f6ad0158b58d57c44104c25)
-        # cannot read from uuid after a reboot. Fall back to trying xenstore.
-        # See https://bugs.launchpad.net/ubuntu/+source/xen-api/+bug/1081182
-        domid, _ = nova.privsep.xenapi.xenstore_read('domid')
-        vm_key, _ = nova.privsep.xenapi.xenstore_read(
-            '/local/domain/%s/vm' % domid.strip())
-        return vm_key.strip()[4:]
-
-
-def _get_this_vm_ref(session):
-    return session.call_xenapi("VM.get_by_uuid", get_this_vm_uuid(session))
-
-
-def _get_partitions(dev):
-    return nova.privsep.fs.list_partitions(utils.make_dev_path(dev))
-
-
-def _stream_disk(session, image_service_func, image_type, virtual_size, dev):
-    offset = 0
-    if image_type == ImageType.DISK:
-        offset = MBR_SIZE_BYTES
-        _write_partition(session, virtual_size, dev)
-
-    dev_path = utils.make_dev_path(dev)
-
-    with utils.temporary_chown(dev_path):
-        with open(dev_path, 'wb') as f:
-            f.seek(offset)
-            image_service_func(f)
-
-
-def _write_partition(session, virtual_size, dev):
-    dev_path = utils.make_dev_path(dev)
-    primary_first = MBR_SIZE_SECTORS
-    primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
-
-    LOG.debug('Writing partition table %(primary_first)d %(primary_last)d'
-              ' to %(dev_path)s...',
-              {'primary_first': primary_first, 'primary_last': primary_last,
-               'dev_path': dev_path})
-
-    _make_partition(session, dev, "%ds" % primary_first, "%ds" % primary_last)
-    LOG.debug('Writing partition table %s done.', dev_path)
-
-
-def _resize_part_and_fs(dev, start, old_sectors, new_sectors, flags):
-    """Resize partition and filesystem.
- - This assumes we are dealing with a single primary partition and using - ext3 or ext4. - """ - size = new_sectors - start - end = new_sectors - 1 - - dev_path = utils.make_dev_path(dev) - partition_path = utils.make_dev_path(dev, partition=1) - - # Replay journal if FS wasn't cleanly unmounted - nova.privsep.fs.e2fsck(partition_path) - - # Remove ext3 journal (making it ext2) - nova.privsep.fs.ext_journal_disable(partition_path) - - if new_sectors < old_sectors: - # Resizing down, resize filesystem before partition resize - try: - nova.privsep.fs.resize2fs(partition_path, [0], size='%ds' % size) - except processutils.ProcessExecutionError as exc: - LOG.error(six.text_type(exc)) - reason = _("Shrinking the filesystem down with resize2fs " - "has failed, please check if you have " - "enough free space on your disk.") - raise exception.ResizeError(reason=reason) - - nova.privsep.fs.resize_partition(dev_path, start, end, - 'boot' in flags.lower()) - - if new_sectors > old_sectors: - # Resizing up, resize filesystem after partition resize - nova.privsep.fs.resize2fs(partition_path, [0]) - - # Add back journal - nova.privsep.fs.ext_journal_enable(partition_path) - - -def _log_progress_if_required(left, last_log_time, virtual_size): - if timeutils.is_older_than(last_log_time, PROGRESS_INTERVAL_SECONDS): - last_log_time = timeutils.utcnow() - complete_pct = float(virtual_size - left) / virtual_size * 100 - LOG.debug("Sparse copy in progress, " - "%(complete_pct).2f%% complete. " - "%(left)s bytes left to copy", - {"complete_pct": complete_pct, "left": left}) - return last_log_time - - -def _sparse_copy(src_path, dst_path, virtual_size, block_size=4096): - """Copy data, skipping long runs of zeros to create a sparse file.""" - start_time = last_log_time = timeutils.utcnow() - EMPTY_BLOCK = '\0' * block_size - bytes_read = 0 - skipped_bytes = 0 - left = virtual_size - - LOG.debug("Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " - "virtual_size=%(virtual_size)d block_size=%(block_size)d", - {'src_path': src_path, 'dst_path': dst_path, - 'virtual_size': virtual_size, 'block_size': block_size}) - - # NOTE(sirp): we need read/write access to the devices; since we don't have - # the luxury of shelling out to a sudo'd command, we temporarily take - # ownership of the devices. 
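# NOTE(editor): _sparse_copy below seeks past all-zero blocks instead of
# writing them. The same trick in a stripped-down, standalone form,
# independent of XenAPI and eventlet (paths are placeholders):

import os


def simple_sparse_copy(src_path, dst_path, size, block_size=4096):
    left = size
    empty = b'\0' * block_size
    with open(src_path, 'rb') as src, open(dst_path, 'wb') as dst:
        while left > 0:
            data = src.read(min(block_size, left))
            if not data:
                break
            if data == empty:
                # Seeking leaves a hole; the filesystem stores it sparsely.
                dst.seek(len(data), os.SEEK_CUR)
            else:
                dst.write(data)
            left -= len(data)
        # Make sure the file has its full length even if it ends in a hole.
        dst.truncate(size)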
- with utils.temporary_chown(src_path): - with utils.temporary_chown(dst_path): - with open(src_path, "r") as src: - with open(dst_path, "w") as dst: - data = src.read(min(block_size, left)) - while data: - if data == EMPTY_BLOCK: - dst.seek(block_size, os.SEEK_CUR) - left -= block_size - bytes_read += block_size - skipped_bytes += block_size - else: - dst.write(data) - data_len = len(data) - left -= data_len - bytes_read += data_len - - if left <= 0: - break - - data = src.read(min(block_size, left)) - greenthread.sleep(0) - last_log_time = _log_progress_if_required( - left, last_log_time, virtual_size) - - duration = timeutils.delta_seconds(start_time, timeutils.utcnow()) - compression_pct = float(skipped_bytes) / bytes_read * 100 - - LOG.debug("Finished sparse_copy in %(duration).2f secs, " - "%(compression_pct).2f%% reduction in size", - {'duration': duration, 'compression_pct': compression_pct}) - - -def _copy_partition(session, src_ref, dst_ref, partition, virtual_size): - # Part of disk taken up by MBR - virtual_size -= MBR_SIZE_BYTES - - with vdi_attached(session, src_ref, read_only=True) as src: - src_path = utils.make_dev_path(src, partition=partition) - - with vdi_attached(session, dst_ref, read_only=False) as dst: - dst_path = utils.make_dev_path(dst, partition=partition) - - _write_partition(session, virtual_size, dst) - - if CONF.xenserver.sparse_copy: - _sparse_copy(src_path, dst_path, virtual_size) - else: - num_blocks = virtual_size / SECTOR_SIZE - nova.privsep.xenapi.block_copy( - src_path, dst_path, DD_BLOCKSIZE, num_blocks) - - -def _mount_filesystem(dev_path, mount_point): - """mounts the device specified by dev_path in mount_point.""" - try: - _out, err = nova.privsep.fs.mount('ext2,ext3,ext4,reiserfs', - dev_path, mount_point, None) - except processutils.ProcessExecutionError as e: - err = six.text_type(e) - return err - - -def _mounted_processing(device, key, net, metadata): - """Callback which runs with the image VDI attached.""" - # NB: Partition 1 hardcoded - dev_path = utils.make_dev_path(device, partition=1) - with utils.tempdir() as tmpdir: - # Mount only Linux filesystems, to avoid disturbing NTFS images - err = _mount_filesystem(dev_path, tmpdir) - if not err: - try: - # This try block ensures that the umount occurs - if not agent.find_guest_agent(tmpdir): - # TODO(berrange) passing in a None filename is - # rather dubious. We shouldn't be re-implementing - # the mount/unmount logic here either, when the - # VFSLocalFS impl has direct support for mount - # and unmount handling if it were passed a - # non-None filename - vfs = vfsimpl.VFSLocalFS( - imgmodel.LocalFileImage(None, imgmodel.FORMAT_RAW), - imgdir=tmpdir) - LOG.info('Manipulating interface files directly') - # for xenapi, we don't 'inject' admin_password here, - # it's handled at instance startup time, nor do we - # support injecting arbitrary files here. - disk.inject_data_into_fs(vfs, - key, net, metadata, None, None) - finally: - nova.privsep.fs.umount(dev_path) - else: - LOG.info('Failed to mount filesystem (expected for ' - 'non-linux instances): %s', err) - - -def ensure_correct_host(session): - """Ensure we're connected to the host we're running on. This is the - required configuration for anything that uses vdi_attached without - the dom0 flag. 
- """ - if session.host_checked: - return - - this_vm_uuid = get_this_vm_uuid(session) - - try: - session.call_xenapi('VM.get_by_uuid', this_vm_uuid) - session.host_checked = True - except session.XenAPI.Failure as exc: - if exc.details[0] != 'UUID_INVALID': - raise - raise Exception(_('This domU must be running on the host ' - 'specified by connection_url')) - - -def import_all_migrated_disks(session, instance, import_root=True): - root_vdi = None - if import_root: - root_vdi = _import_migrated_root_disk(session, instance) - eph_vdis = _import_migrate_ephemeral_disks(session, instance) - return {'root': root_vdi, 'ephemerals': eph_vdis} - - -def _import_migrated_root_disk(session, instance): - chain_label = instance['uuid'] - vdi_label = instance['name'] - return _import_migrated_vhds(session, instance, chain_label, "root", - vdi_label) - - -def _import_migrate_ephemeral_disks(session, instance): - ephemeral_vdis = {} - instance_uuid = instance['uuid'] - ephemeral_gb = instance.old_flavor.ephemeral_gb - disk_sizes = get_ephemeral_disk_sizes(ephemeral_gb) - for chain_number, _size in enumerate(disk_sizes, start=1): - chain_label = instance_uuid + "_ephemeral_%d" % chain_number - vdi_label = "%(name)s ephemeral (%(number)d)" % dict( - name=instance['name'], number=chain_number) - ephemeral_vdi = _import_migrated_vhds(session, instance, - chain_label, "ephemeral", - vdi_label) - userdevice = 3 + chain_number - ephemeral_vdis[str(userdevice)] = ephemeral_vdi - return ephemeral_vdis - - -def _import_migrated_vhds(session, instance, chain_label, disk_type, - vdi_label): - """Move and possibly link VHDs via the XAPI plugin.""" - imported_vhds = vm_management.receive_vhd(session, chain_label, - get_sr_path(session), - _make_uuid_stack()) - - # Now we rescan the SR so we find the VHDs - scan_default_sr(session) - - vdi_uuid = imported_vhds['root']['uuid'] - vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi_uuid) - - # Set name-label so we can find if we need to clean up a failed migration - _set_vdi_info(session, vdi_ref, disk_type, vdi_label, - disk_type, instance) - - return {'uuid': vdi_uuid, 'ref': vdi_ref} - - -def migrate_vhd(session, instance, vdi_uuid, dest, sr_path, seq_num, - ephemeral_number=0): - LOG.debug("Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d", - {'vdi_uuid': vdi_uuid, 'seq_num': seq_num}, - instance=instance) - chain_label = instance['uuid'] - if ephemeral_number: - chain_label = instance['uuid'] + "_ephemeral_%d" % ephemeral_number - try: - vm_management.transfer_vhd(session, chain_label, dest, vdi_uuid, - sr_path, seq_num) - except session.XenAPI.Failure: - msg = "Failed to transfer vhd to new host" - LOG.debug(msg, instance=instance, exc_info=True) - raise exception.MigrationError(reason=msg) - - -def vm_ref_or_raise(session, instance_name): - vm_ref = lookup(session, instance_name) - if vm_ref is None: - raise exception.InstanceNotFound(instance_id=instance_name) - return vm_ref - - -def handle_ipxe_iso(session, instance, cd_vdi, network_info): - """iPXE ISOs are a mechanism to allow the customer to roll their own - image. - - To use this feature, a service provider needs to configure the - appropriate Nova flags, roll an iPXE ISO, then distribute that image - to customers via Glance. - - NOTE: `mkisofs` is not present by default in the Dom0, so the service - provider can either add that package manually to Dom0 or include the - `mkisofs` binary in the image itself. 
- """ - boot_menu_url = CONF.xenserver.ipxe_boot_menu_url - if not boot_menu_url: - LOG.warning('ipxe_boot_menu_url not set, user will have to' - ' enter URL manually...', instance=instance) - return - - network_name = CONF.xenserver.ipxe_network_name - if not network_name: - LOG.warning('ipxe_network_name not set, user will have to' - ' enter IP manually...', instance=instance) - return - - network = None - for vif in network_info: - if vif['network']['label'] == network_name: - network = vif['network'] - break - - if not network: - LOG.warning("Unable to find network matching '%(network_name)s', " - "user will have to enter IP manually...", - {'network_name': network_name}, instance=instance) - return - - sr_path = get_sr_path(session) - - # Unpack IPv4 network info - subnet = [sn for sn in network['subnets'] - if sn['version'] == 4][0] - ip = subnet['ips'][0] - - ip_address = ip['address'] - netmask = network_model.get_netmask(ip, subnet) - gateway = subnet['gateway']['address'] - dns = subnet['dns'][0]['address'] - - try: - disk_management.inject_ipxe_config(session, sr_path, cd_vdi['uuid'], - boot_menu_url, ip_address, netmask, - gateway, dns, - CONF.xenserver.ipxe_mkisofs_cmd) - except session.XenAPI.Failure as exc: - _type, _method, error = exc.details[:3] - if error == 'CommandNotFound': - LOG.warning("ISO creation tool '%s' does not exist.", - CONF.xenserver.ipxe_mkisofs_cmd, instance=instance) - else: - raise - - -def set_other_config_pci(session, vm_ref, params): - """Set the pci key of other-config parameter to params.""" - other_config = session.call_xenapi("VM.get_other_config", vm_ref) - other_config['pci'] = params - session.call_xenapi("VM.set_other_config", vm_ref, other_config) - - -def host_in_this_pool(session, host_ref): - rec_dict = session.host.get_all_records() - return host_ref in rec_dict.keys() diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py deleted file mode 100644 index 3cf67b9bfc79..000000000000 --- a/nova/virt/xenapi/vmops.py +++ /dev/null @@ -1,2725 +0,0 @@ -# Copyright (c) 2010 Citrix Systems, Inc. -# Copyright 2010 OpenStack Foundation -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Management class for VM-related functions (spawn, reboot, etc). 
-""" - -import base64 -import functools -import time -import zlib - -import eventlet -from eventlet import greenthread -import netaddr -from os_xenapi.client import host_xenstore -from os_xenapi.client import vm_management -from os_xenapi.client import XenAPI -from oslo_log import log as logging -from oslo_serialization import jsonutils -from oslo_utils import excutils -from oslo_utils import importutils -from oslo_utils import netutils -from oslo_utils import strutils -from oslo_utils import timeutils -from oslo_utils import units -from oslo_utils import versionutils -import six - -from nova import block_device -from nova.compute import api as compute -from nova.compute import power_state -from nova.compute import task_states -from nova.compute import vm_states -import nova.conf -from nova.console import type as ctype -from nova import context as nova_context -from nova import exception -from nova.i18n import _ -from nova import objects -from nova.objects import fields as obj_fields -from nova.pci import manager as pci_manager -from nova import utils -from nova.virt import configdrive -from nova.virt import driver as virt_driver -from nova.virt.xenapi import agent as xapi_agent -from nova.virt.xenapi.image import utils as image_utils -from nova.virt.xenapi import vif as xapi_vif -from nova.virt.xenapi import vm_utils -from nova.virt.xenapi import volume_utils -from nova.virt.xenapi import volumeops - - -LOG = logging.getLogger(__name__) - - -CONF = nova.conf.CONF - -RESIZE_TOTAL_STEPS = 5 - -DEVICE_ROOT = '0' -DEVICE_RESCUE = '1' -DEVICE_SWAP = '2' -DEVICE_CONFIGDRIVE = '3' -# Note(johngarbutt) HVM guests only support four devices -# until the PV tools activate, when others before available -# As such, ephemeral disk only available once PV tools load -# Note(johngarbutt) When very large ephemeral storage is required, -# multiple disks may be added. In this case the device id below -# is the used for the first disk. The second disk will be given -# next device id, i.e. 5, and so on, until enough space is added. -DEVICE_EPHEMERAL = '4' -# Note(johngarbutt) Currently don't support ISO boot during rescue -# and we must have the ISO visible before the PV drivers start -DEVICE_CD = '1' - - -def make_step_decorator(context, instance, update_instance_progress, - total_offset=0): - """Factory to create a decorator that records instance progress as a series - of discrete steps. - - Each time the decorator is invoked we bump the total-step-count, so after:: - - @step - def step1(): - ... - - @step - def step2(): - ... - - we have a total-step-count of 2. - - Each time the step-function (not the step-decorator!) is invoked, we bump - the current-step-count by 1, so after:: - - step1() - - the current-step-count would be 1 giving a progress of ``1 / 2 * - 100`` or 50%. 
-    """
-    step_info = dict(total=total_offset, current=0)
-
-    def bump_progress():
-        step_info['current'] += 1
-        update_instance_progress(context, instance,
-                                 step_info['current'], step_info['total'])
-
-    def step_decorator(f):
-        step_info['total'] += 1
-
-        @functools.wraps(f)
-        def inner(*args, **kwargs):
-            rv = f(*args, **kwargs)
-            bump_progress()
-            return rv
-
-        return inner
-
-    return step_decorator
-
-
-class VMOps(object):
-    """Management class for VM-related tasks."""
-    def __init__(self, session, virtapi):
-        self.compute_api = compute.API()
-        self._session = session
-        self._virtapi = virtapi
-        self._volumeops = volumeops.VolumeOps(self._session)
-        self.vif_driver = xapi_vif.XenAPIOpenVswitchDriver(
-            xenapi_session=self._session)
-        self.default_root_dev = '/dev/sda'
-
-        image_handler_cfg = CONF.xenserver.image_handler
-        self.image_handler = image_utils.get_image_handler(image_handler_cfg)
-        # TODO(jianghuaw): Remove these lines relative to the deprecated
-        # option of "image_upload_handler" in the next release - Stein.
-        self.image_upload_handler = None
-        image_upload_handler_cfg = CONF.xenserver.image_upload_handler
-        if image_upload_handler_cfg:
-            # If *image_upload_handler* is explicitly configured, it
-            # means it intends to use a non-default image upload handler.
-            # In order to avoid mis-using the default image_handler, which
-            # may have different behavior than the explicitly configured
-            # handler, we keep using *image_upload_handler*.
-            LOG.warning("Deprecated: importing image upload handler: %s",
-                        image_upload_handler_cfg)
-            self.image_upload_handler = importutils.import_object(
-                image_upload_handler_cfg)
-
-    def agent_enabled(self, instance):
-        if CONF.xenserver.disable_agent:
-            return False
-
-        return xapi_agent.should_use_agent(instance)
-
-    def _get_agent(self, instance, vm_ref):
-        if self.agent_enabled(instance):
-            return xapi_agent.XenAPIBasedAgent(self._session, self._virtapi,
-                                               instance, vm_ref)
-        raise exception.NovaException(_("Error: Agent is disabled"))
-
-    def instance_exists(self, name_label):
-        return vm_utils.lookup(self._session, name_label) is not None
-
-    def list_instances(self):
-        """List VM instances."""
-        # TODO(justinsb): Should we just always use the details method?
-        #                 It seems to be the same number of API calls.
-        name_labels = []
-        for vm_ref, vm_rec in vm_utils.list_vms(self._session):
-            name_labels.append(vm_rec["name_label"])
-
-        return name_labels
-
-    def list_instance_uuids(self):
-        """Get the list of nova instance uuids for VMs found on the
-        hypervisor.
- """ - nova_uuids = [] - for vm_ref, vm_rec in vm_utils.list_vms(self._session): - other_config = vm_rec['other_config'] - nova_uuid = other_config.get('nova_uuid') - if nova_uuid: - nova_uuids.append(nova_uuid) - return nova_uuids - - def confirm_migration(self, migration, instance, network_info): - self._destroy_orig_vm(instance, network_info) - - def _destroy_orig_vm(self, instance, network_info): - name_label = self._get_orig_vm_name_label(instance) - vm_ref = vm_utils.lookup(self._session, name_label) - return self._destroy(instance, vm_ref, network_info=network_info) - - def _attach_mapped_block_devices(self, instance, block_device_info): - # We are attaching these volumes before start (no hotplugging) - # because some guests (windows) don't load PV drivers quickly - block_device_mapping = virt_driver.block_device_info_get_mapping( - block_device_info) - for vol in block_device_mapping: - if vol['mount_device'] == instance['root_device_name']: - # NOTE(alaski): The root device should be attached already - continue - connection_info = vol['connection_info'] - mount_device = vol['mount_device'].rpartition("/")[2] - self._volumeops.attach_volume(connection_info, - instance['name'], - mount_device, - hotplug=False) - - def finish_revert_migration(self, context, instance, - block_device_info=None, - power_on=True): - self._restore_orig_vm_and_cleanup_orphan(instance, block_device_info, - power_on) - - def _restore_orig_vm_and_cleanup_orphan(self, instance, - block_device_info=None, - power_on=True): - # NOTE(sirp): the original vm was suffixed with '-orig'; find it using - # the old suffix, remove the suffix, then power it back on. - name_label = self._get_orig_vm_name_label(instance) - vm_ref = vm_utils.lookup(self._session, name_label) - - # NOTE(danms): if we're reverting migration in the failure case, - # make sure we don't have a conflicting vm still running here, - # as might be the case in a failed migrate-to-same-host situation - new_ref = vm_utils.lookup(self._session, instance['name']) - if vm_ref is not None: - if new_ref is not None: - self._destroy(instance, new_ref) - # Remove the '-orig' suffix (which was added in case the - # resized VM ends up on the source host, common during - # testing) - name_label = instance['name'] - vm_utils.set_vm_name_label(self._session, vm_ref, name_label) - self._attach_mapped_block_devices(instance, block_device_info) - elif new_ref is not None: - # We crashed before the -orig backup was made - vm_ref = new_ref - - if power_on and vm_utils.is_vm_shutdown(self._session, vm_ref): - self._start(instance, vm_ref) - - def finish_migration(self, context, migration, instance, disk_info, - network_info, image_meta, resize_instance, - block_device_info=None, power_on=True): - - def null_step_decorator(f): - return f - - def create_disks_step(undo_mgr, disk_image_type, image_meta, - name_label): - import_root = True - root_vol_vdi = None - if block_device_info: - LOG.debug("Block device information present: %s", - block_device_info, instance=instance) - # NOTE(alaski): Follows the basic procedure of - # vm_utils.get_vdis_for_instance() used by spawn() - for bdm in block_device_info['block_device_mapping']: - if bdm['mount_device'] == instance['root_device_name']: - connection_info = bdm['connection_info'] - _sr, root_vol_vdi = self._volumeops.connect_volume( - connection_info) - import_root = False - break - - # TODO(johngarbutt) clean up if this is not run - vdis = vm_utils.import_all_migrated_disks(self._session, instance, - import_root=import_root) 
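# NOTE(editor): each *_step in this file registers a compensating action
# with an undo manager so a failure rolls everything back in reverse order
# (undo_create_disks just below is one such action). A simplified stand-in
# for nova.utils.UndoManager, just to show the shape of the pattern:

class SimpleUndoManager(object):
    def __init__(self):
        self._undo_stack = []

    def undo_with(self, undo_fn):
        self._undo_stack.append(undo_fn)

    def rollback(self):
        # The most recently registered action is undone first.
        while self._undo_stack:
            self._undo_stack.pop()()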
-
-            if root_vol_vdi:
-                vol_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
-                                                        root_vol_vdi)
-                vdis['root'] = dict(uuid=root_vol_vdi, file=None,
-                                    ref=vol_vdi_ref, osvol=True)
-
-            def undo_create_disks():
-                eph_vdis = vdis['ephemerals']
-                root_vdi = vdis['root']
-                vdi_refs = [vdi['ref'] for vdi in eph_vdis.values()]
-                if not root_vdi.get('osvol', False):
-                    vdi_refs.append(root_vdi['ref'])
-                else:
-                    self._volumeops.safe_cleanup_from_vdis(root_vdi['ref'])
-                vm_utils.safe_destroy_vdis(self._session, vdi_refs)
-
-            undo_mgr.undo_with(undo_create_disks)
-            return vdis
-
-        def completed_callback():
-            self._update_instance_progress(context, instance,
-                                           step=5,
-                                           total_steps=RESIZE_TOTAL_STEPS)
-
-        self._spawn(context, instance, image_meta, null_step_decorator,
-                    create_disks_step, first_boot=False, injected_files=None,
-                    admin_password=None, network_info=network_info,
-                    block_device_info=block_device_info, name_label=None,
-                    rescue=False, power_on=power_on, resize=resize_instance,
-                    completed_callback=completed_callback)
-
-    def _start(self, instance, vm_ref=None, bad_volumes_callback=None,
-               start_pause=False):
-        """Power on a VM instance."""
-        vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
-        LOG.debug("Starting instance", instance=instance)
-
-        # Attached volumes that have become non-responsive will prevent a VM
-        # from starting, so scan for these before attempting to start.
-        #
-        # In order to make sure this detach is consistent (virt, BDM, cinder),
-        # we only detach in the virt-layer if a callback is provided.
-        if bad_volumes_callback:
-            bad_devices = self._volumeops.find_bad_volumes(vm_ref)
-            for device_name in bad_devices:
-                self._volumeops.detach_volume(
-                        None, instance['name'], device_name)
-
-        self._session.call_xenapi('VM.start_on', vm_ref,
-                                  self._session.host_ref,
-                                  start_pause, False)
-
-        # Allow higher layers a chance to detach bad volumes as well (in
-        # order to clean up BDM entries and detach in Cinder).
-        if bad_volumes_callback and bad_devices:
-            bad_volumes_callback(bad_devices)
-
-        # Do some operations which have to be done after start:
-        #   e.g. The vif's interim bridge won't be created until VM starts.
-        #   So the operations on the interim bridge have to be done after
-        #   start.
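# NOTE(editor): condensed shape of _start above, made runnable with stub
# callables; the point is the ordering -- bad volumes are detached before
# VM.start_on and only reported back to the caller after power-on:

def start_flow(find_bad_volumes, detach_volume, power_on,
               bad_volumes_callback=None):
    bad_devices = []
    if bad_volumes_callback:
        bad_devices = find_bad_volumes()
        for device_name in bad_devices:
            detach_volume(device_name)
    power_on()
    if bad_volumes_callback and bad_devices:
        bad_volumes_callback(bad_devices)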
- self._post_start_actions(instance) - - def _post_start_actions(self, instance): - vm_ref = vm_utils.lookup(self._session, instance['name']) - vif_refs = self._session.call_xenapi("VM.get_VIFs", vm_ref) - for vif_ref in vif_refs: - self.vif_driver.post_start_actions(instance, vif_ref) - - def _get_vdis_for_instance(self, context, instance, name_label, - image_meta, image_type, block_device_info): - """Create or connect to all virtual disks for this instance.""" - - vdis = self._connect_cinder_volumes(instance, block_device_info) - - # If we didn't get a root VDI from volumes, - # then use the Glance image as the root device - if 'root' not in vdis: - create_image_vdis = vm_utils.create_image( - context, self._session, instance, name_label, image_meta.id, - image_type, self.image_handler) - vdis.update(create_image_vdis) - - # Fetch VDI refs now so we don't have to fetch the ref multiple times - for vdi in six.itervalues(vdis): - vdi['ref'] = self._session.call_xenapi('VDI.get_by_uuid', - vdi['uuid']) - return vdis - - def _connect_cinder_volumes(self, instance, block_device_info): - """Attach all the cinder volumes described in block_device_info.""" - vdis = {} - - if block_device_info: - msg = "block device info: %s" % block_device_info - # NOTE(mriedem): block_device_info can contain an auth_password - # so we have to scrub the message before logging it. - LOG.debug(strutils.mask_password(msg), instance=instance) - root_device_name = block_device_info['root_device_name'] - - for bdm in block_device_info['block_device_mapping']: - if (block_device.strip_prefix(bdm['mount_device']) == - block_device.strip_prefix(root_device_name)): - # If we're a root-device, record that fact so we don't - # download a root image via Glance - type_ = 'root' - else: - # Otherwise, use mount_device as `type_` so that we have - # easy access to it in _attach_disks to create the VBD - type_ = bdm['mount_device'] - - conn_info = bdm['connection_info'] - _sr, vdi_uuid = self._volumeops.connect_volume(conn_info) - if vdi_uuid: - vdis[type_] = dict(uuid=vdi_uuid, file=None, osvol=True) - - return vdis - - def _update_last_dom_id(self, vm_ref): - other_config = self._session.VM.get_other_config(vm_ref) - other_config['last_dom_id'] = self._session.VM.get_domid(vm_ref) - self._session.VM.set_other_config(vm_ref, other_config) - - def _attach_vgpu(self, vm_ref, vgpu_info, instance): - if not vgpu_info: - return - grp_ref = self._session.call_xenapi("GPU_group.get_by_uuid", - vgpu_info['gpu_grp_uuid']) - type_ref = self._session.call_xenapi("VGPU_type.get_by_uuid", - vgpu_info['vgpu_type_uuid']) - # NOTE(jianghuaw): set other-config with "nova-instance-uuid" to - # declare which nova instance owns this vGPU. That should be useful - # for tracking purposes. '0' is the device id for VGPU. As we only - # support one VGPU at the moment, so only '0' is the valid value. - # Refer to https://xapi-project.github.io/xen-api/classes/vgpu.html - # for this Xen API of 'VGPU.create'. 
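# NOTE(editor): for reference, the shape of the vgpu_info mapping consumed
# just below (uuid values are illustrative, not real records):
example_vgpu_info = {
    'gpu_grp_uuid': '7f3b2e52-28f6-4b53-ae37-d5b7cf17ee70',    # GPU_group
    'vgpu_type_uuid': 'a91c00e2-7d59-44a8-9d9e-1e886b9d4b6a',  # VGPU_type
}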
- self._session.call_xenapi('VGPU.create', vm_ref, grp_ref, '0', - {'nova-instance-uuid': instance['uuid']}, - type_ref) - - def spawn(self, context, instance, image_meta, injected_files, - admin_password, network_info=None, block_device_info=None, - vgpu_info=None, name_label=None, rescue=False): - - if block_device_info: - LOG.debug("Block device information present: %s", - block_device_info, instance=instance) - if block_device_info and not block_device_info['root_device_name']: - block_device_info['root_device_name'] = self.default_root_dev - - step = make_step_decorator(context, instance, - self._update_instance_progress) - - @step - def create_disks_step(undo_mgr, disk_image_type, image_meta, - name_label): - vdis = self._get_vdis_for_instance(context, instance, name_label, - image_meta, disk_image_type, - block_device_info) - - def undo_create_disks(): - vdi_refs = [vdi['ref'] for vdi in vdis.values() - if not vdi.get('osvol')] - vm_utils.safe_destroy_vdis(self._session, vdi_refs) - vol_vdi_refs = [vdi['ref'] for vdi in vdis.values() - if vdi.get('osvol')] - self._volumeops.safe_cleanup_from_vdis(vol_vdi_refs) - - undo_mgr.undo_with(undo_create_disks) - return vdis - - self._spawn(context, instance, image_meta, step, create_disks_step, - True, injected_files, admin_password, network_info, - block_device_info, vgpu_info, name_label, rescue) - - def _spawn(self, context, instance, image_meta, step, create_disks_step, - first_boot, injected_files=None, admin_password=None, - network_info=None, block_device_info=None, vgpu_info=None, - name_label=None, rescue=False, power_on=True, resize=True, - completed_callback=None): - if name_label is None: - name_label = instance['name'] - - self._ensure_instance_name_unique(name_label) - self._ensure_enough_free_mem(instance) - - def attach_disks(undo_mgr, vm_ref, vdis, disk_image_type): - if image_meta.properties.get('hw_ipxe_boot', False): - if 'iso' in vdis: - vm_utils.handle_ipxe_iso( - self._session, instance, vdis['iso'], network_info) - else: - LOG.warning('ipxe_boot is True but no ISO image found', - instance=instance) - - if resize: - self._resize_up_vdis(instance, vdis) - - instance.device_metadata = self._save_device_metadata( - context, instance, block_device_info) - self._attach_disks(context, instance, image_meta, vm_ref, - name_label, vdis, disk_image_type, - network_info, rescue, - admin_password, injected_files) - if not first_boot: - self._attach_mapped_block_devices(instance, - block_device_info) - - def attach_pci_devices(undo_mgr, vm_ref): - dev_to_passthrough = "" - devices = pci_manager.get_instance_pci_devs(instance) - for d in devices: - pci_address = d["address"] - if pci_address.count(":") == 1: - pci_address = "0000:" + pci_address - dev_to_passthrough += ",0/" + pci_address - - # Remove the first comma if string is not empty. - # Note(guillaume-thouvenin): If dev_to_passthrough is empty, we - # don't need to update other_config. 
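# NOTE(editor): worked example of the passthrough string assembled above
# (addresses are illustrative). An address with a single ':' lacks its
# domain and gets '0000:' prepended; each device is encoded as ',0/<addr>':

def build_pci_passthrough(addresses):
    out = ""
    for pci_address in addresses:
        if pci_address.count(":") == 1:
            pci_address = "0000:" + pci_address
        out += ",0/" + pci_address
    return out

assert (build_pci_passthrough(['0000:04:00.1', '05:00.2']) ==
        ',0/0000:04:00.1,0/0000:05:00.2')
# The leading comma is stripped just below, before the value is written
# to the VM's other-config 'pci' key.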
- if dev_to_passthrough: - vm_utils.set_other_config_pci(self._session, - vm_ref, - dev_to_passthrough[1:]) - - @step - def determine_disk_image_type_step(undo_mgr): - return vm_utils.determine_disk_image_type(image_meta) - - @step - def create_kernel_ramdisk_step(undo_mgr): - kernel_file, ramdisk_file = vm_utils.create_kernel_and_ramdisk( - context, self._session, instance, name_label) - - def undo_create_kernel_ramdisk(): - vm_utils.destroy_kernel_ramdisk(self._session, instance, - kernel_file, ramdisk_file) - - undo_mgr.undo_with(undo_create_kernel_ramdisk) - return kernel_file, ramdisk_file - - @step - def create_vm_record_step(undo_mgr, disk_image_type, - kernel_file, ramdisk_file): - vm_ref = self._create_vm_record(context, instance, name_label, - disk_image_type, kernel_file, - ramdisk_file, image_meta, rescue) - - def undo_create_vm(): - self._destroy(instance, vm_ref, network_info=network_info) - - undo_mgr.undo_with(undo_create_vm) - return vm_ref - - @step - def attach_devices_step(undo_mgr, vm_ref, vdis, disk_image_type, - vgpu_info): - attach_disks(undo_mgr, vm_ref, vdis, disk_image_type) - attach_pci_devices(undo_mgr, vm_ref) - # NOTE(jianghuaw): in XAPI, the VGPU record is associated with a - # VM since creation. The record will be destroyed automatically - # once VM is destroyed. So there is no need to add any additional - # undo functions for VGPU. - self._attach_vgpu(vm_ref, vgpu_info, instance) - - if rescue: - # NOTE(johannes): Attach disks from original VM to rescue VM now, - # before booting the VM, since we can't hotplug block devices - # on non-PV guests - @step - def attach_orig_disks_step(undo_mgr, vm_ref): - vbd_refs = self._attach_orig_disks(instance, vm_ref) - - def undo_attach_orig_disks(): - # Destroy the VBDs in preparation to re-attach the VDIs - # to its original VM. (does not delete VDI) - for vbd_ref in vbd_refs: - vm_utils.destroy_vbd(self._session, vbd_ref) - - undo_mgr.undo_with(undo_attach_orig_disks) - - @step - def inject_instance_data_step(undo_mgr, vm_ref, vdis): - self._inject_instance_metadata(instance, vm_ref) - self._inject_auto_disk_config(instance, vm_ref) - # NOTE: We add the hostname here so windows PV tools - # can pick it up during booting - if first_boot: - self._inject_hostname(instance, vm_ref, rescue) - self._file_inject_vm_settings(instance, vm_ref, vdis, network_info) - self.inject_network_info(instance, network_info, vm_ref) - - @step - def setup_network_step(undo_mgr, vm_ref): - self._create_vifs(instance, vm_ref, network_info) - - @step - def start_paused_step(undo_mgr, vm_ref): - if power_on: - self._start(instance, vm_ref, start_pause=True) - - @step - def boot_and_configure_instance_step(undo_mgr, vm_ref): - self._unpause_and_wait(vm_ref, instance, power_on) - if first_boot: - self._configure_new_instance_with_agent(instance, vm_ref, - injected_files, admin_password) - self._remove_hostname(instance, vm_ref) - - undo_mgr = utils.UndoManager() - try: - # NOTE(sirp): The create_disks() step will potentially take a - # *very* long time to complete since it has to fetch the image - # over the network and images can be several gigs in size. To - # avoid progress remaining at 0% for too long, make sure the - # first step is something that completes rather quickly. 
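# NOTE(editor): the try block below overlaps VIF creation with waiting for
# Neutron's network-vif-plugged notifications, bounded by a deadline. The
# wait-with-deadline shape, reduced to eventlet primitives (receive_event
# is a placeholder for however events actually arrive):

import eventlet


def wait_for_events(receive_event, expected_events, deadline):
    # Raises eventlet.timeout.Timeout if the deadline expires first.
    with eventlet.timeout.Timeout(deadline):
        remaining = set(expected_events)
        while remaining:
            remaining.discard(receive_event())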
- disk_image_type = determine_disk_image_type_step(undo_mgr) - - vdis = create_disks_step(undo_mgr, disk_image_type, image_meta, - name_label) - kernel_file, ramdisk_file = create_kernel_ramdisk_step(undo_mgr) - - vm_ref = create_vm_record_step(undo_mgr, disk_image_type, - kernel_file, ramdisk_file) - attach_devices_step(undo_mgr, vm_ref, vdis, disk_image_type, - vgpu_info) - - inject_instance_data_step(undo_mgr, vm_ref, vdis) - - # if use neutron, prepare waiting event from neutron - # first_boot is True in new booted instance - # first_boot is False in migration and we don't waiting - # for neutron event regardless of whether or not it is - # migrated to another host, if unplug VIFs locally, the - # port status may not changed in neutron side and we - # cannot get the vif plug event from neutron - # rescue is True in rescued instance and the port in neutron side - # won't change, so we don't wait event from neutron - timeout = CONF.vif_plugging_timeout - events = self._get_neutron_events(network_info, power_on, - first_boot, rescue) - try: - with self._virtapi.wait_for_instance_event( - instance, events, deadline=timeout, - error_callback=self._neutron_failed_callback): - LOG.debug("wait for instance event:%s", events, - instance=instance) - setup_network_step(undo_mgr, vm_ref) - if rescue: - attach_orig_disks_step(undo_mgr, vm_ref) - start_paused_step(undo_mgr, vm_ref) - except eventlet.timeout.Timeout: - self._handle_neutron_event_timeout(instance, undo_mgr) - - boot_and_configure_instance_step(undo_mgr, vm_ref) - if completed_callback: - completed_callback() - except Exception: - msg = _("Failed to spawn, rolling back") - undo_mgr.rollback_and_reraise(msg=msg, instance=instance) - - def _handle_neutron_event_timeout(self, instance, undo_mgr): - # We didn't get callback from Neutron within given time - LOG.warning('Timeout waiting for vif plugging callback', - instance=instance) - if CONF.vif_plugging_is_fatal: - raise exception.VirtualInterfaceCreateException() - - def _unpause_and_wait(self, vm_ref, instance, power_on): - if power_on: - LOG.debug("Update instance when power on", instance=instance) - self._session.VM.unpause(vm_ref) - self._wait_for_instance_to_start(instance, vm_ref) - self._update_last_dom_id(vm_ref) - - def _neutron_failed_callback(self, event_name, instance): - LOG.warning('Neutron Reported failure on event %(event)s', - {'event': event_name}, instance=instance) - if CONF.vif_plugging_is_fatal: - raise exception.VirtualInterfaceCreateException() - - def _get_neutron_events(self, network_info, power_on, first_boot, rescue): - # Only get network-vif-plugged events with VIF's status is not active. - # With VIF whose status is active, neutron may not notify such event. 
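# NOTE(editor): worked example of the filter below -- only VIFs that are
# explicitly inactive produce events to wait on (ids are illustrative):
example_network_info = [
    {'id': 'vif-1', 'active': False},  # -> ('network-vif-plugged', 'vif-1')
    {'id': 'vif-2', 'active': True},   # skipped
    {'id': 'vif-3'},                   # no 'active' key: treated as active
]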
- # Don't get network-vif-plugged events from rescued VM or migrated VM - timeout = CONF.vif_plugging_timeout - if power_on and timeout and first_boot and not rescue: - return [('network-vif-plugged', vif['id']) - for vif in network_info if vif.get('active', True) is False] - else: - return [] - - def _attach_orig_disks(self, instance, vm_ref): - orig_vm_ref = vm_utils.lookup(self._session, instance['name']) - orig_vdi_refs = self._find_vdi_refs(orig_vm_ref, - exclude_volumes=True) - - # Attach original root disk - root_vdi_ref = orig_vdi_refs.get(DEVICE_ROOT) - if not root_vdi_ref: - raise exception.NotFound(_("Unable to find root VBD/VDI for VM")) - - vbd_ref = vm_utils.create_vbd(self._session, vm_ref, root_vdi_ref, - DEVICE_RESCUE, bootable=False) - vbd_refs = [vbd_ref] - - # Attach original swap disk - swap_vdi_ref = orig_vdi_refs.get(DEVICE_SWAP) - if swap_vdi_ref: - vbd_ref = vm_utils.create_vbd(self._session, vm_ref, swap_vdi_ref, - DEVICE_SWAP, bootable=False) - vbd_refs.append(vbd_ref) - - # Attach original ephemeral disks - for userdevice, vdi_ref in orig_vdi_refs.items(): - if userdevice >= DEVICE_EPHEMERAL: - vbd_ref = vm_utils.create_vbd(self._session, vm_ref, vdi_ref, - userdevice, bootable=False) - vbd_refs.append(vbd_ref) - - return vbd_refs - - def _file_inject_vm_settings(self, instance, vm_ref, vdis, network_info): - if CONF.flat_injected: - vm_utils.preconfigure_instance(self._session, instance, - vdis['root']['ref'], network_info) - - def _ensure_instance_name_unique(self, name_label): - vm_ref = vm_utils.lookup(self._session, name_label) - if vm_ref is not None: - raise exception.InstanceExists(name=name_label) - - def _ensure_enough_free_mem(self, instance): - if not vm_utils.is_enough_free_mem(self._session, instance): - raise exception.InsufficientFreeMemory(uuid=instance['uuid']) - - def _create_vm_record(self, context, instance, name_label, disk_image_type, - kernel_file, ramdisk_file, image_meta, rescue=False): - """Create the VM record in Xen, making sure that we do not create - a duplicate name-label. Also do a rough sanity check on memory - to try to short-circuit a potential failure later. (The memory - check only accounts for running VMs, so it can miss other builds - that are in progress.) - """ - mode = vm_utils.determine_vm_mode(instance, disk_image_type) - # NOTE(tpownall): If rescue mode then we should try to pull the vm_mode - # value from the image properties to ensure the vm is built properly. - if rescue: - rescue_vm_mode = image_meta.properties.get('hw_vm_mode', None) - if rescue_vm_mode is None: - LOG.debug("vm_mode not found in rescue image properties." 
- "Setting vm_mode to %s", mode, instance=instance) - else: - mode = obj_fields.VMMode.canonicalize(rescue_vm_mode) - - if instance.vm_mode != mode: - # Update database with normalized (or determined) value - instance.vm_mode = mode - instance.save() - - device_id = vm_utils.get_vm_device_id(self._session, image_meta) - use_pv_kernel = (mode == obj_fields.VMMode.XEN) - LOG.debug("Using PV kernel: %s", use_pv_kernel, instance=instance) - vm_ref = vm_utils.create_vm(self._session, instance, name_label, - kernel_file, ramdisk_file, - use_pv_kernel, device_id) - return vm_ref - - def _attach_disks(self, context, instance, image_meta, vm_ref, name_label, - vdis, disk_image_type, network_info, rescue=False, - admin_password=None, files=None): - flavor = instance.get_flavor() - - # Attach (required) root disk - if disk_image_type == vm_utils.ImageType.DISK_ISO: - # DISK_ISO needs two VBDs: the ISO disk and a blank RW disk - root_disk_size = flavor.root_gb - if root_disk_size > 0: - vm_utils.generate_iso_blank_root_disk(self._session, instance, - vm_ref, DEVICE_ROOT, name_label, root_disk_size) - - cd_vdi = vdis.pop('iso') - vm_utils.attach_cd(self._session, vm_ref, cd_vdi['ref'], - DEVICE_CD) - else: - root_vdi = vdis['root'] - - auto_disk_config = instance['auto_disk_config'] - # NOTE(tpownall): If rescue mode we need to ensure that we're - # pulling the auto_disk_config value from the image properties so - # that we can pull it from the rescue_image_ref. - if rescue: - if not image_meta.properties.obj_attr_is_set( - "hw_auto_disk_config"): - LOG.debug("'hw_auto_disk_config' value not found in " - "rescue image_properties. Setting value to %s", - auto_disk_config, instance=instance) - else: - auto_disk_config = strutils.bool_from_string( - image_meta.properties.hw_auto_disk_config) - - if auto_disk_config: - LOG.debug("Auto configuring disk, attempting to " - "resize root disk...", instance=instance) - vm_utils.try_auto_configure_disk(self._session, - root_vdi['ref'], - flavor.root_gb) - - vm_utils.create_vbd(self._session, vm_ref, root_vdi['ref'], - DEVICE_ROOT, bootable=True, - osvol=root_vdi.get('osvol')) - - # Attach (optional) additional block-devices - for type_, vdi_info in vdis.items(): - # Additional block-devices for boot use their device-name as the - # type. - if not type_.startswith('/dev'): - continue - - # Convert device name to user device number, e.g. 
/dev/xvdb -> 1 - userdevice = ord(block_device.strip_prefix(type_)) - ord('a') - vm_utils.create_vbd(self._session, vm_ref, vdi_info['ref'], - userdevice, bootable=False, - osvol=vdi_info.get('osvol')) - - # For rescue, swap and ephemeral disks get attached in - # _attach_orig_disks - - # Attach (optional) swap disk - swap_mb = flavor.swap - if not rescue and swap_mb: - vm_utils.generate_swap(self._session, instance, vm_ref, - DEVICE_SWAP, name_label, swap_mb) - - ephemeral_gb = flavor.ephemeral_gb - if not rescue and ephemeral_gb: - ephemeral_vdis = vdis.get('ephemerals') - if ephemeral_vdis: - # attach existing (migrated) ephemeral disks - for userdevice, ephemeral_vdi in ephemeral_vdis.items(): - vm_utils.create_vbd(self._session, vm_ref, - ephemeral_vdi['ref'], - userdevice, bootable=False) - else: - # create specified ephemeral disks - vm_utils.generate_ephemeral(self._session, instance, vm_ref, - DEVICE_EPHEMERAL, name_label, - ephemeral_gb) - - # Attach (optional) configdrive v2 disk - if configdrive.required_by(instance): - vm_utils.generate_configdrive(self._session, context, - instance, vm_ref, - DEVICE_CONFIGDRIVE, - network_info, - admin_password=admin_password, - files=files) - - @staticmethod - def _prepare_disk_metadata(bdm): - """Returns the disk metadata with dual disk buses - ide and xen. More - details about Xen device number can be found in - http://xenbits.xen.org/docs/4.2-testing/misc/vbd-interface.txt - """ - path = bdm.device_name - disk_num = volume_utils.get_device_number(path) - - xen0 = objects.XenDeviceBus(address=("00%02d00" % disk_num)) - - registry = ('HKLM\\SYSTEM\\ControlSet001\\Enum\\SCSI\\' - 'Disk&Ven_XENSRC&Prod_PVDISK\\') - vbd_prefix = '/sys/devices/vbd-' - - if disk_num < 4: - ide = objects.IDEDeviceBus( - address=("%d:%d" % (disk_num / 2, disk_num % 2))) - - xen1 = objects.XenDeviceBus( - address=("%d" % (202 << 8 | disk_num << 4))) - xen2 = objects.XenDeviceBus() - if disk_num < 2: - xen2.address = "%d" % (3 << 8 | disk_num << 6) - else: - xen2.address = "%d" % (22 << 8 | (disk_num - 2) << 6) - - return [objects.DiskMetadata(path=path, bus=ide, tags=[bdm.tag]), - objects.DiskMetadata(path=registry + xen0.address, - bus=xen0, tags=[bdm.tag]), - objects.DiskMetadata(path=vbd_prefix + xen1.address, - bus=xen1, tags=[bdm.tag]), - objects.DiskMetadata(path=vbd_prefix + xen2.address, - bus=xen2, tags=[bdm.tag])] - else: - xen1 = objects.XenDeviceBus() - - if disk_num < 16: - xen1.address = "%d" % (202 << 8 | disk_num << 4) - else: - xen1.address = "%d" % (1 << 28 | disk_num << 8) - - return [objects.DiskMetadata(path=registry + xen0.address, - bus=xen0, tags=[bdm.tag]), - objects.DiskMetadata(path=vbd_prefix + xen1.address, - bus=xen1, tags=[bdm.tag])] - - def _save_device_metadata(self, context, instance, block_device_info): - """Builds a metadata object for instance devices, that maps the user - provided tag to the hypervisor assigned device address. 
- """ - vifs = objects.VirtualInterfaceList.get_by_instance_uuid( - context, instance["uuid"]) - - metadata = [] - for vif in vifs: - if 'tag' in vif and vif.tag: - device = objects.NetworkInterfaceMetadata( - mac=vif.address, - bus=objects.PCIDeviceBus(), - tags=[vif.tag]) - metadata.append(device) - - block_device_mapping = virt_driver.block_device_info_get_mapping( - block_device_info) - if block_device_mapping: - # TODO(mriedem): We should be able to get the BDMs out of the - # block_device_info['block_device_mapping'] field, however, that - # is a list of DriverVolumeBlockDevice objects and do not currently - # proxy the 'tag' attribute. - bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( - context, instance["uuid"]) - for bdm in bdms: - if 'tag' in bdm and bdm.tag: - metadata.extend(self._prepare_disk_metadata(bdm)) - - if metadata: - return objects.InstanceDeviceMetadata(devices=metadata) - - def _wait_for_instance_to_start(self, instance, vm_ref): - LOG.debug('Waiting for instance state to become running', - instance=instance) - expiration = time.time() + CONF.xenserver.running_timeout - while time.time() < expiration: - state = vm_utils.get_power_state(self._session, vm_ref) - if state == power_state.RUNNING: - break - greenthread.sleep(0.5) - - def _configure_new_instance_with_agent(self, instance, vm_ref, - injected_files, admin_password): - if not self.agent_enabled(instance): - LOG.debug("Skip agent setup, not enabled.", instance=instance) - return - - agent = self._get_agent(instance, vm_ref) - - version = agent.get_version() - if not version: - LOG.debug("Skip agent setup, unable to contact agent.", - instance=instance) - return - - LOG.debug('Detected agent version: %s', version, instance=instance) - - # NOTE(johngarbutt) the agent object allows all of - # the following steps to silently fail - agent.inject_ssh_key() - - if injected_files: - agent.inject_files(injected_files) - - if admin_password: - agent.set_admin_password(admin_password) - - agent.resetnetwork() - agent.update_if_needed(version) - - def _get_vm_opaque_ref(self, instance, check_rescue=False): - """Get xapi OpaqueRef from a db record. - :param check_rescue: if True will return the 'name'-rescue vm if it - exists, instead of just 'name' - """ - vm_ref = vm_utils.lookup(self._session, instance['name'], check_rescue) - if vm_ref is None: - raise exception.InstanceNotFound(instance_id=instance['name']) - return vm_ref - - def _acquire_bootlock(self, vm): - """Prevent an instance from booting.""" - self._session.call_xenapi( - "VM.set_blocked_operations", - vm, - {"start": ""}) - - def _release_bootlock(self, vm): - """Allow an instance to boot.""" - self._session.call_xenapi( - "VM.remove_from_blocked_operations", - vm, - "start") - - def snapshot(self, context, instance, image_id, update_task_state): - """Create snapshot from a running VM instance. - - :param context: request context - :param instance: instance to be snapshotted - :param image_id: id of image to upload to - - Steps involved in a XenServer snapshot: - - 1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This - creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI, - Snapshot VHD - - 2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to - a 'base-copy' VDI. The base_copy is immutable and may be chained - with other base_copies. If chained, the base_copies - coalesce together, so, we must wait for this coalescing to occur to - get a stable representation of the data on disk. - - 3. 
Push-to-data-store: Once coalesced, we call - 'image_upload_handler' to upload the images. - - """ - vm_ref = self._get_vm_opaque_ref(instance) - label = "%s-snapshot" % instance['name'] - - start_time = timeutils.utcnow() - with vm_utils.snapshot_attached_here( - self._session, instance, vm_ref, label, - post_snapshot_callback=update_task_state) as vdi_uuids: - update_task_state(task_state=task_states.IMAGE_UPLOADING, - expected_state=task_states.IMAGE_PENDING_UPLOAD) - if self.image_upload_handler: - # TODO(jianghuaw): remove this branch once the - # deprecated option of "image_upload_handler" - # gets removed in the next release - Stein. - self.image_upload_handler.upload_image(context, - self._session, - instance, - image_id, - vdi_uuids, - ) - else: - self.image_handler.upload_image(context, - self._session, - instance, - image_id, - vdi_uuids, - ) - - duration = timeutils.delta_seconds(start_time, timeutils.utcnow()) - LOG.debug("Finished snapshot and upload for VM, duration: " - "%(duration).2f secs for image %(image_id)s", - {'image_id': image_id, 'duration': duration}, - instance=instance) - - def post_interrupted_snapshot_cleanup(self, context, instance): - """Cleans up any resources left after a failed snapshot.""" - vm_ref = self._get_vm_opaque_ref(instance) - vm_utils.remove_old_snapshots(self._session, instance, vm_ref) - - def _get_orig_vm_name_label(self, instance): - return instance['name'] + '-orig' - - def _update_instance_progress(self, context, instance, step, total_steps): - """Update instance progress percent to reflect current step number - """ - # FIXME(sirp): for now we're taking a KISS approach to instance - # progress: - # Divide the action's workflow into discrete steps and "bump" the - # instance's progress field as each step is completed. - # - # For a first cut this should be fine, however, for large VM images, - # the get_vdis_for_instance step begins to dominate the equation. A - # better approximation would use the percentage of the VM image that - # has been streamed to the destination host. 
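# NOTE(editor): the formula below, pulled out for a quick sanity check.
# With RESIZE_TOTAL_STEPS = 5, a migration reports 20/40/60/80/100%:

def _progress_pct(step, total_steps):
    return round(float(step) / total_steps * 100)

assert [_progress_pct(s, 5) for s in range(1, 6)] == [20, 40, 60, 80, 100]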
- progress = round(float(step) / total_steps * 100) - LOG.debug("Updating progress to %d", progress, - instance=instance) - instance.progress = progress - instance.save() - - def _resize_ensure_vm_is_shutdown(self, instance, vm_ref): - if vm_utils.is_vm_shutdown(self._session, vm_ref): - LOG.debug("VM was already shutdown.", instance=instance) - return - - if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref): - LOG.debug("Clean shutdown did not complete successfully, " - "trying hard shutdown.", instance=instance) - if not vm_utils.hard_shutdown_vm(self._session, instance, vm_ref): - raise exception.ResizeError( - reason=_("Unable to terminate instance.")) - - def _migrate_disk_resizing_down(self, context, instance, dest, - flavor, vm_ref, sr_path): - step = make_step_decorator(context, instance, - self._update_instance_progress, - total_offset=1) - - @step - def fake_step_to_match_resizing_up(): - pass - - @step - def rename_and_power_off_vm(undo_mgr): - self._resize_ensure_vm_is_shutdown(instance, vm_ref) - self._apply_orig_vm_name_label(instance, vm_ref) - - def restore_orig_vm(): - # Do not need to restore block devices, not yet been removed - self._restore_orig_vm_and_cleanup_orphan(instance) - - undo_mgr.undo_with(restore_orig_vm) - - @step - def create_copy_vdi_and_resize(undo_mgr, old_vdi_ref): - new_vdi_ref, new_vdi_uuid = vm_utils.resize_disk(self._session, - instance, old_vdi_ref, flavor) - - def cleanup_vdi_copy(): - vm_utils.destroy_vdi(self._session, new_vdi_ref) - - undo_mgr.undo_with(cleanup_vdi_copy) - - return new_vdi_ref, new_vdi_uuid - - @step - def transfer_vhd_to_dest(new_vdi_ref, new_vdi_uuid): - vm_utils.migrate_vhd(self._session, instance, new_vdi_uuid, - dest, sr_path, 0) - # Clean up VDI now that it's been copied - vm_utils.destroy_vdi(self._session, new_vdi_ref) - - undo_mgr = utils.UndoManager() - try: - fake_step_to_match_resizing_up() - rename_and_power_off_vm(undo_mgr) - old_vdi_ref, _ignore = vm_utils.get_vdi_for_vm_safely( - self._session, vm_ref) - new_vdi_ref, new_vdi_uuid = create_copy_vdi_and_resize( - undo_mgr, old_vdi_ref) - transfer_vhd_to_dest(new_vdi_ref, new_vdi_uuid) - except Exception as error: - LOG.exception("_migrate_disk_resizing_down failed. Restoring " - "orig vm", instance=instance) - undo_mgr._rollback() - raise exception.InstanceFaultRollback(error) - - def _migrate_disk_resizing_up(self, context, instance, dest, vm_ref, - sr_path): - step = make_step_decorator(context, - instance, - self._update_instance_progress, - total_offset=1) - """ - NOTE(johngarbutt) Understanding how resize up works. - - For resize up, we attempt to minimize the amount of downtime - for users by copying snapshots of their disks, while their - VM is still running. - - It is worth noting, that migrating the snapshot, means migrating - the whole VHD chain up to, but not including, the leaf VHD the VM - is still writing to. - - Once the snapshots have been migrated, we power down the VM - and migrate all the disk changes since the snapshots were taken. - - In addition, the snapshots are taken at the latest possible point, - to help minimize the time it takes to migrate the disk changes - after the VM has been turned off. - - Before starting to migrate any of the disks, we rename the VM, - to -orig, in case we attempt to migrate the VM - back onto this host, and so once we have completed the migration - of the disk, confirm/rollback migrate can work in the usual way. 
-
-        If there is a failure at any point, we need to rollback to the
-        position we were in before starting to migrate. In particular,
-        we need to delete any snapshot VDIs that may have been created,
-        and restore the VM back to its original name.
-        """
-
-        @step
-        def fake_step_to_show_snapshot_complete():
-            pass
-
-        @step
-        def transfer_immutable_vhds(root_vdi_uuids):
-            immutable_root_vdi_uuids = root_vdi_uuids[1:]
-            for vhd_num, vdi_uuid in enumerate(immutable_root_vdi_uuids,
-                                               start=1):
-                vm_utils.migrate_vhd(self._session, instance, vdi_uuid, dest,
-                                     sr_path, vhd_num)
-            LOG.debug("Migrated root base vhds", instance=instance)
-
-        def _process_ephemeral_chain_recursive(ephemeral_chains,
-                                               active_vdi_uuids,
-                                               ephemeral_disk_index=0):
-            # This method is called several times, recursively.
-            # The first phase snapshots the ephemeral disks, and
-            # migrates the read-only VHD files.
-            # The final call into this method calls
-            # power_down_and_transfer_leaf_vhds
-            # to turn off the VM and copy the rest of the VHDs.
-            number_of_chains = len(ephemeral_chains)
-            if number_of_chains == 0:
-                # If we get here, we have snapshotted and migrated
-                # all the ephemeral disks, so it's time to power down
-                # and complete the migration of the diffs since the snapshot
-                LOG.debug("Migrated all base vhds.", instance=instance)
-                return power_down_and_transfer_leaf_vhds(active_root_vdi_uuid,
-                                                         active_vdi_uuids)
-
-            remaining_chains = []
-            if number_of_chains > 1:
-                remaining_chains = ephemeral_chains[1:]
-
-            userdevice = int(DEVICE_EPHEMERAL) + ephemeral_disk_index
-
-            # Ensure we are not snapshotting a volume
-            if not volume_utils.is_booted_from_volume(self._session, vm_ref,
-                                                      userdevice):
-
-                # Here we take a snapshot of the ephemeral disk,
-                # and migrate all VHDs in the chain that are not being written
-                # to. Once that is completed, we call back into this method to
-                # either:
-                # - migrate any remaining ephemeral disks
-                # - or, if all disks are migrated, we power down and complete
-                #   the migration by copying the diffs since all the snapshots
-                #   were taken
-
-                with vm_utils.snapshot_attached_here(self._session, instance,
-                        vm_ref, label, str(userdevice)) as chain_vdi_uuids:
-
-                    # remember the active vdi, we will migrate it later
-                    vdi_ref, vm_vdi_rec = vm_utils.get_vdi_for_vm_safely(
-                        self._session, vm_ref, str(userdevice))
-                    active_uuid = vm_vdi_rec['uuid']
-                    active_vdi_uuids.append(active_uuid)
-
-                    # migrate inactive vhds
-                    inactive_vdi_uuids = chain_vdi_uuids[1:]
-                    ephemeral_disk_number = ephemeral_disk_index + 1
-                    for seq_num, vdi_uuid in enumerate(inactive_vdi_uuids,
-                                                       start=1):
-                        vm_utils.migrate_vhd(self._session, instance, vdi_uuid,
-                                             dest, sr_path, seq_num,
-                                             ephemeral_disk_number)
-
-                    LOG.debug("Read-only VHDs migrated for disk: %s",
-                              userdevice, instance=instance)
-
-            # This method is recursive, so we will increment our index
-            # and process again until the chains are empty.
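_process_ephemeral_chain_recursive peels one ephemeral chain off the list per call and recurses on the remainder; the empty-list base case hands off to power_down_and_transfer_leaf_vhds. A simplified, self-contained sketch of that recursion shape (the chain contents and the print calls are stand-ins for the real snapshot/migrate work):

# Simplified sketch of the recursion shape used above: process the head
# chain, then recurse on the tail; an empty list is the terminal phase.
def process_chains(chains, processed=None, index=0):
    if processed is None:
        processed = []
    if not chains:
        print("all chains done, final phase for:", processed)
        return processed
    head, tail = chains[0], chains[1:]
    processed.append("disk-%d:%s" % (index, head))
    return process_chains(tail, processed, index + 1)

process_chains([["vhd-a", "vhd-b"], ["vhd-c"]])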
-            ephemeral_disk_index = ephemeral_disk_index + 1
-            return _process_ephemeral_chain_recursive(remaining_chains,
-                                                      active_vdi_uuids,
-                                                      ephemeral_disk_index)
-
-        @step
-        def transfer_ephemeral_disks_then_all_leaf_vdis():
-            ephemeral_chains = vm_utils.get_all_vdi_uuids_for_vm(
-                self._session, vm_ref,
-                min_userdevice=int(DEVICE_EPHEMERAL))
-
-            if ephemeral_chains:
-                ephemeral_chains = list(ephemeral_chains)
-            else:
-                ephemeral_chains = []
-
-            _process_ephemeral_chain_recursive(ephemeral_chains, [])
-
-        @step
-        def power_down_and_transfer_leaf_vhds(root_vdi_uuid,
-                                              ephemeral_vdi_uuids=None):
-            self._resize_ensure_vm_is_shutdown(instance, vm_ref)
-            if root_vdi_uuid is not None:
-                vm_utils.migrate_vhd(self._session, instance, root_vdi_uuid,
-                                     dest, sr_path, 0)
-            if ephemeral_vdi_uuids:
-                for ephemeral_disk_number, ephemeral_vdi_uuid in enumerate(
-                        ephemeral_vdi_uuids, start=1):
-                    vm_utils.migrate_vhd(self._session, instance,
-                                         ephemeral_vdi_uuid, dest,
-                                         sr_path, 0, ephemeral_disk_number)
-
-        self._apply_orig_vm_name_label(instance, vm_ref)
-        try:
-            label = "%s-snapshot" % instance['name']
-
-            if volume_utils.is_booted_from_volume(self._session, vm_ref):
-                LOG.debug('Not snapshotting root disk since it is a volume',
-                          instance=instance)
-                # NOTE(alaski): This is done twice to match the number of
-                # defined steps.
-                fake_step_to_show_snapshot_complete()
-                fake_step_to_show_snapshot_complete()
-                # NOTE(alaski): This is set to None to avoid transferring the
-                # VHD in power_down_and_transfer_leaf_vhds.
-                active_root_vdi_uuid = None
-                # snapshot and transfer all ephemeral disks
-                # then power down and transfer any diffs since
-                # the snapshots were taken
-                transfer_ephemeral_disks_then_all_leaf_vdis()
-                return
-
-            with vm_utils.snapshot_attached_here(
-                    self._session, instance, vm_ref, label) as root_vdi_uuids:
-                # NOTE(johngarbutt) snapshot attached here will delete
-                # the snapshot if an error occurs
-                fake_step_to_show_snapshot_complete()
-
-                # transfer all the non-active VHDs in the root disk chain
-                transfer_immutable_vhds(root_vdi_uuids)
-                vdi_ref, vm_vdi_rec = vm_utils.get_vdi_for_vm_safely(
-                    self._session, vm_ref)
-                active_root_vdi_uuid = vm_vdi_rec['uuid']
-
-                # snapshot and transfer all ephemeral disks
-                # then power down and transfer any diffs since
-                # the snapshots were taken
-                transfer_ephemeral_disks_then_all_leaf_vdis()
-
-        except Exception as error:
-            LOG.exception(
-                "_migrate_disk_resizing_up failed; restoring orig vm due to: "
-                "%s.", error, instance=instance)
-            try:
-                self._restore_orig_vm_and_cleanup_orphan(instance)
-                # TODO(johngarbutt) should also clean up VHDs at destination
-            except Exception as rollback_error:
-                LOG.warning(
-                    "_migrate_disk_resizing_up failed to rollback: %s",
-                    rollback_error, instance=instance)
-            raise exception.InstanceFaultRollback(error)
-
-    def _apply_orig_vm_name_label(self, instance, vm_ref):
-        # NOTE(sirp): in case we're resizing to the same host (for dev
-        # purposes), apply a suffix to name-label so the two VM records
-        # extant until a confirm_resize don't collide.
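The NOTE(sirp) comment above refers to the suffix applied via _get_orig_vm_name_label. A trivial sketch of the collision-avoidance rename, assuming only that the suffix is appended to the instance's name-label:

# Suffix the name-label so that, when resizing to the same host, the
# original VM record and the new one can coexist until confirm_resize.
def orig_name_label(instance_name):
    return instance_name + '-orig'

print(orig_name_label('instance-00000001'))  # instance-00000001-orig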
- name_label = self._get_orig_vm_name_label(instance) - vm_utils.set_vm_name_label(self._session, vm_ref, name_label) - - def _ensure_not_resize_down_ephemeral(self, instance, flavor): - old_gb = instance.flavor.ephemeral_gb - new_gb = flavor.ephemeral_gb - - if old_gb > new_gb: - reason = _("Can't resize down ephemeral disks.") - raise exception.ResizeError(reason) - - def migrate_disk_and_power_off(self, context, instance, dest, - flavor, block_device_info): - """Copies a VHD from one host machine to another, possibly - resizing filesystem beforehand. - - :param instance: the instance that owns the VHD in question. - :param dest: the destination host machine. - :param flavor: flavor to resize to - """ - self._ensure_not_resize_down_ephemeral(instance, flavor) - - # 0. Zero out the progress to begin - self._update_instance_progress(context, instance, - step=0, - total_steps=RESIZE_TOTAL_STEPS) - - old_gb = instance.flavor.root_gb - new_gb = flavor.root_gb - resize_down = old_gb > new_gb - - if new_gb == 0 and old_gb != 0: - reason = _("Can't resize a disk to 0 GB.") - raise exception.ResizeError(reason=reason) - - vm_ref = self._get_vm_opaque_ref(instance) - sr_path = vm_utils.get_sr_path(self._session) - - if resize_down: - self._migrate_disk_resizing_down( - context, instance, dest, flavor, vm_ref, sr_path) - else: - self._migrate_disk_resizing_up( - context, instance, dest, vm_ref, sr_path) - - self._detach_block_devices_from_orig_vm(instance, block_device_info) - - # NOTE(sirp): disk_info isn't used by the xenapi driver, instead it - # uses a staging-area (/images/instance) and sequence-numbered - # VHDs to figure out how to reconstruct the VDI chain after syncing - disk_info = {} - return disk_info - - def _detach_block_devices_from_orig_vm(self, instance, block_device_info): - block_device_mapping = virt_driver.block_device_info_get_mapping( - block_device_info) - name_label = self._get_orig_vm_name_label(instance) - for vol in block_device_mapping: - connection_info = vol['connection_info'] - mount_device = vol['mount_device'].rpartition("/")[2] - self._volumeops.detach_volume(connection_info, name_label, - mount_device) - - def _resize_up_vdis(self, instance, vdis): - new_root_gb = instance.flavor.root_gb - root_vdi = vdis.get('root') - if new_root_gb and root_vdi: - if root_vdi.get('osvol', False): # Don't resize root volumes. - LOG.debug("Not resizing the root volume.", - instance=instance) - else: - vdi_ref = root_vdi['ref'] - vm_utils.update_vdi_virtual_size(self._session, instance, - vdi_ref, new_root_gb) - - ephemeral_vdis = vdis.get('ephemerals') - if not ephemeral_vdis: - # NOTE(johngarbutt) no existing (migrated) ephemeral disks - # to resize, so nothing more to do here. - return - - total_ephemeral_gb = instance.flavor.ephemeral_gb - if total_ephemeral_gb: - sizes = vm_utils.get_ephemeral_disk_sizes(total_ephemeral_gb) - # resize existing (migrated) ephemeral disks, - # and add any extra disks if required due to a - # larger total_ephemeral_gb (resize down is not supported). 
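The comment above sets up the loop that follows: target ephemeral sizes are paired with existing (migrated) disks by userdevice number, and unmatched sizes trigger generation of a fresh disk. A hedged sketch of that pairing, where the starting device number 4 stands in for DEVICE_EPHEMERAL and the size list for the output of vm_utils.get_ephemeral_disk_sizes:

# Pair each target size with an existing ephemeral disk by userdevice;
# anything without a match must be generated from scratch.
def plan_ephemeral(existing_devices, sizes, first_device=4):
    plan = []
    for userdevice, new_size in enumerate(sizes, start=first_device):
        action = 'resize' if userdevice in existing_devices else 'generate'
        plan.append((userdevice, action, new_size))
    return plan

print(plan_ephemeral({4}, [10, 10]))
# [(4, 'resize', 10), (5, 'generate', 10)]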
- for userdevice, new_size in enumerate(sizes, - start=int(DEVICE_EPHEMERAL)): - vdi = ephemeral_vdis.get(str(userdevice)) - if vdi: - vdi_ref = vdi['ref'] - vm_utils.update_vdi_virtual_size(self._session, instance, - vdi_ref, new_size) - else: - LOG.debug("Generating new ephemeral vdi %d during resize", - userdevice, instance=instance) - # NOTE(johngarbutt) we generate but don't attach - # the new disk to make up any additional ephemeral space - vdi_ref = vm_utils.generate_single_ephemeral( - self._session, instance, None, userdevice, new_size) - vdis[str(userdevice)] = {'ref': vdi_ref, 'generated': True} - - def reboot(self, instance, reboot_type, bad_volumes_callback=None): - """Reboot VM instance.""" - # Note (salvatore-orlando): security group rules are not re-enforced - # upon reboot, since this action on the XenAPI drivers does not - # remove existing filters - vm_ref = self._get_vm_opaque_ref(instance, check_rescue=True) - - try: - if reboot_type == "HARD": - self._session.call_xenapi('VM.hard_reboot', vm_ref) - else: - self._session.call_xenapi('VM.clean_reboot', vm_ref) - except self._session.XenAPI.Failure as exc: - details = exc.details - if (details[0] == 'VM_BAD_POWER_STATE' and - details[-1] == 'halted'): - LOG.info("Starting halted instance found during reboot", - instance=instance) - self._start(instance, vm_ref=vm_ref, - bad_volumes_callback=bad_volumes_callback) - return - elif details[0] == 'SR_BACKEND_FAILURE_46': - LOG.warning("Reboot failed due to bad volumes, detaching " - "bad volumes and starting halted instance", - instance=instance) - self._start(instance, vm_ref=vm_ref, - bad_volumes_callback=bad_volumes_callback) - return - else: - raise - - def set_admin_password(self, instance, new_pass): - """Set the root/admin password on the VM instance.""" - if self.agent_enabled(instance): - vm_ref = self._get_vm_opaque_ref(instance) - agent = self._get_agent(instance, vm_ref) - agent.set_admin_password(new_pass) - else: - raise NotImplementedError() - - @staticmethod - def _sanitize_xenstore_key(key): - """Xenstore only allows the following characters as keys: - - ABCDEFGHIJKLMNOPQRSTUVWXYZ - abcdefghijklmnopqrstuvwxyz - 0123456789-/_@ - - So convert the others to _ - - Also convert / to _, because that is somewhat like a path - separator. 
- """ - allowed_chars = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ" - "abcdefghijklmnopqrstuvwxyz" - "0123456789-_@") - return ''.join([x in allowed_chars and x or '_' for x in key]) - - def _inject_instance_metadata(self, instance, vm_ref): - """Inject instance metadata into xenstore.""" - @utils.synchronized('xenstore-' + instance['uuid']) - def store_meta(topdir, data_dict): - for key, value in data_dict.items(): - key = self._sanitize_xenstore_key(key) - value = value or '' - self._add_to_param_xenstore(vm_ref, '%s/%s' % (topdir, key), - jsonutils.dumps(value)) - - # Store user metadata - store_meta('vm-data/user-metadata', utils.instance_meta(instance)) - - def _inject_auto_disk_config(self, instance, vm_ref): - """Inject instance's auto_disk_config attribute into xenstore.""" - @utils.synchronized('xenstore-' + instance['uuid']) - def store_auto_disk_config(key, value): - value = value and True or False - self._add_to_param_xenstore(vm_ref, key, str(value)) - - store_auto_disk_config('vm-data/auto-disk-config', - instance['auto_disk_config']) - - def change_instance_metadata(self, instance, diff): - """Apply changes to instance metadata to xenstore.""" - try: - vm_ref = self._get_vm_opaque_ref(instance) - except exception.NotFound: - # NOTE(johngarbutt) race conditions mean we can still get here - # during operations where the VM is not present, like resize. - # Skip the update when not possible, as the updated metadata will - # get added when the VM is being booted up at the end of the - # resize or rebuild. - LOG.warning("Unable to update metadata, VM not found.", - instance=instance, exc_info=True) - return - - def process_change(location, change): - if change[0] == '-': - self._remove_from_param_xenstore(vm_ref, location) - try: - self._delete_from_xenstore(instance, location, - vm_ref=vm_ref) - except exception.InstanceNotFound: - # If the VM is not running then no need to update - # the live xenstore - the param xenstore will be - # used next time the VM is booted - pass - elif change[0] == '+': - self._add_to_param_xenstore(vm_ref, location, - jsonutils.dumps(change[1])) - try: - self._write_to_xenstore(instance, location, change[1], - vm_ref=vm_ref) - except exception.InstanceNotFound: - # If the VM is not running then no need to update - # the live xenstore - pass - - @utils.synchronized('xenstore-' + instance['uuid']) - def update_meta(): - for key, change in diff.items(): - key = self._sanitize_xenstore_key(key) - location = 'vm-data/user-metadata/%s' % key - process_change(location, change) - update_meta() - - def _find_vdi_refs(self, vm_ref, exclude_volumes=False): - """Find and return the root and ephemeral vdi refs for a VM.""" - if not vm_ref: - return {} - - vdi_refs = {} - for vbd_ref in self._session.call_xenapi("VM.get_VBDs", vm_ref): - vbd = self._session.call_xenapi("VBD.get_record", vbd_ref) - if not exclude_volumes or 'osvol' not in vbd['other_config']: - vdi_refs[vbd['userdevice']] = vbd['VDI'] - - return vdi_refs - - def _destroy_vdis(self, instance, vm_ref): - """Destroys all VDIs associated with a VM.""" - LOG.debug("Destroying VDIs", instance=instance) - - vdi_refs = vm_utils.lookup_vm_vdis(self._session, vm_ref) - if not vdi_refs: - return - for vdi_ref in vdi_refs: - try: - vm_utils.destroy_vdi(self._session, vdi_ref) - except exception.StorageError as exc: - LOG.error(exc) - - def _destroy_kernel_ramdisk(self, instance, vm_ref): - """Three situations can occur: - - 1. 
We have neither a ramdisk nor a kernel, in which case we are a - RAW image and can omit this step - - 2. We have one or the other, in which case, we should flag as an - error - - 3. We have both, in which case we safely remove both the kernel - and the ramdisk. - - """ - instance_uuid = instance['uuid'] - if not instance['kernel_id'] and not instance['ramdisk_id']: - # 1. No kernel or ramdisk - LOG.debug("Using RAW or VHD, skipping kernel and ramdisk " - "deletion", instance=instance) - return - - if not (instance['kernel_id'] and instance['ramdisk_id']): - # 2. We only have kernel xor ramdisk - raise exception.InstanceUnacceptable(instance_id=instance_uuid, - reason=_("instance has a kernel or ramdisk but not both")) - - # 3. We have both kernel and ramdisk - (kernel, ramdisk) = vm_utils.lookup_kernel_ramdisk(self._session, - vm_ref) - if kernel or ramdisk: - vm_utils.destroy_kernel_ramdisk(self._session, instance, - kernel, ramdisk) - LOG.debug("kernel/ramdisk files removed", instance=instance) - - def _destroy_rescue_instance(self, rescue_vm_ref, original_vm_ref): - """Destroy a rescue instance.""" - # Shutdown Rescue VM - state = vm_utils.get_power_state(self._session, rescue_vm_ref) - if state != power_state.SHUTDOWN: - self._session.call_xenapi("VM.hard_shutdown", rescue_vm_ref) - - # Destroy Rescue VDIs - vdi_refs = vm_utils.lookup_vm_vdis(self._session, rescue_vm_ref) - - # Don't destroy any VDIs belonging to the original VM - orig_vdi_refs = self._find_vdi_refs(original_vm_ref) - vdi_refs = set(vdi_refs) - set(orig_vdi_refs.values()) - - vm_utils.safe_destroy_vdis(self._session, vdi_refs) - - # Destroy Rescue VM - self._session.call_xenapi("VM.destroy", rescue_vm_ref) - - def destroy(self, instance, network_info, block_device_info=None, - destroy_disks=True): - """Destroy VM instance. - - This is the method exposed by xenapi_conn.destroy(). The rest of the - destroy_* methods are internal. - - """ - LOG.info("Destroying VM", instance=instance) - - # We don't use _get_vm_opaque_ref because the instance may - # truly not exist because of a failure during build. A valid - # vm_ref is checked correctly where necessary. - vm_ref = vm_utils.lookup(self._session, instance['name']) - - rescue_vm_ref = vm_utils.lookup(self._session, - "%s-rescue" % instance['name']) - if rescue_vm_ref: - self._destroy_rescue_instance(rescue_vm_ref, vm_ref) - - # NOTE(sirp): information about which volumes should be detached is - # determined by the VBD.other_config['osvol'] attribute - # NOTE(alaski): `block_device_info` is used to efficiently determine if - # there's a volume attached, or which volumes to cleanup if there is - # no VM present. - return self._destroy(instance, vm_ref, network_info=network_info, - destroy_disks=destroy_disks, - block_device_info=block_device_info) - - def _destroy(self, instance, vm_ref, network_info=None, - destroy_disks=True, block_device_info=None): - """Destroys VM instance by performing: - - 1. A shutdown - 2. Destroying associated VDIs. - 3. Destroying kernel and ramdisk files (if necessary). - 4. Destroying that actual VM record. - - """ - if vm_ref is None: - LOG.warning("VM is not present, skipping destroy...", - instance=instance) - # NOTE(alaski): There should not be a block device mapping here, - # but if there is it very likely means there was an error cleaning - # it up previously and there is now an orphaned sr/pbd. This will - # prevent both volume and instance deletes from completing. 
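The NOTE(alaski) comment above introduces the orphaned-SR cleanup that follows: the legacy SR uuid built from a fixed prefix plus the volume id is tried first, then the uuid derived from the connection data via parse_sr_info. A small sketch of that lookup order (the uuid value is illustrative):

# Candidate SR uuids, tried in order: old-style fixed prefix first,
# then the new-style uuid carried in the connection data.
def candidate_sr_uuids(volume_id, connection_data_sr_uuid):
    return ['FA15E-D15C-%s' % volume_id, connection_data_sr_uuid]

print(candidate_sr_uuids('vol-1234',
                         'a81033d4-ea8a-4a2b-a564-a538c4e2f56a'))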
- bdms = block_device_info['block_device_mapping'] or [] - if not bdms: - return - for bdm in bdms: - volume_id = bdm['connection_info']['data']['volume_id'] - # Note(bobba): Check for the old-style SR first; if this - # doesn't find the SR, also look for the new-style from - # parse_sr_info - sr_uuid = 'FA15E-D15C-%s' % volume_id - sr_ref = None - try: - sr_ref = volume_utils.find_sr_by_uuid(self._session, - sr_uuid) - if not sr_ref: - connection_data = bdm['connection_info']['data'] - (sr_uuid, unused, unused) = volume_utils.parse_sr_info( - connection_data) - sr_ref = volume_utils.find_sr_by_uuid(self._session, - sr_uuid) - except Exception: - LOG.exception('Failed to find an SR for volume %s', - volume_id, instance=instance) - - try: - if sr_ref: - volume_utils.forget_sr(self._session, sr_ref) - else: - LOG.error('Volume %s is associated with the ' - 'instance but no SR was found for it', - volume_id, instance=instance) - except Exception: - LOG.exception('Failed to forget the SR for volume %s', - volume_id, instance=instance) - return - - # NOTE(alaski): Attempt clean shutdown first if there's an attached - # volume to reduce the risk of corruption. - if block_device_info and block_device_info['block_device_mapping']: - if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref): - LOG.debug("Clean shutdown did not complete successfully, " - "trying hard shutdown.", instance=instance) - vm_utils.hard_shutdown_vm(self._session, instance, vm_ref) - else: - vm_utils.hard_shutdown_vm(self._session, instance, vm_ref) - - if destroy_disks: - self._volumeops.detach_all(vm_ref) - self._destroy_vdis(instance, vm_ref) - self._destroy_kernel_ramdisk(instance, vm_ref) - - self.unplug_vifs(instance, network_info, vm_ref) - vm_utils.destroy_vm(self._session, instance, vm_ref) - - def pause(self, instance): - """Pause VM instance.""" - vm_ref = self._get_vm_opaque_ref(instance) - self._session.call_xenapi('VM.pause', vm_ref) - - def unpause(self, instance): - """Unpause VM instance.""" - vm_ref = self._get_vm_opaque_ref(instance) - self._session.call_xenapi('VM.unpause', vm_ref) - - def suspend(self, instance): - """Suspend the specified instance.""" - vm_ref = self._get_vm_opaque_ref(instance) - self._acquire_bootlock(vm_ref) - self._session.call_xenapi('VM.suspend', vm_ref) - - def resume(self, instance): - """Resume the specified instance.""" - vm_ref = self._get_vm_opaque_ref(instance) - self._release_bootlock(vm_ref) - self._session.call_xenapi('VM.resume', vm_ref, False, True) - - def rescue(self, context, instance, network_info, image_meta, - rescue_password): - """Rescue the specified instance. - - - shutdown the instance VM. - - set 'bootlock' to prevent the instance from starting in rescue. - - spawn a rescue VM (the vm name-label will be instance-N-rescue). 
- - """ - rescue_name_label = '%s-rescue' % instance.name - rescue_vm_ref = vm_utils.lookup(self._session, rescue_name_label) - if rescue_vm_ref: - raise RuntimeError(_("Instance is already in Rescue Mode: %s") - % instance.name) - - vm_ref = self._get_vm_opaque_ref(instance) - vm_utils.hard_shutdown_vm(self._session, instance, vm_ref) - self._acquire_bootlock(vm_ref) - self.spawn(context, instance, image_meta, [], rescue_password, - network_info, name_label=rescue_name_label, rescue=True) - - def set_bootable(self, instance, is_bootable): - """Set the ability to power on/off an instance.""" - vm_ref = self._get_vm_opaque_ref(instance) - if is_bootable: - self._release_bootlock(vm_ref) - else: - self._acquire_bootlock(vm_ref) - - def unrescue(self, instance): - """Unrescue the specified instance. - - - unplug the instance VM's disk from the rescue VM. - - teardown the rescue VM. - - release the bootlock to allow the instance VM to start. - - """ - rescue_vm_ref = vm_utils.lookup(self._session, - "%s-rescue" % instance.name) - if not rescue_vm_ref: - raise exception.InstanceNotInRescueMode( - instance_id=instance.uuid) - - original_vm_ref = self._get_vm_opaque_ref(instance) - - self._destroy_rescue_instance(rescue_vm_ref, original_vm_ref) - self._release_bootlock(original_vm_ref) - self._start(instance, original_vm_ref) - - def soft_delete(self, instance): - """Soft delete the specified instance.""" - try: - vm_ref = self._get_vm_opaque_ref(instance) - except exception.NotFound: - LOG.warning("VM is not present, skipping soft delete...", - instance=instance) - else: - vm_utils.hard_shutdown_vm(self._session, instance, vm_ref) - self._acquire_bootlock(vm_ref) - - def restore(self, instance): - """Restore the specified instance.""" - vm_ref = self._get_vm_opaque_ref(instance) - self._release_bootlock(vm_ref) - self._start(instance, vm_ref) - - def power_off(self, instance): - """Power off the specified instance.""" - vm_ref = self._get_vm_opaque_ref(instance) - vm_utils.hard_shutdown_vm(self._session, instance, vm_ref) - - def power_on(self, instance): - """Power on the specified instance.""" - vm_ref = self._get_vm_opaque_ref(instance) - self._start(instance, vm_ref) - - def _cancel_stale_tasks(self, timeout, task): - """Cancel the given tasks that are older than the given timeout.""" - task_refs = self._session.call_xenapi("task.get_by_name_label", task) - for task_ref in task_refs: - task_rec = self._session.call_xenapi("task.get_record", task_ref) - task_created = timeutils.parse_strtime(task_rec["created"].value, - "%Y%m%dT%H:%M:%SZ") - - if timeutils.is_older_than(task_created, timeout): - self._session.call_xenapi("task.cancel", task_ref) - - def poll_rebooting_instances(self, timeout, instances): - """Look for rebooting instances that can be expired. - - - issue a "hard" reboot to any instance that has been stuck in a - reboot state for >= the given timeout - """ - # NOTE(jk0): All existing clean_reboot tasks must be canceled before - # we can kick off the hard_reboot tasks. 
- self._cancel_stale_tasks(timeout, 'VM.clean_reboot') - - ctxt = nova_context.get_admin_context() - - instances_info = dict(instance_count=len(instances), - timeout=timeout) - - if instances_info["instance_count"] > 0: - LOG.info("Found %(instance_count)d hung reboots " - "older than %(timeout)d seconds", instances_info) - - for instance in instances: - LOG.info("Automatically hard rebooting", instance=instance) - self.compute_api.reboot(ctxt, instance, "HARD") - - def get_info(self, instance, vm_ref=None): - """Return data about VM instance.""" - vm_ref = vm_ref or self._get_vm_opaque_ref(instance) - return vm_utils.compile_info(self._session, vm_ref) - - def get_diagnostics(self, instance): - """Return data about VM diagnostics.""" - vm_ref = self._get_vm_opaque_ref(instance) - vm_rec = self._session.call_xenapi("VM.get_record", vm_ref) - return vm_utils.compile_diagnostics(vm_rec) - - def get_instance_diagnostics(self, instance): - """Return data about VM diagnostics using the common API.""" - vm_ref = self._get_vm_opaque_ref(instance) - return vm_utils.compile_instance_diagnostics(self._session, instance, - vm_ref) - - def _get_vif_device_map(self, vm_rec): - vif_map = {} - for vif in [self._session.call_xenapi("VIF.get_record", vrec) - for vrec in vm_rec['VIFs']]: - vif_map[vif['device']] = vif['MAC'] - return vif_map - - def get_all_bw_counters(self): - """Return running bandwidth counter for each interface on each - running VM. - """ - counters = vm_utils.fetch_bandwidth(self._session) - bw = {} - for vm_ref, vm_rec in vm_utils.list_vms(self._session): - vif_map = self._get_vif_device_map(vm_rec) - name = vm_rec['name_label'] - if 'nova_uuid' not in vm_rec['other_config']: - continue - dom = vm_rec.get('domid') - if dom is None or dom not in counters: - continue - vifs_bw = bw.setdefault(name, {}) - for vif_num, vif_data in counters[dom].items(): - mac = vif_map[vif_num] - vif_data['mac_address'] = mac - vifs_bw[mac] = vif_data - return bw - - def get_console_output(self, instance): - """Return last few lines of instance console.""" - dom_id = self._get_last_dom_id(instance, check_rescue=True) - - try: - raw_console_data = vm_management.get_console_log( - self._session, dom_id) - except self._session.XenAPI.Failure: - LOG.exception("Guest does not have a console available") - raise exception.ConsoleNotAvailable() - - return zlib.decompress(base64.b64decode(raw_console_data)) - - def get_vnc_console(self, instance): - """Return connection info for a vnc console.""" - if instance.vm_state == vm_states.RESCUED: - name = '%s-rescue' % instance.name - vm_ref = vm_utils.lookup(self._session, name) - if vm_ref is None: - # The rescue instance might not be ready at this point. - raise exception.InstanceNotReady(instance_id=instance.uuid) - else: - vm_ref = vm_utils.lookup(self._session, instance.name) - if vm_ref is None: - # The compute manager expects InstanceNotFound for this case. 
- raise exception.InstanceNotFound(instance_id=instance.uuid) - - session_id = self._session.get_session_id() - path = "/console?ref=%s&session_id=%s" % (str(vm_ref), session_id) - - # NOTE: XS5.6sp2+ use http over port 80 for xenapi com - return ctype.ConsoleVNC( - host=CONF.vnc.server_proxyclient_address, - port=80, - internal_access_path=path) - - def _vif_xenstore_data(self, vif): - """convert a network info vif to injectable instance data.""" - - def get_ip(ip): - if not ip: - return None - return ip['address'] - - def fixed_ip_dict(ip, subnet): - if ip['version'] == 4: - netmask = str(subnet.as_netaddr().netmask) - else: - netmask = subnet.as_netaddr()._prefixlen - - return {'ip': ip['address'], - 'enabled': '1', - 'netmask': netmask, - 'gateway': get_ip(subnet['gateway'])} - - def convert_route(route): - return {'route': str(netaddr.IPNetwork(route['cidr']).network), - 'netmask': str(netaddr.IPNetwork(route['cidr']).netmask), - 'gateway': get_ip(route['gateway'])} - - network = vif['network'] - v4_subnets = [subnet for subnet in network['subnets'] - if subnet['version'] == 4] - v6_subnets = [subnet for subnet in network['subnets'] - if subnet['version'] == 6] - - # NOTE(tr3buchet): routes and DNS come from all subnets - routes = [convert_route(route) for subnet in network['subnets'] - for route in subnet['routes']] - dns = [get_ip(ip) for subnet in network['subnets'] - for ip in subnet['dns']] - - info_dict = {'label': network['label'], - 'mac': vif['address']} - - if v4_subnets: - # NOTE(tr3buchet): gateway and broadcast from first subnet - # primary IP will be from first subnet - # subnets are generally unordered :( - info_dict['gateway'] = get_ip(v4_subnets[0]['gateway']) - info_dict['broadcast'] = str(v4_subnets[0].as_netaddr().broadcast) - info_dict['ips'] = [fixed_ip_dict(ip, subnet) - for subnet in v4_subnets - for ip in subnet['ips']] - if v6_subnets: - # NOTE(tr3buchet): gateway from first subnet - # primary IP will be from first subnet - # subnets are generally unordered :( - info_dict['gateway_v6'] = get_ip(v6_subnets[0]['gateway']) - info_dict['ip6s'] = [fixed_ip_dict(ip, subnet) - for subnet in v6_subnets - for ip in subnet['ips']] - if routes: - info_dict['routes'] = routes - - if dns: - info_dict['dns'] = list(set(dns)) - - return info_dict - - def inject_network_info(self, instance, network_info, vm_ref=None): - """Generate the network info and make calls to place it into the - xenstore and the xenstore param list. 
- vm_ref can be passed in because it will sometimes be different than - what vm_utils.lookup(session, instance['name']) will find (ex: rescue) - """ - vm_ref = vm_ref or self._get_vm_opaque_ref(instance) - LOG.debug("Injecting network info to xenstore", instance=instance) - - @utils.synchronized('xenstore-' + instance['uuid']) - def update_nwinfo(): - for vif in network_info: - xs_data = self._vif_xenstore_data(vif) - location = ('vm-data/networking/%s' % - vif['address'].replace(':', '')) - self._add_to_param_xenstore(vm_ref, - location, - jsonutils.dumps(xs_data)) - try: - self._write_to_xenstore(instance, location, xs_data, - vm_ref=vm_ref) - except exception.InstanceNotFound: - # If the VM is not running, no need to update the - # live xenstore - pass - update_nwinfo() - - def _create_vifs(self, instance, vm_ref, network_info): - """Creates vifs for an instance.""" - - LOG.debug("Creating vifs", instance=instance) - vif_refs = [] - - # this function raises if vm_ref is not a vm_opaque_ref - self._session.call_xenapi("VM.get_domid", vm_ref) - - for device, vif in enumerate(network_info): - LOG.debug('Create VIF %s', vif, instance=instance) - vif_ref = self.vif_driver.plug(instance, vif, - vm_ref=vm_ref, device=device) - vif_refs.append(vif_ref) - - LOG.debug('Created the vif_refs: %(vifs)s for VM name: %(name)s', - {'vifs': vif_refs, 'name': instance['name']}, - instance=instance) - - def plug_vifs(self, instance, network_info): - """Set up VIF networking on the host.""" - for device, vif in enumerate(network_info): - self.vif_driver.plug(instance, vif, device=device) - - def unplug_vifs(self, instance, network_info, vm_ref): - if network_info: - for vif in network_info: - self.vif_driver.unplug(instance, vif, vm_ref) - - def reset_network(self, instance, rescue=False): - """Calls resetnetwork method in agent.""" - if self.agent_enabled(instance): - vm_ref = self._get_vm_opaque_ref(instance) - agent = self._get_agent(instance, vm_ref) - self._inject_hostname(instance, vm_ref, rescue) - agent.resetnetwork() - self._remove_hostname(instance, vm_ref) - else: - raise NotImplementedError() - - def _inject_hostname(self, instance, vm_ref, rescue): - """Inject the hostname of the instance into the xenstore.""" - hostname = instance['hostname'] - if rescue: - hostname = 'RESCUE-%s' % hostname - - if instance['os_type'] == "windows": - # NOTE(jk0): Windows host names can only be <= 15 chars. - hostname = hostname[:15] - - LOG.debug("Injecting hostname (%s) into xenstore", hostname, - instance=instance) - - @utils.synchronized('xenstore-' + instance['uuid']) - def update_hostname(): - self._add_to_param_xenstore(vm_ref, 'vm-data/hostname', hostname) - - update_hostname() - - def _remove_hostname(self, instance, vm_ref): - LOG.debug("Removing hostname from xenstore", instance=instance) - - @utils.synchronized('xenstore-' + instance['uuid']) - def update_hostname(): - self._remove_from_param_xenstore(vm_ref, 'vm-data/hostname') - - update_hostname() - - def _write_to_xenstore(self, instance, path, value, vm_ref=None): - """Writes the passed value to the xenstore record for the given VM - at the specified location. A XenAPIPlugin.PluginError will be raised - if any error is encountered in the write process. 
- """ - dom_id = self._get_dom_id(instance, vm_ref) - try: - return host_xenstore.write_record(self._session, dom_id, path, - jsonutils.dumps(value)) - except self._session.XenAPI.Failure as e: - return self._process_plugin_exception(e, 'write_record', instance) - - def _read_from_xenstore(self, instance, path, ignore_missing_path=True, - vm_ref=None): - """Reads the passed location from xenstore for the given vm. Missing - paths are ignored, unless explicitly stated not to, which causes - xenstore to raise an exception. A XenAPIPlugin.PluginError is raised - if any error is encountered in the read process. - """ - # NOTE(sulo): These need to be string for valid field type - # for xapi. - dom_id = self._get_dom_id(instance, vm_ref) - try: - return host_xenstore.read_record( - self._session, dom_id, path, - ignore_missing_path=ignore_missing_path) - except XenAPI.Failure as e: - return self._process_plugin_exception(e, 'read_record', instance) - - def _delete_from_xenstore(self, instance, path, vm_ref=None): - """Deletes the value from the xenstore record for the given VM at - the specified location. A XenAPIPlugin.PluginError will be - raised if any error is encountered in the delete process. - """ - dom_id = self._get_dom_id(instance, vm_ref) - try: - return host_xenstore.delete_record(self._session, dom_id, path) - except XenAPI.Failure as e: - return self._process_plugin_exception(e, 'delete_record', instance) - - def _process_plugin_exception(self, plugin_exception, method, instance): - err_msg = plugin_exception.details[-1].splitlines()[-1] - if 'TIMEOUT:' in err_msg: - LOG.error('TIMEOUT: The call to %s timed out', - method, instance=instance) - return {'returncode': 'timeout', 'message': err_msg} - elif 'NOT IMPLEMENTED:' in err_msg: - LOG.error('NOT IMPLEMENTED: The call to %s is not supported' - ' by the agent.', method, instance=instance) - return {'returncode': 'notimplemented', 'message': err_msg} - else: - LOG.error('The call to %(method)s returned an error: %(e)s.', - {'method': method, 'e': plugin_exception}, - instance=instance) - return {'returncode': 'error', 'message': err_msg} - - def _get_dom_id(self, instance, vm_ref=None, check_rescue=False): - vm_ref = vm_ref or self._get_vm_opaque_ref(instance, check_rescue) - domid = self._session.call_xenapi("VM.get_domid", vm_ref) - if not domid or domid == "-1": - raise exception.InstanceNotFound(instance_id=instance['name']) - return domid - - def _get_last_dom_id(self, instance, vm_ref=None, check_rescue=False): - vm_ref = vm_ref or self._get_vm_opaque_ref(instance, check_rescue) - other_config = self._session.call_xenapi("VM.get_other_config", vm_ref) - if 'last_dom_id' not in other_config: - raise exception.InstanceNotFound(instance_id=instance['name']) - return other_config['last_dom_id'] - - def _add_to_param_xenstore(self, vm_ref, key, val): - """Takes a key/value pair and adds it to the xenstore parameter - record for the given vm instance. If the key exists in xenstore, - it is overwritten - """ - self._remove_from_param_xenstore(vm_ref, key) - self._session.call_xenapi('VM.add_to_xenstore_data', vm_ref, key, val) - - def _remove_from_param_xenstore(self, vm_ref, key): - """Takes a single key and removes it from the xenstore parameter - record data for the given VM. - If the key doesn't exist, the request is ignored. 
- """ - self._session.call_xenapi('VM.remove_from_xenstore_data', vm_ref, key) - - def _get_host_opaque_ref(self, hostname): - host_ref_set = self._session.host.get_by_name_label(hostname) - # If xenapi can't get host ref by the name label, it means the - # destination host is not in the same pool with the source host. - if host_ref_set is None or host_ref_set == []: - return None - # It should be only one host with the name, or there would be - # a confuse on which host is required - if len(host_ref_set) > 1: - reason = _('Multiple hosts have the same hostname: %s.') % hostname - raise exception.MigrationPreCheckError(reason=reason) - return host_ref_set[0] - - def _get_host_ref_no_aggr(self): - # Pull the current host ref from Dom0's resident_on field. This - # allows us a simple way to pull the accurate host without aggregates - dom0_rec = self._session.call_xenapi("VM.get_all_records_where", - 'field "domid"="0"') - dom0_ref = list(dom0_rec.keys())[0] - - return dom0_rec[dom0_ref]['resident_on'] - - def _get_host_software_versions(self): - # Get software versions from host.get_record. - # Works around aggregate checking as not all places use aggregates. - host_ref = self._get_host_ref_no_aggr() - host_rec = self._session.call_xenapi("host.get_record", host_ref) - return host_rec['software_version'] - - def _get_network_ref(self): - # Get the network to for migrate. - # This is the one associated with the pif marked management. From cli: - # uuid=`xe pif-list --minimal management=true` - # xe pif-param-get param-name=network-uuid uuid=$uuid - expr = ('field "management" = "true" and field "host" = "%s"' % - self._session.host_ref) - pifs = self._session.call_xenapi('PIF.get_all_records_where', - expr) - if len(pifs) != 1: - msg = _('No suitable network for migrate') - raise exception.MigrationPreCheckError(reason=msg) - - pifkey = list(pifs.keys())[0] - if not (netutils.is_valid_ipv4(pifs[pifkey]['IP']) or - netutils.is_valid_ipv6(pifs[pifkey]['IPv6'])): - msg = (_('PIF %s does not contain IP address') - % pifs[pifkey]['uuid']) - raise exception.MigrationPreCheckError(reason=msg) - - nwref = pifs[list(pifs.keys())[0]]['network'] - return nwref - - def _migrate_receive(self, ctxt): - destref = self._session.host_ref - # Get the network to for migrate. - nwref = self._get_network_ref() - try: - options = {} - migrate_data = self._session.call_xenapi("host.migrate_receive", - destref, - nwref, - options) - except self._session.XenAPI.Failure: - LOG.exception('Migrate Receive failed') - msg = _('Migrate Receive failed') - raise exception.MigrationPreCheckError(reason=msg) - return migrate_data - - def _get_iscsi_srs(self, ctxt, instance_ref): - vm_ref = self._get_vm_opaque_ref(instance_ref) - vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref) - - iscsi_srs = [] - - for vbd_ref in vbd_refs: - vdi_ref = self._session.call_xenapi("VBD.get_VDI", vbd_ref) - # Check if it's on an iSCSI SR - sr_ref = self._session.call_xenapi("VDI.get_SR", vdi_ref) - if self._session.call_xenapi("SR.get_type", sr_ref) == 'iscsi': - iscsi_srs.append(sr_ref) - - return iscsi_srs - - def check_can_live_migrate_destination(self, ctxt, instance_ref, - block_migration=False, - disk_over_commit=False): - """Check if it is possible to execute live migration. 
-
-        :param ctxt: security context
-        :param instance_ref: nova.db.sqlalchemy.models.Instance object
-        :param block_migration: if true, prepare for block migration
-                                if None, calculate it from driver
-        :param disk_over_commit: if true, allow disk over commit
-
-        """
-        dest_check_data = objects.XenapiLiveMigrateData()
-
-        src = instance_ref.host
-
-        def _host_in_this_pool(host_name_label):
-            host_ref = self._get_host_opaque_ref(host_name_label)
-            if not host_ref:
-                return False
-            return vm_utils.host_in_this_pool(self._session, host_ref)
-
-        # Check if the migration happens within a xapi pool
-        pooled_migrate = _host_in_this_pool(src)
-        # NOTE(eliqiao): if block_migration is None, we calculate it
-        # by checking if src and dest node are in the same xapi pool
-        if block_migration is None:
-            if not pooled_migrate:
-                block_migration = True
-            else:
-                sr_ref = vm_utils.safe_find_sr(self._session)
-                sr_rec = self._session.get_rec('SR', sr_ref)
-                block_migration = not sr_rec["shared"]
-
-        if block_migration:
-            dest_check_data.block_migration = True
-            dest_check_data.migrate_send_data = self._migrate_receive(ctxt)
-            dest_check_data.destination_sr_ref = vm_utils.safe_find_sr(
-                self._session)
-        else:
-            dest_check_data.block_migration = False
-            # TODO(eliqiao): There is still one case where block_migration is
-            # passed by the admin user, so we need this check until the
-            # block_migration flag is removed from the API
-            if not pooled_migrate:
-                reason = _("Destination host is not in the same shared "
-                           "storage pool as source host %s.") % src
-                raise exception.MigrationPreCheckError(reason=reason)
-            # TODO(johngarbutt) we currently assume
-            # instance is on a SR shared with other destination
-            # block migration work will be able to resolve this
-
-        # Set the default net_ref for use in generate_vif_mapping
-        net_ref = self._get_network_ref()
-        dest_check_data.vif_uuid_map = {'': net_ref}
-        return dest_check_data
-
-    def check_can_live_migrate_source(self, ctxt, instance_ref,
-                                      dest_check_data):
-        """Check if it's possible to execute live migration on the source side.
-
-        :param ctxt: security context
-        :param instance_ref: nova.db.sqlalchemy.models.Instance object
-        :param dest_check_data: data returned by the check on the
-                                destination, includes block_migration flag
-
-        """
-        if len(self._get_iscsi_srs(ctxt, instance_ref)) > 0:
-            # XAPI must support the relaxed SR check for live migrating with
-            # iSCSI VBDs
-            if not self._session.is_xsm_sr_check_relaxed():
-                raise exception.MigrationError(reason=_('XAPI supporting '
-                                               'relax-xsm-sr-check=true required'))
-
-        # TODO(bkaminski): This entire block needs to be removed from this
-        # if statement. Live Migration should assert_can_migrate either way.
-        if ('block_migration' in dest_check_data and
-                dest_check_data.block_migration):
-            vm_ref = self._get_vm_opaque_ref(instance_ref)
-            host_sw = self._get_host_software_versions()
-            host_pfv = host_sw['platform_version']
-            try:
-                self._call_live_migrate_command(
-                    "VM.assert_can_migrate", vm_ref, dest_check_data)
-            except self._session.XenAPI.Failure as exc:
-                reason = exc.details[0]
-                # XCP>=2.1 will error on this assert call if iSCSI VBDs are
-                # attached, as the SR has not been configured on the
-                # hypervisor at this point in the migration. We swallow this
-                # exception until a more intensive refactor can be done to
-                # correct this.
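The comment above, continued in the check below, gates the exception swallow on both the platform name and its version. A sketch of that gate, with a naive tuple comparison standing in for versionutils.is_compatible:

# Swallow the assert failure only for XCP at version >= 2.1.0; anything
# else is treated as a real migration pre-check error.
def should_swallow(reason, platform_name, platform_version):
    gate = tuple(int(p) for p in platform_version.split('.')[:3]) >= (2, 1, 0)
    return "VDI_NOT_IN_MAP" in reason and platform_name == "XCP" and gate

print(should_swallow("VDI_NOT_IN_MAP failure", "XCP", "2.5.0"))      # True
print(should_swallow("VDI_NOT_IN_MAP failure", "XenServer", "7.0"))  # False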
- if ("VDI_NOT_IN_MAP" in reason and - host_sw['platform_name'] == "XCP" and - versionutils.is_compatible("2.1.0", host_pfv)): - LOG.debug("Skipping exception for XCP>=2.1.0, %s", reason) - return dest_check_data - msg = _('assert_can_migrate failed because: %s') % reason - LOG.debug(msg, exc_info=True) - raise exception.MigrationPreCheckError(reason=msg) - return dest_check_data - - def _ensure_pv_driver_info_for_live_migration(self, instance, vm_ref): - """Checks if pv drivers are present for this instance. If it is - present but not reported, try to fake the info for live-migration. - """ - if self._pv_driver_version_reported(instance, vm_ref): - # Since driver version is reported we do not need to do anything - return - - if self._pv_device_reported(instance, vm_ref): - LOG.debug("PV device present but missing pv driver info. " - "Attempting to insert missing info in xenstore.", - instance=instance) - self._write_fake_pv_version(instance, vm_ref) - else: - LOG.debug("Could not determine the presence of pv device. " - "Skipping inserting pv driver info.", - instance=instance) - - def _pv_driver_version_reported(self, instance, vm_ref): - xs_path = "attr/PVAddons/MajorVersion" - major_version = self._read_from_xenstore(instance, xs_path, - vm_ref=vm_ref) - LOG.debug("Major Version: %s reported.", major_version, - instance=instance) - # xenstore reports back string only, if the path is missing we get - # None as string, since missing paths are ignored. - if major_version == '"None"': - return False - else: - return True - - def _pv_device_reported(self, instance, vm_ref): - vm_rec = self._session.VM.get_record(vm_ref) - vif_list = [self._session.call_xenapi("VIF.get_record", vrec) - for vrec in vm_rec['VIFs']] - net_devices = [vif['device'] for vif in vif_list] - # NOTE(sulo): We infer the presence of pv driver - # by the presence of a pv network device. If xenstore reports - # device status as connected (status=4) we take that as the presence - # of pv driver. Any other status will likely cause migration to fail. 
- for device in net_devices: - xs_path = "device/vif/%s/state" % device - ret = self._read_from_xenstore(instance, xs_path, vm_ref=vm_ref) - LOG.debug("PV Device vif.%(vif_ref)s reporting state %(ret)s", - {'vif_ref': device, 'ret': ret}, instance=instance) - if strutils.is_int_like(ret) and int(ret) == 4: - return True - - return False - - def _write_fake_pv_version(self, instance, vm_ref): - version = self._session.product_version - LOG.debug("Writing pvtools version major: %(major)s minor: %(minor)s " - "micro: %(micro)s", {'major': version[0], - 'minor': version[1], - 'micro': version[2]}, - instance=instance) - major_ver = "attr/PVAddons/MajorVersion" - self._write_to_xenstore(instance, major_ver, version[0], vm_ref=vm_ref) - minor_ver = "attr/PVAddons/MinorVersion" - self._write_to_xenstore(instance, minor_ver, version[1], vm_ref=vm_ref) - micro_ver = "attr/PVAddons/MicroVersion" - self._write_to_xenstore(instance, micro_ver, version[2], vm_ref=vm_ref) - xs_path = "data/updated" - self._write_to_xenstore(instance, xs_path, "1", vm_ref=vm_ref) - - def _generate_vdi_map(self, destination_sr_ref, vm_ref, sr_ref=None): - """generate a vdi_map for _call_live_migrate_command.""" - if sr_ref is None: - sr_ref = vm_utils.safe_find_sr(self._session) - vm_vdis = vm_utils.get_instance_vdis_for_sr(self._session, - vm_ref, sr_ref) - return {vdi: destination_sr_ref for vdi in vm_vdis} - - def _call_live_migrate_command(self, command_name, vm_ref, migrate_data): - """unpack xapi specific parameters, and call a live migrate command.""" - # NOTE(coreywright): though a nullable object field, migrate_send_data - # is required for XenAPI live migration commands - migrate_send_data = None - if 'migrate_send_data' in migrate_data: - migrate_send_data = migrate_data.migrate_send_data - if not migrate_send_data: - raise exception.InvalidParameterValue( - 'XenAPI requires destination migration data') - # NOTE(coreywright): convert to xmlrpc marshallable type - migrate_send_data = dict(migrate_send_data) - - destination_sr_ref = migrate_data.destination_sr_ref - vdi_map = self._generate_vdi_map(destination_sr_ref, vm_ref) - - # Add destination SR refs for all of the VDIs that we created - # as part of the pre migration callback - sr_uuid_map = None - if "sr_uuid_map" in migrate_data: - sr_uuid_map = migrate_data.sr_uuid_map - if sr_uuid_map: - for sr_uuid in sr_uuid_map: - # Source and destination SRs have the same UUID, so get the - # reference for the local SR - sr_ref = self._session.call_xenapi("SR.get_by_uuid", sr_uuid) - vdi_map.update( - self._generate_vdi_map( - sr_uuid_map[sr_uuid], vm_ref, sr_ref)) - vif_map = {} - # For block migration, need to pass vif map to the destination hosts. 
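The comment above leads into the cross-pool branch below, where each source VIF must be mapped to a destination network. A sketch of that mapping, using the empty-string key as the default network exactly as _generate_vif_network_map does (the dictionary contents are illustrative):

# Map each source VIF to a destination network by its neutron port id,
# falling back to the default network stored under the '' key.
def map_vifs(vif_port_ids, vif_uuid_map):
    default_net = vif_uuid_map.get('')
    vif_map = {}
    for vif_ref, neutron_id in vif_port_ids.items():
        net = vif_uuid_map.get(neutron_id, default_net)
        if net is None:
            raise ValueError("No mapping for source network %s" % neutron_id)
        vif_map[vif_ref] = net
    return vif_map

print(map_vifs({"vif-1": "port-a"}, {"port-a": "net-1", "": "net-0"}))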
- if not vm_utils.host_in_this_pool(self._session, - migrate_send_data.get('host')): - vif_uuid_map = None - if 'vif_uuid_map' in migrate_data: - vif_uuid_map = migrate_data.vif_uuid_map - if vif_uuid_map: - vif_map = self._generate_vif_network_map(vm_ref, vif_uuid_map) - LOG.debug("Generated vif_map for live migration: %s", vif_map) - options = {} - self._session.call_xenapi(command_name, vm_ref, - migrate_send_data, True, - vdi_map, vif_map, options) - - def _generate_vif_network_map(self, vm_ref, vif_uuid_map): - # Generate a mapping dictionary of src_vif_ref: dest_network_ref - vif_map = {} - # vif_uuid_map is dictionary of neutron_vif_uuid: dest_network_ref - vifs = self._session.VM.get_VIFs(vm_ref) - default_net_ref = vif_uuid_map.get('') - for vif in vifs: - other_config = self._session.VIF.get_other_config(vif) - neutron_id = other_config.get('neutron-port-id') - network_ref = vif_uuid_map.get(neutron_id, default_net_ref) - if network_ref is None: - raise exception.MigrationError( - reason=_('No mapping for source network %s') % ( - neutron_id)) - vif_map[vif] = network_ref - return vif_map - - def create_interim_networks(self, network_info): - # Creating an interim bridge in destination host before live_migration - vif_map = {} - for vif in network_info: - network_ref = self.vif_driver.create_vif_interim_network(vif) - vif_map.update({vif['id']: network_ref}) - return vif_map - - def pre_live_migration(self, context, instance, block_device_info, - network_info, disk_info, migrate_data): - migrate_data.sr_uuid_map = self.connect_block_device_volumes( - block_device_info) - migrate_data.vif_uuid_map = self.create_interim_networks(network_info) - LOG.debug("pre_live_migration, vif_uuid_map: %(vif_map)s, " - "sr_uuid_map: %(sr_map)s", - {'vif_map': migrate_data.vif_uuid_map, - 'sr_map': migrate_data.sr_uuid_map}, instance=instance) - return migrate_data - - def live_migrate(self, context, instance, destination_hostname, - post_method, recover_method, block_migration, - migrate_data=None): - try: - vm_ref = self._get_vm_opaque_ref(instance) - # NOTE(sulo): We try to ensure that PV driver information is - # present in xenstore for the instance we are trying to - # live-migrate, if the process of faking pv version info fails, - # we simply log it and carry on with the rest of the process. - # Any xapi error due to PV version are caught and migration - # will be safely reverted by the rollback process. 
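The NOTE(sulo) comment above describes a deliberately best-effort step: failing to fake the PV driver info must not abort the migration, so the exception is only logged and execution continues. A generic sketch of that pattern:

# Run an action, but never let its failure propagate; log and move on.
def best_effort(action, log):
    try:
        action()
    except Exception as exc:
        log("ensuring PV driver info failed: %s" % exc)

def flaky():
    raise RuntimeError("xenstore unavailable")

best_effort(flaky, print)  # logs the failure and continues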
-            try:
-                self._ensure_pv_driver_info_for_live_migration(instance,
-                                                               vm_ref)
-            except Exception as e:
-                LOG.warning(e)
-
-            if migrate_data is not None:
-                (kernel, ramdisk) = vm_utils.lookup_kernel_ramdisk(
-                    self._session, vm_ref)
-                migrate_data.kernel_file = kernel
-                migrate_data.ramdisk_file = ramdisk
-
-            if migrate_data is not None and migrate_data.block_migration:
-                iscsi_srs = self._get_iscsi_srs(context, instance)
-                try:
-                    self._call_live_migrate_command(
-                        "VM.migrate_send", vm_ref, migrate_data)
-                except self._session.XenAPI.Failure:
-                    LOG.exception('Migrate Send failed')
-                    raise exception.MigrationError(
-                        reason=_('Migrate Send failed'))
-
-                # Tidy up the iSCSI SRs
-                for sr_ref in iscsi_srs:
-                    volume_utils.forget_sr(self._session, sr_ref)
-            else:
-                host_ref = self._get_host_opaque_ref(destination_hostname)
-                if not host_ref:
-                    LOG.exception(
-                        "Destination host %s was not found in the same shared "
-                        "storage pool as source host.",
-                        destination_hostname
-                    )
-                    raise exception.MigrationError(
-                        reason=_('No host with name %s found')
-                        % destination_hostname)
-                self._session.call_xenapi("VM.pool_migrate", vm_ref,
-                                          host_ref, {"live": "true"})
-            post_method(context, instance, destination_hostname,
-                        block_migration, migrate_data)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                recover_method(context, instance, destination_hostname,
-                               migrate_data)
-
-    def post_live_migration(self, context, instance, migrate_data=None):
-        if migrate_data is not None:
-            vm_utils.destroy_kernel_ramdisk(self._session, instance,
-                                            migrate_data.kernel_file,
-                                            migrate_data.ramdisk_file)
-
-    def post_live_migration_at_source(self, context, instance, network_info):
-        LOG.debug('post_live_migration_at_source, delete networks and bridges',
-                  instance=instance)
-        self._delete_networks_and_bridges(instance, network_info)
-
-    def post_live_migration_at_destination(self, context, instance,
-                                           network_info, block_migration,
-                                           block_device_info):
-        # hook linux bridge and ovs bridge at destination
-        self._post_start_actions(instance)
-        vm_utils.create_kernel_and_ramdisk(context, self._session, instance,
-                                           instance['name'])
-
-        # NOTE(johngarbutt) workaround XenServer bug CA-98606
-        vm_ref = self._get_vm_opaque_ref(instance)
-        vm_utils.strip_base_mirror_from_vdis(self._session, vm_ref)
-
-    def rollback_live_migration_at_destination(self, instance, network_info,
-                                               block_device_info):
-        bdms = block_device_info['block_device_mapping'] or []
-
-        for bdm in bdms:
-            conn_data = bdm['connection_info']['data']
-            uuid, label, params = volume_utils.parse_sr_info(conn_data)
-            try:
-                sr_ref = volume_utils.find_sr_by_uuid(self._session,
-                                                      uuid)
-
-                if sr_ref:
-                    volume_utils.forget_sr(self._session, sr_ref)
-            except Exception:
-                LOG.exception('Failed to forget the SR for volume %s',
-                              params['id'], instance=instance)
-
-        # delete VIF and network in destination host
-        LOG.debug('rollback_live_migration_at_destination, delete networks '
-                  'and bridges', instance=instance)
-        self._delete_networks_and_bridges(instance, network_info)
-
-    def _delete_networks_and_bridges(self, instance, network_info):
-        # Unplug VIFs and delete networks
-        for vif in network_info:
-            try:
-                self.vif_driver.delete_network_and_bridge(instance, vif['id'])
-            except Exception:
-                LOG.exception(
-                    'Failed to delete networks and bridges with VIF %s',
-                    vif['id'], instance=instance,
-                )
-
-    def get_per_instance_usage(self):
-        """Get usage info about each active instance."""
-        usage = {}
-
-        def _is_active(vm_rec):
-            power_state = vm_rec['power_state'].lower()
-            return power_state in ['running', 'paused']
-
-        def _get_uuid(vm_rec):
-            other_config = vm_rec['other_config']
-            return other_config.get('nova_uuid', None)
-
-        for vm_ref, vm_rec in vm_utils.list_vms(self._session):
-            uuid = _get_uuid(vm_rec)
-
-            if _is_active(vm_rec) and uuid is not None:
-                memory_mb = int(vm_rec['memory_static_max']) / units.Mi
-                usage[uuid] = {'memory_mb': memory_mb, 'uuid': uuid}
-
-        return usage
-
-    def connect_block_device_volumes(self, block_device_info):
-        sr_uuid_map = {}
-        try:
-            if block_device_info is not None:
-                for block_device_map in block_device_info[
-                        'block_device_mapping']:
-                    sr_uuid, _ = self._volumeops.connect_volume(
-                        block_device_map['connection_info'])
-                    sr_ref = self._session.call_xenapi('SR.get_by_uuid',
-                                                       sr_uuid)
-                    sr_uuid_map[sr_uuid] = sr_ref
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                # Disconnect the volumes we just connected
-                for sr_ref in six.itervalues(sr_uuid_map):
-                    volume_utils.forget_sr(self._session, sr_ref)
-
-        return sr_uuid_map
-
-    def attach_interface(self, instance, vif):
-        LOG.debug("Attach interface, vif info: %s", vif, instance=instance)
-        vm_ref = self._get_vm_opaque_ref(instance)
-
-        @utils.synchronized('xenapi-vif-' + vm_ref)
-        def _attach_interface(instance, vm_ref, vif):
-            # find device for use with XenAPI
-            allowed_devices = self._session.VM.get_allowed_VIF_devices(vm_ref)
-            if allowed_devices is None or len(allowed_devices) == 0:
-                raise exception.InterfaceAttachFailed(
-                    _('attach network interface %(vif_id)s to instance '
-                      '%(instance_uuid)s failed, no allowed devices.'),
-                    vif_id=vif['id'], instance_uuid=instance.uuid)
-            device = allowed_devices[0]
-            try:
-                # plug VIF
-                self.vif_driver.plug(instance, vif, vm_ref=vm_ref,
-                                     device=device)
-            except exception.NovaException:
-                with excutils.save_and_reraise_exception():
-                    LOG.exception('attach network interface %s failed.',
-                                  vif['id'], instance=instance)
-                    try:
-                        self.vif_driver.unplug(instance, vif, vm_ref)
-                    except exception.NovaException:
-                        # if unplug failed, no need to raise exception
-                        LOG.warning('Unplug VIF %s failed.',
-                                    vif['id'], instance=instance)
-
-        _attach_interface(instance, vm_ref, vif)
-
-    def detach_interface(self, instance, vif):
-        LOG.debug("Detach interface, vif info: %s", vif, instance=instance)
-
-        try:
-            vm_ref = self._get_vm_opaque_ref(instance)
-            self.vif_driver.unplug(instance, vif, vm_ref)
-        except exception.InstanceNotFound:
-            # Let this go up to the compute manager which will log a message
-            # for it.
-            raise
-        except exception.NovaException:
-            with excutils.save_and_reraise_exception():
-                LOG.exception('detach network interface %s failed.',
-                              vif['id'], instance=instance)
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
deleted file mode 100644
index 33c1b66de436..000000000000
--- a/nova/virt/xenapi/volume_utils.py
+++ /dev/null
@@ -1,394 +0,0 @@
-# Copyright (c) 2010 Citrix Systems, Inc.
-# Copyright (c) 2013 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Helper methods for operations related to the management of volumes,
-and storage repositories
-"""
-
-import re
-import uuid
-
-from eventlet import greenthread
-from oslo_log import log as logging
-from oslo_utils import excutils
-from oslo_utils import strutils
-from oslo_utils import versionutils
-
-import nova.conf
-from nova import exception
-from nova.i18n import _
-
-
-CONF = nova.conf.CONF
-
-LOG = logging.getLogger(__name__)
-
-# Namespace for SRs so we can reliably generate a UUID
-# Generated from uuid.uuid5(uuid.UUID(int=0), 'volume_utils-SR_UUID')
-SR_NAMESPACE = uuid.UUID("3cca4135-a809-5bb3-af62-275fbfe87178")
-
-
-def parse_sr_info(connection_data, description=''):
-    params = {}
-    if 'sr_uuid' not in connection_data:
-        params = _parse_volume_info(connection_data)
-        sr_identity = "%s/%s/%s" % (params['target'], params['port'],
-                                    params['targetIQN'])
-        sr_uuid = str(uuid.uuid5(SR_NAMESPACE, sr_identity))
-    else:
-        sr_uuid = connection_data['sr_uuid']
-        for k in connection_data.get('introduce_sr_keys', {}):
-            params[k] = connection_data[k]
-
-    label = connection_data.pop('name_label',
-                                'tempSR-%s' % sr_uuid)
-    params['name_description'] = connection_data.get('name_description',
-                                                     description)
-
-    return (sr_uuid, label, params)
-
-
-def _parse_volume_info(connection_data):
-    """Parse device_path and mountpoint as they can be used by XenAPI.
-    In particular, the mountpoint (e.g. /dev/sdc) must be translated
-    into a numeric literal.
-    """
-    volume_id = connection_data['volume_id']
-    target_portal = connection_data['target_portal']
-    target_host = _get_target_host(target_portal)
-    target_port = _get_target_port(target_portal)
-    target_iqn = connection_data['target_iqn']
-
-    log_params = {
-        "vol_id": volume_id,
-        "host": target_host,
-        "port": target_port,
-        "iqn": target_iqn
-    }
-    LOG.debug('(vol_id,host,port,iqn): '
-              '(%(vol_id)s,%(host)s,%(port)s,%(iqn)s)', log_params)
-
-    if (volume_id is None or
-            target_host is None or
-            target_iqn is None):
-        raise exception.StorageError(
-            reason=_('Unable to obtain target information %s') %
-            strutils.mask_password(connection_data))
-    volume_info = {}
-    volume_info['id'] = volume_id
-    volume_info['target'] = target_host
-    volume_info['port'] = target_port
-    volume_info['targetIQN'] = target_iqn
-    if ('auth_method' in connection_data and
-            connection_data['auth_method'] == 'CHAP'):
-        volume_info['chapuser'] = connection_data['auth_username']
-        volume_info['chappassword'] = connection_data['auth_password']
-
-    return volume_info
-
-
-def _get_target_host(iscsi_string):
-    """Retrieve target host."""
-    if iscsi_string:
-        host = iscsi_string.split(':')[0]
-        if len(host) > 0:
-            return host
-    return CONF.xenserver.target_host
-
-
-def _get_target_port(iscsi_string):
-    """Retrieve target port."""
-    if iscsi_string and ':' in iscsi_string:
-        return iscsi_string.split(':')[1]
-
-    return CONF.xenserver.target_port
-
-
-def introduce_sr(session, sr_uuid, label, params):
-    LOG.debug('Introducing SR %s', label)
-
-    sr_type, sr_desc = _handle_sr_params(params)
-
-    if _requires_backend_kind(session.product_version) and sr_type == 'iscsi':
-        params['backend-kind'] = 'vbd'
-
-    sr_ref = session.call_xenapi('SR.introduce', sr_uuid, label, sr_desc,
-                                 sr_type, '', False, params)
-
-    LOG.debug('Creating PBD for SR')
-    pbd_ref = _create_pbd(session, sr_ref, params)
-
-    LOG.debug('Plugging SR')
-    session.call_xenapi("PBD.plug", pbd_ref)
-
-    session.call_xenapi("SR.scan", sr_ref)
-    return sr_ref
-
-
-def _requires_backend_kind(version):
-    # Fix for Bug #1502929
-    version_as_string = '.'.join(str(v) for v in version)
-    return (versionutils.is_compatible('6.5', version_as_string))
-
-
-def _handle_sr_params(params):
-    if 'id' in params:
-        del params['id']
-
-    sr_type = params.pop('sr_type', 'iscsi')
-    sr_desc = params.pop('name_description', '')
-    return sr_type, sr_desc
-
-
-def _create_pbd(session, sr_ref, params):
-    pbd_rec = {}
-    pbd_rec['host'] = session.host_ref
-    pbd_rec['SR'] = sr_ref
-    pbd_rec['device_config'] = params
-    pbd_ref = session.call_xenapi("PBD.create", pbd_rec)
-    return pbd_ref
-
-
-def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
-    """Introduce VDI in the host."""
-    try:
-        vdi_ref = _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun)
-        if vdi_ref is None:
-            greenthread.sleep(CONF.xenserver.introduce_vdi_retry_wait)
-            session.call_xenapi("SR.scan", sr_ref)
-            vdi_ref = _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun)
-    except session.XenAPI.Failure:
-        LOG.exception('Unable to introduce VDI on SR')
-        raise exception.StorageError(
-            reason=_('Unable to introduce VDI on SR %s') % sr_ref)
-
-    if not vdi_ref:
-        raise exception.StorageError(
-            reason=_('VDI not found on SR %(sr)s (vdi_uuid '
-                     '%(vdi_uuid)s, target_lun %(target_lun)s)') %
-            {'sr': sr_ref, 'vdi_uuid': vdi_uuid,
-             'target_lun': target_lun})
-
-    try:
-        vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
-        LOG.debug(vdi_rec)
-    except session.XenAPI.Failure:
-        LOG.exception('Unable to get record of VDI')
-        raise exception.StorageError(
-            reason=_('Unable to get record of VDI %s on') % vdi_ref)
-
-    if vdi_rec['managed']:
-        # We do not need to introduce the vdi
-        return vdi_ref
-
-    try:
-        return session.call_xenapi("VDI.introduce",
-                                   vdi_rec['uuid'],
-                                   vdi_rec['name_label'],
-                                   vdi_rec['name_description'],
-                                   vdi_rec['SR'],
-                                   vdi_rec['type'],
-                                   vdi_rec['sharable'],
-                                   vdi_rec['read_only'],
-                                   vdi_rec['other_config'],
-                                   vdi_rec['location'],
-                                   vdi_rec['xenstore_data'],
-                                   vdi_rec['sm_config'])
-    except session.XenAPI.Failure:
-        LOG.exception('Unable to introduce VDI for SR')
-        raise exception.StorageError(
-            reason=_('Unable to introduce VDI for SR %s') % sr_ref)
-
-
-def _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun):
-    if vdi_uuid:
-        LOG.debug("vdi_uuid: %s", vdi_uuid)
-        return session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
-    elif target_lun:
-        vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref)
-        for curr_ref in vdi_refs:
-            curr_rec = session.call_xenapi("VDI.get_record", curr_ref)
-            if ('sm_config' in curr_rec and
-                    'LUNid' in curr_rec['sm_config'] and
-                    curr_rec['sm_config']['LUNid'] == str(target_lun)):
-                return curr_ref
-    else:
-        return (session.call_xenapi("SR.get_VDIs", sr_ref))[0]
-
-    return None
-
-
-def purge_sr(session, sr_ref):
-    # Make sure no VBDs are referencing the SR VDIs
-    vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref)
-    for vdi_ref in vdi_refs:
-        vbd_refs = session.call_xenapi("VDI.get_VBDs", vdi_ref)
-        if vbd_refs:
-            LOG.warning('Cannot purge SR with referenced VDIs')
-            return
-
-    forget_sr(session, sr_ref)
-
-
-def forget_sr(session, sr_ref):
-    """Forgets the storage repository without destroying the VDIs within."""
-    LOG.debug('Forgetting SR...')
-    _unplug_pbds(session, sr_ref)
-    session.call_xenapi("SR.forget", sr_ref)
-
-
-def _unplug_pbds(session, sr_ref):
-    try:
-        pbds = session.call_xenapi("SR.get_PBDs", sr_ref)
-    except session.XenAPI.Failure as exc:
-        LOG.warning('Ignoring exception %(exc)s when getting PBDs'
-                    ' for %(sr_ref)s', {'exc': exc, 'sr_ref': sr_ref})
-        return
-
-    for pbd in pbds:
-        try:
-            session.call_xenapi("PBD.unplug", pbd)
-        except session.XenAPI.Failure as exc:
-            LOG.warning('Ignoring exception %(exc)s when unplugging'
-                        ' PBD %(pbd)s', {'exc': exc, 'pbd': pbd})
-
-
-def get_device_number(mountpoint):
-    device_number = _mountpoint_to_number(mountpoint)
-    if device_number < 0:
-        raise exception.StorageError(
-            reason=_('Unable to obtain target information %s') %
-            mountpoint)
-    return device_number
-
-
-def _mountpoint_to_number(mountpoint):
-    """Translate a mountpoint like /dev/sdc into a numeric."""
-    if mountpoint.startswith('/dev/'):
-        mountpoint = mountpoint[5:]
-    if re.match('^[hs]d[a-p]$', mountpoint):
-        return (ord(mountpoint[2:3]) - ord('a'))
-    elif re.match('^x?vd[a-p]$', mountpoint):
-        return (ord(mountpoint[-1]) - ord('a'))
-    elif re.match('^[0-9]+$', mountpoint):
-        return int(mountpoint, 10)
-    else:
-        LOG.warning('Mountpoint cannot be translated: %s', mountpoint)
-        return -1
-
-
-def find_sr_by_uuid(session, sr_uuid):
-    """Return the storage repository given a uuid."""
-    try:
-        return session.call_xenapi("SR.get_by_uuid", sr_uuid)
-    except session.XenAPI.Failure as exc:
-        if exc.details[0] == 'UUID_INVALID':
-            return None
-        raise
-
-
-def find_sr_from_vbd(session, vbd_ref):
-    """Find the SR reference from the VBD reference."""
-    try:
-        vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
-        sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref)
-    except session.XenAPI.Failure:
-        LOG.exception('Unable to find SR from VBD')
-        raise exception.StorageError(
-            reason=_('Unable to find SR from VBD %s') % vbd_ref)
-    return sr_ref
-
-
-def find_sr_from_vdi(session, vdi_ref):
-    """Find the SR reference from the VDI reference."""
-    try:
-        sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref)
-    except session.XenAPI.Failure:
-        LOG.exception('Unable to find SR from VDI')
-        raise exception.StorageError(
-            reason=_('Unable to find SR from VDI %s') % vdi_ref)
-    return sr_ref
-
-
-def find_vbd_by_number(session, vm_ref, dev_number):
-    """Get the VBD reference from the device number."""
-    vbd_refs = session.VM.get_VBDs(vm_ref)
-    requested_device = str(dev_number)
-    if vbd_refs:
-        for vbd_ref in vbd_refs:
-            try:
-                user_device = session.VBD.get_userdevice(vbd_ref)
-                if user_device == requested_device:
-                    return vbd_ref
-            except session.XenAPI.Failure:
-                msg = "Error looking up VBD %s for %s" % (vbd_ref, vm_ref)
-                LOG.debug(msg, exc_info=True)
-
-
-def is_booted_from_volume(session, vm_ref, user_device=0):
-    """Determine if the root device is a volume."""
-    # TODO(bkaminski): We have opened the scope of this method to accept
-    # userdevice. We should rename this method and its references for clarity.
-    vbd_ref = find_vbd_by_number(session, vm_ref, user_device)
-    vbd_other_config = session.VBD.get_other_config(vbd_ref)
-    if vbd_other_config.get('osvol', False):
-        return True
-    return False
-
-
-def _get_vdi_import_path(session, task_ref, vdi_ref, disk_format):
-    session_id = session.get_session_id()
-    str_fmt = '/import_raw_vdi?session_id={}&task_id={}&vdi={}&format={}'
-    return str_fmt.format(session_id, task_ref, vdi_ref, disk_format)
-
-
-def _stream_to_vdi(conn, vdi_import_path, file_size, file_obj):
-    headers = {'Content-Type': 'application/octet-stream',
-               'Content-Length': '%s' % file_size}
-
-    CHUNK_SIZE = 16 * 1024
-    LOG.debug('Initialising PUT request to %s (Headers: %s)',
-              vdi_import_path, headers)
-    conn.request('PUT', vdi_import_path, headers=headers)
-    remain_size = file_size
-    while remain_size >= CHUNK_SIZE:
-        trunk = file_obj.read(CHUNK_SIZE)
-        remain_size -= CHUNK_SIZE
-        conn.send(trunk)
-    if remain_size != 0:
-        trunk = file_obj.read(remain_size)
-        conn.send(trunk)
-    resp = conn.getresponse()
-    LOG.debug("Connection response status:reason is "
-              "%(status)s:%(reason)s",
-              {'status': resp.status, 'reason': resp.reason})
-
-
-def stream_to_vdi(session, instance, disk_format,
-                  file_obj, file_size, vdi_ref):
-
-    task_name_label = 'VDI_IMPORT_for_' + instance['name']
-    with session.custom_task(task_name_label) as task_ref:
-        vdi_import_path = _get_vdi_import_path(session, task_ref, vdi_ref,
-                                               disk_format)
-
-        with session.http_connection() as conn:
-            try:
-                _stream_to_vdi(conn, vdi_import_path, file_size, file_obj)
-            except Exception as e:
-                with excutils.save_and_reraise_exception():
-                    LOG.error('Streaming disk to VDI failed with error: %s',
-                              e, instance=instance)
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
deleted file mode 100644
index 9195be10a73e..000000000000
--- a/nova/virt/xenapi/volumeops.py
+++ /dev/null
@@ -1,226 +0,0 @@
-# Copyright (c) 2010 Citrix Systems, Inc.
-# Copyright (c) 2013 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Management class for Storage-related functions (attach, detach, etc).
-"""
-
-from oslo_log import log as logging
-from oslo_utils import excutils
-from oslo_utils import strutils
-
-from nova import exception
-from nova.virt.xenapi import vm_utils
-from nova.virt.xenapi import volume_utils
-
-
-LOG = logging.getLogger(__name__)
-
-
-class VolumeOps(object):
-    """Management class for Volume-related tasks."""
-
-    def __init__(self, session):
-        self._session = session
-
-    def attach_volume(self, connection_info, instance_name, mountpoint,
-                      hotplug=True):
-        """Attach volume to VM instance."""
-        vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
-        return self._attach_volume(connection_info, vm_ref,
-                                   instance_name, mountpoint, hotplug)
-
-    def connect_volume(self, connection_info):
-        """Attach volume to hypervisor, but not the VM."""
-        return self._attach_volume(connection_info)
-
-    def _attach_volume(self, connection_info, vm_ref=None, instance_name=None,
-                       dev_number=None, hotplug=False):
-
-        self._check_is_supported_driver_type(connection_info)
-
-        connection_data = connection_info['data']
-        sr_ref, sr_uuid = self._connect_to_volume_provider(connection_data,
-                                                           instance_name)
-        try:
-            vdi_ref = self._connect_hypervisor_to_volume(sr_ref,
                                                         connection_data)
-            vdi_uuid = self._session.VDI.get_uuid(vdi_ref)
-            LOG.info('Connected volume (vdi_uuid): %s', vdi_uuid)
-
-            if vm_ref:
-                self._attach_volume_to_vm(vdi_ref, vm_ref, instance_name,
-                                          dev_number, hotplug)
-
-            return (sr_uuid, vdi_uuid)
-        except Exception:
-            with excutils.save_and_reraise_exception():
-                # NOTE(sirp): Forgetting the SR will have the effect of
-                # cleaning up the VDI and VBD records, so no need to handle
-                # that explicitly.
-                volume_utils.forget_sr(self._session, sr_ref)
-
-    def _check_is_supported_driver_type(self, connection_info):
-        driver_type = connection_info['driver_volume_type']
-        if driver_type not in ['iscsi', 'xensm']:
-            raise exception.VolumeDriverNotFound(driver_type=driver_type)
-
-    def _connect_to_volume_provider(self, connection_data, instance_name):
-        sr_uuid, sr_label, sr_params = volume_utils.parse_sr_info(
-            connection_data, 'Disk-for:%s' % instance_name)
-        sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
-        if not sr_ref:
-            # introduce SR because not already present
-            sr_ref = volume_utils.introduce_sr(
-                self._session, sr_uuid, sr_label, sr_params)
-        return (sr_ref, sr_uuid)
-
-    def _connect_hypervisor_to_volume(self, sr_ref, connection_data):
-        # connection_data can have credentials in it so make sure to scrub
-        # those before logging.
-        LOG.debug("Connect volume to hypervisor: %s",
-                  strutils.mask_password(connection_data))
-        if 'vdi_uuid' in connection_data:
-            vdi_ref = volume_utils.introduce_vdi(
-                self._session, sr_ref,
-                vdi_uuid=connection_data['vdi_uuid'])
-
-        elif 'target_lun' in connection_data:
-            vdi_ref = volume_utils.introduce_vdi(
-                self._session, sr_ref,
-                target_lun=connection_data['target_lun'])
-
-        else:
-            # NOTE(sirp): This will introduce the first VDI in the SR
-            vdi_ref = volume_utils.introduce_vdi(self._session, sr_ref)
-
-        return vdi_ref
-
-    def _attach_volume_to_vm(self, vdi_ref, vm_ref, instance_name, mountpoint,
-                             hotplug):
-        LOG.debug('Attach_volume vdi: %(vdi_ref)s vm: %(vm_ref)s',
-                  {'vdi_ref': vdi_ref, 'vm_ref': vm_ref})
-
-        dev_number = volume_utils.get_device_number(mountpoint)
-
-        # osvol is added to the vbd so we can spot which vbds are volumes
-        vbd_ref = vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
-                                      dev_number, bootable=False,
-                                      osvol=True)
-        if hotplug:
-            # NOTE(johngarbutt) can only call VBD.plug on a running vm
-            running = not vm_utils.is_vm_shutdown(self._session, vm_ref)
-            if running:
-                LOG.debug("Plugging VBD: %s", vbd_ref)
-                self._session.VBD.plug(vbd_ref, vm_ref)
-
-        LOG.info('Dev %(dev_number)s attached to'
-                 ' instance %(instance_name)s',
-                 {'instance_name': instance_name, 'dev_number': dev_number})
-
-    def detach_volume(self, connection_info, instance_name, mountpoint):
-        """Detach volume storage to VM instance."""
-        LOG.debug("Detach_volume: %(instance_name)s, %(mountpoint)s",
-                  {'instance_name': instance_name, 'mountpoint': mountpoint})
-
-        vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
-
-        device_number = volume_utils.get_device_number(mountpoint)
-        vbd_ref = volume_utils.find_vbd_by_number(self._session, vm_ref,
-                                                  device_number)
-
-        if vbd_ref is None:
-            # NOTE(sirp): If we don't find the VBD then it must have been
-            # detached previously.
-            LOG.warning('Skipping detach because VBD for %s was not found',
-                        instance_name)
-        else:
-            self._detach_vbds_and_srs(vm_ref, [vbd_ref])
-            LOG.info('Mountpoint %(mountpoint)s detached from instance'
-                     ' %(instance_name)s',
-                     {'instance_name': instance_name,
-                      'mountpoint': mountpoint})
-
-    def _detach_vbds_and_srs(self, vm_ref, vbd_refs):
-        is_vm_shutdown = vm_utils.is_vm_shutdown(self._session, vm_ref)
-
-        for vbd_ref in vbd_refs:
-            # find sr before we destroy the vbd
-            sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
-
-            if not is_vm_shutdown:
-                vm_utils.unplug_vbd(self._session, vbd_ref, vm_ref)
-
-            vm_utils.destroy_vbd(self._session, vbd_ref)
-            # Forget (i.e. disconnect) SR only if not in use
-            volume_utils.purge_sr(self._session, sr_ref)
-
-    def detach_all(self, vm_ref):
-        """Detach all cinder volumes."""
-        vbd_refs = self._get_all_volume_vbd_refs(vm_ref)
-        if vbd_refs:
-            self._detach_vbds_and_srs(vm_ref, vbd_refs)
-
-    def _get_all_volume_vbd_refs(self, vm_ref):
-        """Return VBD refs for all Nova/Cinder volumes."""
-        vbd_refs = self._session.VM.get_VBDs(vm_ref)
-        for vbd_ref in vbd_refs:
-            other_config = self._session.VBD.get_other_config(vbd_ref)
-            if other_config.get('osvol'):
-                yield vbd_ref
-
-    def find_bad_volumes(self, vm_ref):
-        """Find any volumes with their connection severed.
-
-        Certain VM operations (e.g. `VM.start`, `VM.reboot`, etc.) will not
-        work when a VBD is present that points to a non-working volume. To work
-        around this, we scan for non-working volumes and detach them before
-        retrying a failed operation.
-        """
-        bad_devices = []
-        vbd_refs = self._get_all_volume_vbd_refs(vm_ref)
-        for vbd_ref in vbd_refs:
-            sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
-
-            try:
-                # TODO(sirp): bug1152401 This relies on a 120 sec timeout
-                # within XenServer, update this to fail-fast when this is fixed
-                # upstream
-                self._session.SR.scan(sr_ref)
-            except self._session.XenAPI.Failure as exc:
-                if exc.details[0] == 'SR_BACKEND_FAILURE_40':
-                    device = self._session.VBD.get_device(vbd_ref)
-                    bad_devices.append('/dev/%s' % device)
-                else:
-                    raise
-
-        return bad_devices
-
-    def safe_cleanup_from_vdis(self, vdi_refs):
-        # A helper method to detach volumes that are not associated with an
-        # instance
-
-        for vdi_ref in vdi_refs:
-            try:
-                sr_ref = volume_utils.find_sr_from_vdi(self._session, vdi_ref)
-            except exception.StorageError as exc:
-                LOG.debug(exc.format_message())
-                continue
-            try:
-                # Forget (i.e. disconnect) SR only if not in use
-                volume_utils.purge_sr(self._session, sr_ref)
-            except Exception:
-                LOG.debug('Ignoring error while purging sr: %s', sr_ref,
-                          exc_info=True)
diff --git a/releasenotes/notes/remove-xenapi-driver-194756049f22dc9e.yaml b/releasenotes/notes/remove-xenapi-driver-194756049f22dc9e.yaml
new file mode 100644
index 000000000000..25f82ce1e8b3
--- /dev/null
+++ b/releasenotes/notes/remove-xenapi-driver-194756049f22dc9e.yaml
@@ -0,0 +1,46 @@
+---
+upgrade:
+  - |
+    The ``XenAPI`` driver, which was deprecated in the 20.0.0 (Train) release,
+    has now been removed.
+  - |
+    The following config options only apply when using the ``XenAPI`` virt
+    driver, which has now been removed. These config options have therefore
+    also been removed.
+
+    * ``[xenserver] agent_timeout``
+    * ``[xenserver] agent_version_timeout``
+    * ``[xenserver] agent_resetnetwork_timeout``
+    * ``[xenserver] agent_path``
+    * ``[xenserver] disable_agent``
+    * ``[xenserver] use_agent_default``
+    * ``[xenserver] login_timeout``
+    * ``[xenserver] connection_concurrent``
+    * ``[xenserver] cache_images``
+    * ``[xenserver] image_compression_level``
+    * ``[xenserver] default_os_type``
+    * ``[xenserver] block_device_creation_timeout``
+    * ``[xenserver] max_kernel_ramdisk_size``
+    * ``[xenserver] sr_matching_filter``
+    * ``[xenserver] sparse_copy``
+    * ``[xenserver] num_vbd_unplug_retries``
+    * ``[xenserver] ipxe_network_name``
+    * ``[xenserver] ipxe_boot_menu_url``
+    * ``[xenserver] ipxe_mkisofs_cmd``
+    * ``[xenserver] connection_url``
+    * ``[xenserver] connection_username``
+    * ``[xenserver] connection_password``
+    * ``[xenserver] vhd_coalesce_poll_interval``
+    * ``[xenserver] check_host``
+    * ``[xenserver] vhd_coalesce_max_attempts``
+    * ``[xenserver] sr_base_path``
+    * ``[xenserver] target_host``
+    * ``[xenserver] target_port``
+    * ``[xenserver] independent_compute``
+    * ``[xenserver] running_timeout``
+    * ``[xenserver] image_upload_handler``
+    * ``[xenserver] image_handler``
+    * ``[xenserver] introduce_vdi_retry_wait``
+    * ``[xenserver] ovs_integration_bridge``
+    * ``[xenserver] use_join_force``
+    * ``[xenserver] console_public_hostname``