xenapi: Remove driver and tests

Change-Id: I42b302afbb1cfede7a0f7b16485a596cd70baf17
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
This commit is contained in:
Stephen Finucane 2020-08-31 15:01:15 +01:00
parent 19cb983800
commit adb28f503c
43 changed files with 50 additions and 24482 deletions

View File

@@ -65,7 +65,6 @@ from nova.conf import vmware
from nova.conf import vnc
from nova.conf import workarounds
from nova.conf import wsgi
from nova.conf import xenserver
from nova.conf import zvm
CONF = cfg.CONF
@@ -116,5 +115,4 @@ vmware.register_opts(CONF)
vnc.register_opts(CONF)
workarounds.register_opts(CONF)
wsgi.register_opts(CONF)
xenserver.register_opts(CONF)
zvm.register_opts(CONF)

View File

@@ -1,549 +0,0 @@
# Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
from oslo_config import cfg
from oslo_utils import units
# Config group for the XenServer/XenAPI driver; every option list in
# this module is registered under this [xenserver] section.
xenserver_group = cfg.OptGroup('xenserver',
                               title='Xenserver Options',
                               help="""
.. warning:: The xenapi driver is deprecated and may be removed in a future
release. The driver is not tested by the OpenStack project nor
does it have clear maintainer(s) and thus its quality can not be
ensured. If you are using the driver in production please let us
know in freenode IRC and/or the openstack-discuss mailing list.
XenServer options are used when the compute_driver is set to use
XenServer (compute_driver=xenapi.XenAPIDriver).
Must specify connection_url, connection_password and ovs_integration_bridge to
use compute_driver=xenapi.XenAPIDriver.
""")
# Options controlling communication with the in-guest agent, which is
# reached through xenstore records via the xapi 'agent' plugin.
xenapi_agent_opts = [
    cfg.IntOpt('agent_timeout',
        default=30,
        min=0,
        help="""
Number of seconds to wait for agent's reply to a request.
Nova configures/performs certain administrative actions on a server with the
help of an agent that's installed on the server. The communication between
Nova and the agent is achieved via sharing messages, called records, over
xenstore, a shared storage across all the domains on a Xenserver host.
Operations performed by the agent on behalf of nova are: 'version', 'key_init',
'password', 'resetnetwork', 'inject_file', and 'agentupdate'.
To perform one of the above operations, the xapi 'agent' plugin writes the
command and its associated parameters to a certain location known to the domain
and awaits response. On being notified of the message, the agent performs
appropriate actions on the server and writes the result back to xenstore. This
result is then read by the xapi 'agent' plugin to determine the success/failure
of the operation.
This config option determines how long the xapi 'agent' plugin shall wait to
read the response off of xenstore for a given request/command. If the agent on
the instance fails to write the result in this time period, the operation is
considered to have timed out.
Related options:
* ``agent_version_timeout``
* ``agent_resetnetwork_timeout``
"""),
    cfg.IntOpt('agent_version_timeout',
        default=300,
        min=0,
        help="""
Number of seconds to wait for agent's reply to version request.
This indicates the amount of time xapi 'agent' plugin waits for the agent to
respond to the 'version' request specifically. The generic timeout for agent
communication ``agent_timeout`` is ignored in this case.
During the build process the 'version' request is used to determine if the
agent is available/operational to perform other requests such as
'resetnetwork', 'password', 'key_init' and 'inject_file'. If the 'version' call
fails, the other configuration is skipped. So, this configuration option can
also be interpreted as time in which agent is expected to be fully operational.
"""),
    cfg.IntOpt('agent_resetnetwork_timeout',
        default=60,
        min=0,
        help="""
Number of seconds to wait for agent's reply to resetnetwork
request.
This indicates the amount of time xapi 'agent' plugin waits for the agent to
respond to the 'resetnetwork' request specifically. The generic timeout for
agent communication ``agent_timeout`` is ignored in this case.
"""),
    cfg.StrOpt('agent_path',
        default='usr/sbin/xe-update-networking',
        help="""
Path to locate guest agent on the server.
Specifies the path in which the XenAPI guest agent should be located. If the
agent is present, network configuration is not injected into the image.
Related options:
For this option to have an effect:
* ``flat_injected`` should be set to ``True``
* ``compute_driver`` should be set to ``xenapi.XenAPIDriver``
"""),
    cfg.BoolOpt('disable_agent',
        default=False,
        help="""
Disables the use of XenAPI agent.
This configuration option suggests whether the use of agent should be enabled
or not regardless of what image properties are present. Image properties have
an effect only when this is set to ``True``. Read description of config option
``use_agent_default`` for more information.
Related options:
* ``use_agent_default``
"""),
    cfg.BoolOpt('use_agent_default',
        default=False,
        help="""
Whether or not to use the agent by default when its usage is enabled but not
indicated by the image.
The use of XenAPI agent can be disabled altogether using the configuration
option ``disable_agent``. However, if it is not disabled, the use of an agent
can still be controlled by the image in use through one of its properties,
``xenapi_use_agent``. If this property is either not present or specified
incorrectly on the image, the use of agent is determined by this configuration
option.
Note that if this configuration is set to ``True`` when the agent is not
present, the boot times will increase significantly.
Related options:
* ``disable_agent``
"""),
]
# Options governing the XenAPI session itself: login timeout and how
# many concurrent connections the session pool may open.
xenapi_session_opts = [
    cfg.IntOpt('login_timeout',
        default=10,
        min=0,
        help='Timeout in seconds for XenAPI login.'),
    cfg.IntOpt('connection_concurrent',
        default=5,
        min=1,
        help="""
Maximum number of concurrent XenAPI connections.
In nova, multiple XenAPI requests can happen at a time.
Configuring this option will parallelize access to the XenAPI
session, which allows you to make concurrent XenAPI connections.
"""),
]
# Options consumed by nova.virt.xenapi.vm_utils: image caching and
# compression, SR selection, block-device timing, and iPXE ISO boot.
xenapi_vm_utils_opts = [
    cfg.StrOpt('cache_images',
        default='all',
        choices=[
            ('all', 'Will cache all images'),
            ('some', 'Will only cache images that have the image_property '
                     '``cache_in_nova=True``'),
            ('none', 'Turns off caching entirely')],
        help="""
Cache glance images locally.
The value for this option must be chosen from the choices listed
here. Configuring a value other than these will default to 'all'.
Note: There is nothing that deletes these images.
"""),
    cfg.IntOpt('image_compression_level',
        min=1,
        max=9,
        help="""
Compression level for images.
By setting this option we can configure the gzip compression level.
This option sets GZIP environment variable before spawning tar -cz
to force the compression level. It defaults to none, which means the
GZIP environment variable is not set and the default (usually -6)
is used.
Possible values:
* Range is 1-9, e.g., 9 for gzip -9, 9 being most
compressed but most CPU intensive on dom0.
* Any values out of this range will default to None.
"""),
    cfg.StrOpt('default_os_type',
        default='linux',
        help='Default OS type used when uploading an image to glance'),
    cfg.IntOpt('block_device_creation_timeout',
        default=10,
        min=1,
        help='Time in secs to wait for a block device to be created'),
    cfg.IntOpt('max_kernel_ramdisk_size',
        # 16 MiB via oslo.utils units
        default=16 * units.Mi,
        help="""
Maximum size in bytes of kernel or ramdisk images.
Specifying the maximum size of kernel or ramdisk will avoid copying
large files to dom0 and fill up /boot/guest.
"""),
    cfg.StrOpt('sr_matching_filter',
        default='default-sr:true',
        help="""
Filter for finding the SR to be used to install guest instances on.
Possible values:
* To use the Local Storage in default XenServer/XCP installations
set this flag to other-config:i18n-key=local-storage.
* To select an SR with a different matching criteria, you could
set it to other-config:my_favorite_sr=true.
* To fall back on the Default SR, as displayed by XenCenter,
set this flag to: default-sr:true.
"""),
    cfg.BoolOpt('sparse_copy',
        default=True,
        help="""
Whether to use sparse_copy for copying data on a resize down.
(False will use standard dd). This speeds up resizes down
considerably since large runs of zeros won't have to be rsynced.
"""),
    cfg.IntOpt('num_vbd_unplug_retries',
        default=10,
        min=0,
        help="""
Maximum number of retries to unplug VBD.
If set to 0, should try once, no retries.
"""),
    cfg.StrOpt('ipxe_network_name',
        help="""
Name of network to use for booting iPXE ISOs.
An iPXE ISO is a specially crafted ISO which supports iPXE booting.
This feature gives a means to roll your own image.
By default this option is not set. Enable this option to
boot an iPXE ISO.
Related Options:
* `ipxe_boot_menu_url`
* `ipxe_mkisofs_cmd`
"""),
    cfg.StrOpt('ipxe_boot_menu_url',
        help="""
URL to the iPXE boot menu.
An iPXE ISO is a specially crafted ISO which supports iPXE booting.
This feature gives a means to roll your own image.
By default this option is not set. Enable this option to
boot an iPXE ISO.
Related Options:
* `ipxe_network_name`
* `ipxe_mkisofs_cmd`
"""),
    cfg.StrOpt('ipxe_mkisofs_cmd',
        default='mkisofs',
        help="""
Name and optionally path of the tool used for ISO image creation.
An iPXE ISO is a specially crafted ISO which supports iPXE booting.
This feature gives a means to roll your own image.
Note: By default `mkisofs` is not present in the Dom0, so the
package can either be manually added to Dom0 or include the
`mkisofs` binary in the image itself.
Related Options:
* `ipxe_network_name`
* `ipxe_boot_menu_url`
"""),
]
# Core XenAPI connection options plus iSCSI target selection and
# VHD-coalesce polling tuning.
xenapi_opts = [
    cfg.StrOpt('connection_url',
        help="""
URL for connection to XenServer/Xen Cloud Platform. A special value
of unix://local can be used to connect to the local unix socket.
Possible values:
* Any string that represents a URL. The connection_url is
generally the management network IP address of the XenServer.
* This option must be set if you chose the XenServer driver.
"""),
    cfg.StrOpt('connection_username',
        default='root',
        help='Username for connection to XenServer/Xen Cloud Platform'),
    cfg.StrOpt('connection_password',
        secret=True,
        help='Password for connection to XenServer/Xen Cloud Platform'),
    cfg.FloatOpt('vhd_coalesce_poll_interval',
        default=5.0,
        min=0,
        help="""
The interval used for polling of coalescing vhds.
This is the interval after which the task of coalesce VHD is
performed, until it reaches the max attempts that is set by
vhd_coalesce_max_attempts.
Related options:
* `vhd_coalesce_max_attempts`
"""),
    cfg.BoolOpt('check_host',
        default=True,
        help="""
Ensure compute service is running on host XenAPI connects to.
This option must be set to false if the 'independent_compute'
option is set to true.
Possible values:
* Setting this option to true will make sure that compute service
is running on the same host that is specified by connection_url.
* Setting this option to false, doesn't perform the check.
Related options:
* `independent_compute`
"""),
    cfg.IntOpt('vhd_coalesce_max_attempts',
        default=20,
        min=0,
        help="""
Max number of times to poll for VHD to coalesce.
This option determines the maximum number of attempts that can be
made for coalescing the VHD before giving up.
Related options:
* `vhd_coalesce_poll_interval`
"""),
    cfg.StrOpt('sr_base_path',
        default='/var/run/sr-mount',
        help='Base path to the storage repository on the XenServer host.'),
    cfg.HostAddressOpt('target_host',
        help="""
The iSCSI Target Host.
This option represents the hostname or ip of the iSCSI Target.
If the target host is not present in the connection information from
the volume provider then the value from this option is taken.
Possible values:
* Any string that represents hostname/ip of Target.
"""),
    cfg.PortOpt('target_port',
        default=3260,
        help="""
The iSCSI Target Port.
This option represents the port of the iSCSI Target. If the
target port is not present in the connection information from the
volume provider then the value from this option is taken.
"""),
    cfg.BoolOpt('independent_compute',
        default=False,
        help="""
Used to prevent attempts to attach VBDs locally, so Nova can
be run in a VM on a different host.
Related options:
* ``CONF.flat_injected`` (Must be False)
* ``CONF.xenserver.check_host`` (Must be False)
* ``CONF.default_ephemeral_format`` (Must be unset or 'ext3')
* Joining host aggregates (will error if attempted)
* Swap disks for Windows VMs (will error if attempted)
* Nova-based auto_configure_disk (will error if attempted)
""")
]
# Options consumed by nova.virt.xenapi.vmops: spawn wait time and the
# image upload/download handler plugins.
xenapi_vmops_opts = [
    cfg.IntOpt('running_timeout',
        default=60,
        min=0,
        help="""
Wait time for instances to go to running state.
Provide an integer value representing time in seconds to set the
wait time for an instance to go to running state.
When a request to create an instance is received by nova-api and
communicated to nova-compute, the creation of the instance occurs
through interaction with Xen via XenAPI in the compute node. Once
the node on which the instance(s) are to be launched is decided by
nova-schedule and the launch is triggered, a certain amount of wait
time is involved until the instance(s) can become available and
'running'. This wait time is defined by running_timeout. If the
instances do not go to running state within this specified wait
time, the launch expires and the instance(s) are set to 'error'
state.
"""),
    # TODO(dharinic): Make this a stevedore plugin
    cfg.StrOpt('image_upload_handler',
        default='',
        deprecated_for_removal=True,
        deprecated_since='18.0.0',
        deprecated_reason="""
Instead of setting the class path here, we will use short names
to represent image handlers. The download and upload handlers
must also be matching. So another new option "image_handler"
will be used to set the short name for a specific image handler
for both image download and upload.
""",
        help="""
Dom0 plugin driver used to handle image uploads.
Provide a string value representing a plugin driver required to
handle the image uploading to GlanceStore.
Images, and snapshots from XenServer need to be uploaded to the data
store for use. image_upload_handler takes in a value for the Dom0
plugin driver. This driver is then called to upload images to the
GlanceStore.
"""),
    cfg.StrOpt('image_handler',
        default='direct_vhd',
        choices=[
            ('direct_vhd', 'This plugin directly processes the VHD files in '
                'XenServer SR(Storage Repository). So this plugin only works '
                'when the host\'s SR type is file system based e.g. ext, nfs.'),
            ('vdi_local_dev', 'This plugin implements an image handler which '
                'attaches the instance\'s VDI as a local disk to the VM where '
                'the OpenStack Compute service runs. It uploads the raw disk '
                'to glance when creating image; when booting an instance from a '
                'glance image, it downloads the image and streams it into the '
                'disk which is attached to the compute VM.'),
            ('vdi_remote_stream', 'This plugin implements an image handler '
                'which works as a proxy between glance and XenServer. The VHD '
                'streams to XenServer via a remote import API supplied by XAPI '
                'for image download; and for image upload, the VHD streams from '
                'XenServer via a remote export API supplied by XAPI. This '
                'plugin works for all SR types supported by XenServer.'),
        ],
        help="""
The plugin used to handle image uploads and downloads.
Provide a short name representing an image driver required to
handle the image between compute host and glance.
"""),
]
# Option consumed by nova.virt.xenapi.volume_utils.
xenapi_volume_utils_opts = [
    cfg.IntOpt('introduce_vdi_retry_wait',
        default=20,
        min=0,
        help="""
Number of seconds to wait for SR to settle if the VDI
does not exist when first introduced.
Some SRs, particularly iSCSI connections are slow to see the VDIs
right after they got introduced. Setting this option to a
time interval will make the SR to wait for that time period
before raising VDI not found exception.
""")
]
# Open vSwitch integration bridge name (no default; deployment
# specific).
xenapi_ovs_integration_bridge_opts = [
    cfg.StrOpt('ovs_integration_bridge',
        help="""
The name of the integration Bridge that is used with xenapi
when connecting with Open vSwitch.
Note: The value of this config option is dependent on the
environment, therefore this configuration value must be set
accordingly if you are using XenAPI.
Possible values:
* Any string that represents a bridge name.
"""),
]
# Pool-join behaviour when adding a host to a XenServer pool.
xenapi_pool_opts = [
    # TODO(macsz): This should be deprecated. Until providing solid reason,
    # leaving it as-it-is.
    cfg.BoolOpt('use_join_force',
        default=True,
        help="""
When adding new host to a pool, this will append a --force flag to the
command, forcing hosts to join a pool, even if they have different CPUs.
Since XenServer version 5.6 it is possible to create a pool of hosts that have
different CPU capabilities. To accommodate CPU differences, XenServer limited
features it uses to determine CPU compatibility to only the ones that are
exposed by CPU and support for CPU masking was added.
Despite this effort to level differences between CPUs, it is still possible
that adding new host will fail, thus option to force join was introduced.
"""),
]
xenapi_console_opts = [
    cfg.StrOpt('console_public_hostname',
        # NOTE: socket.gethostname() runs once at module import time;
        # sample_default keeps the generated sample config stable.
        default=socket.gethostname(),
        sample_default='<current_hostname>',
        deprecated_group='DEFAULT',
        help="""
Publicly visible name for this console host.
Possible values:
* Current hostname (default) or any string representing hostname.
"""),
]
# Aggregate of every option list above; registered as a unit under the
# [xenserver] group by register_opts().
ALL_XENSERVER_OPTS = (xenapi_agent_opts +
                      xenapi_session_opts +
                      xenapi_vm_utils_opts +
                      xenapi_opts +
                      xenapi_vmops_opts +
                      xenapi_volume_utils_opts +
                      xenapi_ovs_integration_bridge_opts +
                      xenapi_pool_opts +
                      xenapi_console_opts)
def register_opts(conf):
    """Register the [xenserver] group and all its options on *conf*."""
    conf.register_group(xenserver_group)
    conf.register_opts(ALL_XENSERVER_OPTS, group=xenserver_group)
def list_opts():
    """Return the group-to-options mapping for oslo.config's generator."""
    return {xenserver_group: ALL_XENSERVER_OPTS}

View File

@@ -713,19 +713,11 @@ class ImageDeleteConflict(NovaException):
msg_fmt = _("Conflict deleting image. Reason: %(reason)s.")
class ImageHandlerUnsupported(NovaException):
    """An unsupported image handler name was requested."""
    msg_fmt = _("Error: unsupported image handler %(image_handler)s.")
class PreserveEphemeralNotSupported(Invalid):
    """The current virt driver cannot preserve ephemeral partitions."""
    msg_fmt = _("The current driver does not support "
                "preserving ephemeral partitions.")
class StorageRepositoryNotFound(NotFound):
    """No storage repository (SR) was found to read/write the VDI."""
    msg_fmt = _("Cannot find SR to read/write VDI.")
class InstanceMappingNotFound(NotFound):
    """The instance has no mapping to a cell."""
    msg_fmt = _("Instance %(uuid)s has no mapping to a cell.")
@@ -1193,10 +1185,6 @@ class BootFromVolumeRequiredForZeroDiskFlavor(Forbidden):
"zero disk.")
class InsufficientFreeMemory(NovaException):
    """The compute node lacks free memory to start the instance."""
    msg_fmt = _("Insufficient free memory on compute node to start %(uuid)s.")
class NoValidHost(NovaException):
    """No host satisfying the request could be found."""
    msg_fmt = _("No valid host was found. %(reason)s")
@@ -1255,6 +1243,7 @@ class PortLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of ports exceeded")
# TODO(stephenfin): Remove this XenAPI relic
class AggregateError(NovaException):
    """An action on a host aggregate failed."""
    msg_fmt = _("Aggregate %(aggregate_id)s: action '%(action)s' "
                "caused an error: %(reason)s.")
@@ -1458,19 +1447,6 @@ class ObjectActionError(NovaException):
msg_fmt = _('Object action %(action)s failed because: %(reason)s')
class AgentError(NovaException):
    """Base error for calls made to the guest agent."""
    msg_fmt = _('Error during following call to agent: %(method)s')
class AgentTimeout(AgentError):
    """A call to the guest agent timed out."""
    msg_fmt = _('Unable to contact guest agent. '
                'The following call timed out: %(method)s')
class AgentNotImplemented(AgentError):
    """The guest agent does not support the requested call."""
    msg_fmt = _('Agent does not support the call: %(method)s')
class InstanceGroupNotFound(NotFound):
    """The requested instance group does not exist."""
    msg_fmt = _("Instance group %(group_uuid)s could not be found.")

View File

@@ -1,37 +0,0 @@
# Copyright 2018 Michael Still and Aptira
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
xenapi specific routines.
"""
from oslo_concurrency import processutils
import nova.privsep
@nova.privsep.sys_admin_pctxt.entrypoint
def xenstore_read(path):
    """Read a xenstore path via the xenstore-read CLI, as root.

    Returns the (stdout, stderr) tuple produced by
    processutils.execute.
    """
    return processutils.execute('xenstore-read', path)
@nova.privsep.sys_admin_pctxt.entrypoint
def block_copy(src_path, dst_path, block_size, num_blocks):
    """Copy ``num_blocks`` blocks of ``block_size`` bytes from
    ``src_path`` to ``dst_path`` using dd, as root, with the
    ``direct,sync`` input/output flags.
    """
    processutils.execute('dd',
                         'if=%s' % src_path,
                         'of=%s' % dst_path,
                         'bs=%d' % block_size,
                         'count=%d' % num_blocks,
                         'iflag=direct,sync',
                         'oflag=direct,sync')

View File

@@ -1,71 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for expectations of behaviour from the Xen driver."""
import mock
from nova.compute import manager
from nova.compute import power_state
from nova import context
from nova import objects
from nova.objects import instance as instance_obj
from nova.tests.unit.compute import eventlet_utils
from nova.tests.unit import fake_instance
from nova.tests.unit.virt.xenapi import stubs
from nova.virt.xenapi import vm_utils
class ComputeXenTestCase(stubs.XenAPITestBaseNoDB):
    """Compute-manager behaviour tests specific to the Xen driver."""

    def setUp(self):
        super(ComputeXenTestCase, self).setUp()
        self.flags(compute_driver='xenapi.XenAPIDriver')
        self.flags(connection_url='http://localhost',
                   connection_password='test_pass',
                   group='xenserver')
        stubs.stubout_session(self, stubs.FakeSessionForVMTests)
        self.compute = manager.ComputeManager()
        # execute power syncing synchronously for testing:
        self.compute._sync_power_pool = eventlet_utils.SyncPool()

    def test_sync_power_states_instance_not_found(self):
        """When vm_utils.lookup returns None for an instance,
        _sync_power_states must report power_state.NOSTATE for it."""
        db_instance = fake_instance.fake_db_instance()
        ctxt = context.get_admin_context()
        instance_list = instance_obj._make_instance_list(ctxt,
                objects.InstanceList(), [db_instance], None)
        instance = instance_list[0]

        @mock.patch.object(vm_utils, 'lookup')
        @mock.patch.object(objects.InstanceList, 'get_by_host')
        @mock.patch.object(self.compute.driver, 'get_num_instances')
        @mock.patch.object(self.compute, '_sync_instance_power_state')
        def do_test(mock_compute_sync_powerstate,
                    mock_compute_get_num_instances,
                    mock_instance_list_get_by_host,
                    mock_vm_utils_lookup):
            mock_instance_list_get_by_host.return_value = instance_list
            mock_compute_get_num_instances.return_value = 1
            # simulate the VM not existing on the hypervisor
            mock_vm_utils_lookup.return_value = None

            self.compute._sync_power_states(ctxt)

            mock_instance_list_get_by_host.assert_called_once_with(
                ctxt, self.compute.host, expected_attrs=[], use_slave=True)
            mock_compute_get_num_instances.assert_called_once_with()
            mock_compute_sync_powerstate.assert_called_once_with(
                ctxt, instance, power_state.NOSTATE, use_slave=True)
            mock_vm_utils_lookup.assert_called_once_with(
                self.compute.driver._session, instance['name'],
                False)

        do_test()

View File

@@ -19727,17 +19727,15 @@ class LibvirtConnTestCase(test.NoDBTestCase,
def test_unplug_vifs_ignores_errors(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
with mock.patch.object(drvr, 'vif_driver') as vif_driver:
vif_driver.unplug.side_effect = exception.AgentError(
method='unplug')
vif_driver.unplug.side_effect = exception.InternalError('foo')
drvr._unplug_vifs('inst', [1], ignore_errors=True)
vif_driver.unplug.assert_called_once_with('inst', 1)
def test_unplug_vifs_reports_errors(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
with mock.patch.object(drvr, 'vif_driver') as vif_driver:
vif_driver.unplug.side_effect = exception.AgentError(
method='unplug')
self.assertRaises(exception.AgentError,
vif_driver.unplug.side_effect = exception.InternalError('foo')
self.assertRaises(exception.InternalError,
drvr.unplug_vifs, 'inst', [1])
vif_driver.unplug.assert_called_once_with('inst', 1)

View File

@@ -1,334 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import time
import mock
from os_xenapi.client import exception as xenapi_exception
from os_xenapi.client import host_glance
from os_xenapi.client import XenAPI
from nova.compute import utils as compute_utils
from nova import context
from nova import exception
from nova.image import glance as common_glance
from nova.tests.unit.virt.xenapi import stubs
from nova import utils
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi.image import glance
from nova.virt.xenapi import vm_utils
class TestGlanceStore(stubs.XenAPITestBaseNoDB):
    def setUp(self):
        """Build a fake XenAPI session/driver and a template instance."""
        super(TestGlanceStore, self).setUp()
        self.store = glance.GlanceStore()

        self.flags(api_servers=['http://localhost:9292'], group='glance')
        self.flags(connection_url='http://localhost',
                   connection_password='test_pass',
                   group='xenserver')

        self.context = context.RequestContext(
            'user', 'project', auth_token='foobar')

        fake.reset()
        stubs.stubout_session(self, fake.SessionBase)
        driver = xenapi_conn.XenAPIDriver(False)
        self.session = driver._session

        self.stub_out('nova.virt.xenapi.vm_utils.get_sr_path',
                      lambda *a, **kw: '/fake/sr/path')

        self.instance = {'uuid': 'blah',
                         'system_metadata': [],
                         'auto_disk_config': True,
                         'os_type': 'default',
                         'xenapi_use_agent': 'true'}
    def _get_params(self):
        """Common kwargs expected by the dom0 glance plugin calls."""
        return {'image_id': 'fake_image_uuid',
                'endpoint': 'http://localhost:9292',
                'sr_path': '/fake/sr/path',
                'api_version': 2,
                'extra_headers': {'X-Auth-Token': 'foobar',
                                  'X-Roles': '',
                                  'X-Tenant-Id': 'project',
                                  'X-User-Id': 'user',
                                  'X-Identity-Status': 'Confirmed'}}
    def _get_download_params(self):
        """Download kwargs: the common params plus a VDI uuid stack."""
        params = self._get_params()
        params['uuid_stack'] = ['uuid1']
        return params
    @mock.patch.object(vm_utils, '_make_uuid_stack', return_value=['uuid1'])
    def test_download_image(self, mock_make_uuid_stack):
        """download_image invokes the dom0 'download_vhd2' plugin call."""
        params = self._get_download_params()

        with mock.patch.object(self.session, 'call_plugin_serialized'
                               ) as mock_call_plugin:
            self.store.download_image(self.context, self.session,
                                      self.instance, 'fake_image_uuid')

            mock_call_plugin.assert_called_once_with('glance.py',
                                                     'download_vhd2',
                                                     **params)
            mock_make_uuid_stack.assert_called_once_with()
    @mock.patch.object(vm_utils, '_make_uuid_stack', return_value=['uuid1'])
    @mock.patch.object(random, 'shuffle')
    @mock.patch.object(time, 'sleep')
    @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
    def test_download_image_retry(self, mock_fault, mock_sleep,
                                  mock_shuffle, mock_make_uuid_stack):
        """A RetryableError from the first glance endpoint is retried
        against the next endpoint and records one instance fault."""
        params = self._get_download_params()
        self.flags(num_retries=2, group='glance')

        # endpoint varies per attempt, so it is asserted per-call below
        params.pop("endpoint")
        calls = [mock.call('glance.py', 'download_vhd2',
                           endpoint='http://10.0.1.1:9292',
                           **params),
                 mock.call('glance.py', 'download_vhd2',
                           endpoint='http://10.0.0.1:9293',
                           **params)]

        glance_api_servers = ['http://10.0.1.1:9292',
                              'http://10.0.0.1:9293']
        self.flags(api_servers=glance_api_servers, group='glance')

        with (mock.patch.object(self.session, 'call_plugin_serialized')
              ) as mock_call_plugin_serialized:
            error_details = ["", "", "RetryableError", ""]
            error = self.session.XenAPI.Failure(details=error_details)
            # first attempt fails with a retryable error, second succeeds
            mock_call_plugin_serialized.side_effect = [error, "success"]
            self.store.download_image(self.context, self.session,
                                      self.instance, 'fake_image_uuid')

            mock_call_plugin_serialized.assert_has_calls(calls)
            self.assertEqual(1, mock_fault.call_count)
    def _get_upload_params(self, auto_disk_config=True,
                           expected_os_type='default'):
        """Upload kwargs: VDI uuids plus expected image properties."""
        params = {}
        params['vdi_uuids'] = ['fake_vdi_uuid']
        params['properties'] = {'auto_disk_config': auto_disk_config,
                                'os_type': expected_os_type}
        return params
    @mock.patch.object(utils, 'get_auto_disk_config_from_instance')
    @mock.patch.object(common_glance, 'generate_identity_headers')
    @mock.patch.object(vm_utils, 'get_sr_path')
    @mock.patch.object(host_glance, 'upload_vhd')
    def test_upload_image(self, mock_upload, mock_sr_path, mock_extra_header,
                          mock_disk_config):
        """upload_image forwards VDI uuids and properties to host_glance."""
        params = self._get_upload_params()
        mock_upload.return_value = 'fake_upload'
        mock_sr_path.return_value = 'fake_sr_path'
        mock_extra_header.return_value = 'fake_extra_header'
        mock_disk_config.return_value = 'true'

        self.store.upload_image(self.context, self.session, self.instance,
                                'fake_image_uuid', ['fake_vdi_uuid'])

        mock_sr_path.assert_called_once_with(self.session)
        mock_extra_header.assert_called_once_with(self.context)
        mock_upload.assert_called_once_with(
            self.session, 3, mock.ANY, mock.ANY, 'fake_image_uuid',
            'fake_sr_path', 'fake_extra_header', **params)
    @mock.patch.object(utils, 'get_auto_disk_config_from_instance')
    @mock.patch.object(common_glance, 'generate_identity_headers')
    @mock.patch.object(vm_utils, 'get_sr_path')
    @mock.patch.object(host_glance, 'upload_vhd')
    def test_upload_image_None_os_type(self, mock_upload, mock_sr_path,
                                       mock_extra_header, mock_disk_config):
        """An os_type of None is uploaded as the 'linux' default."""
        self.instance['os_type'] = None
        mock_sr_path.return_value = 'fake_sr_path'
        mock_extra_header.return_value = 'fake_extra_header'
        mock_upload.return_value = 'fake_upload'
        mock_disk_config.return_value = 'true'

        params = self._get_upload_params(True, 'linux')
        self.store.upload_image(self.context, self.session, self.instance,
                                'fake_image_uuid', ['fake_vdi_uuid'])

        mock_sr_path.assert_called_once_with(self.session)
        mock_extra_header.assert_called_once_with(self.context)
        mock_upload.assert_called_once_with(
            self.session, 3, mock.ANY, mock.ANY, 'fake_image_uuid',
            'fake_sr_path', 'fake_extra_header', **params)
        mock_disk_config.assert_called_once_with(self.instance)
    @mock.patch.object(utils, 'get_auto_disk_config_from_instance')
    @mock.patch.object(common_glance, 'generate_identity_headers')
    @mock.patch.object(vm_utils, 'get_sr_path')
    @mock.patch.object(host_glance, 'upload_vhd')
    def test_upload_image_no_os_type(self, mock_upload, mock_sr_path,
                                     mock_extra_header, mock_disk_config):
        """A missing os_type key is uploaded as the 'linux' default."""
        mock_sr_path.return_value = 'fake_sr_path'
        mock_extra_header.return_value = 'fake_extra_header'
        mock_upload.return_value = 'fake_upload'

        del self.instance['os_type']
        params = self._get_upload_params(True, 'linux')
        self.store.upload_image(self.context, self.session, self.instance,
                                'fake_image_uuid', ['fake_vdi_uuid'])

        mock_sr_path.assert_called_once_with(self.session)
        mock_extra_header.assert_called_once_with(self.context)
        mock_upload.assert_called_once_with(
            self.session, 3, mock.ANY, mock.ANY, 'fake_image_uuid',
            'fake_sr_path', 'fake_extra_header', **params)
        mock_disk_config.assert_called_once_with(self.instance)
    @mock.patch.object(common_glance, 'generate_identity_headers')
    @mock.patch.object(vm_utils, 'get_sr_path')
    @mock.patch.object(host_glance, 'upload_vhd')
    def test_upload_image_auto_config_disk_disabled(
            self, mock_upload, mock_sr_path, mock_extra_header):
        """image_auto_disk_config system metadata of 'Disabled' is
        propagated as the auto_disk_config image property."""
        mock_sr_path.return_value = 'fake_sr_path'
        mock_extra_header.return_value = 'fake_extra_header'
        mock_upload.return_value = 'fake_upload'

        sys_meta = [{"key": "image_auto_disk_config", "value": "Disabled"}]
        self.instance["system_metadata"] = sys_meta
        params = self._get_upload_params("disabled")
        self.store.upload_image(self.context, self.session, self.instance,
                                'fake_image_uuid', ['fake_vdi_uuid'])

        mock_sr_path.assert_called_once_with(self.session)
        mock_extra_header.assert_called_once_with(self.context)
        mock_upload.assert_called_once_with(
            self.session, 3, mock.ANY, mock.ANY, 'fake_image_uuid',
            'fake_sr_path', 'fake_extra_header', **params)
@mock.patch.object(common_glance, 'generate_identity_headers')
@mock.patch.object(vm_utils, 'get_sr_path')
@mock.patch.object(host_glance, 'upload_vhd')
def test_upload_image_raises_exception(self, mock_upload, mock_sr_path,
mock_extra_header):
mock_sr_path.return_value = 'fake_sr_path'
mock_extra_header.return_value = 'fake_extra_header'
mock_upload.side_effect = RuntimeError
params = self._get_upload_params()
self.assertRaises(RuntimeError, self.store.upload_image,
self.context, self.session, self.instance,
'fake_image_uuid', ['fake_vdi_uuid'])
mock_sr_path.assert_called_once_with(self.session)
mock_extra_header.assert_called_once_with(self.context)
mock_upload.assert_called_once_with(
self.session, 3, mock.ANY, mock.ANY, 'fake_image_uuid',
'fake_sr_path', 'fake_extra_header', **params)
    @mock.patch.object(time, 'sleep')
    @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
    def test_upload_image_retries_then_raises_exception(self,
                                                        mock_add_inst,
                                                        mock_time_sleep):
        """A retryable failure is attempted 1 + num_retries times, records
        an instance fault per attempt, then raises CouldNotUploadImage.
        """
        self.flags(num_retries=2, group='glance')
        params = self._get_params()
        params.update(self._get_upload_params())
        # 'RetryableError' in the failure details marks the error retryable.
        error_details = ["", "", "RetryableError", ""]
        error = XenAPI.Failure(details=error_details)
        with mock.patch.object(self.session, 'call_plugin_serialized',
                               side_effect=error) as mock_call_plugin:
            self.assertRaises(exception.CouldNotUploadImage,
                              self.store.upload_image,
                              self.context, self.session, self.instance,
                              'fake_image_uuid', ['fake_vdi_uuid'])
            # Backoff sleeps grow between the three attempts.
            time_sleep_args = [mock.call(0.5), mock.call(1)]
            call_plugin_args = [
                mock.call('glance.py', 'upload_vhd2', **params),
                mock.call('glance.py', 'upload_vhd2', **params),
                mock.call('glance.py', 'upload_vhd2', **params)]
            add_inst_args = [
                mock.call(self.context, self.instance, error,
                          (XenAPI.Failure, error, mock.ANY)),
                mock.call(self.context, self.instance, error,
                          (XenAPI.Failure, error, mock.ANY)),
                mock.call(self.context, self.instance, error,
                          (XenAPI.Failure, error, mock.ANY))]
            mock_time_sleep.assert_has_calls(time_sleep_args)
            mock_call_plugin.assert_has_calls(call_plugin_args)
            mock_add_inst.assert_has_calls(add_inst_args)
    @mock.patch.object(time, 'sleep')
    @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
    def test_upload_image_retries_on_signal_exception(self,
                                                      mock_add_inst,
                                                      mock_time_sleep):
        """Failures whose details report a task signal are retried; the
        third attempt succeeds, so no exception escapes.
        """
        self.flags(num_retries=2, group='glance')
        params = self._get_params()
        params.update(self._get_upload_params())
        error_details = ["", "task signaled", "", ""]
        error = XenAPI.Failure(details=error_details)
        # Note(johngarbutt) XenServer 6.1 and later has this error
        error_details_v61 = ["", "signal: SIGTERM", "", ""]
        error_v61 = self.session.XenAPI.Failure(details=error_details_v61)
        with mock.patch.object(self.session, 'call_plugin_serialized',
                               side_effect=[error, error_v61, None]
                               ) as mock_call_plugin:
            self.store.upload_image(self.context, self.session, self.instance,
                                    'fake_image_uuid', ['fake_vdi_uuid'])
            time_sleep_args = [mock.call(0.5), mock.call(1)]
            call_plugin_args = [
                mock.call('glance.py', 'upload_vhd2', **params),
                mock.call('glance.py', 'upload_vhd2', **params),
                mock.call('glance.py', 'upload_vhd2', **params)]
            # A fault is recorded only for the two failed attempts.
            add_inst_args = [
                mock.call(self.context, self.instance, error,
                          (XenAPI.Failure, error, mock.ANY)),
                mock.call(self.context, self.instance, error_v61,
                          (XenAPI.Failure, error_v61, mock.ANY))]
            mock_time_sleep.assert_has_calls(time_sleep_args)
            mock_call_plugin.assert_has_calls(call_plugin_args)
            mock_add_inst.assert_has_calls(add_inst_args)
@mock.patch.object(utils, 'get_auto_disk_config_from_instance')
@mock.patch.object(common_glance, 'generate_identity_headers')
@mock.patch.object(vm_utils, 'get_sr_path')
@mock.patch.object(host_glance, 'upload_vhd')
def test_upload_image_raises_exception_image_not_found(self,
mock_upload,
mock_sr_path,
mock_extra_header,
mock_disk_config):
params = self._get_upload_params()
mock_upload.return_value = 'fake_upload'
mock_sr_path.return_value = 'fake_sr_path'
mock_extra_header.return_value = 'fake_extra_header'
mock_disk_config.return_value = 'true'
image_id = 'fake_image_id'
mock_upload.side_effect = xenapi_exception.PluginImageNotFound(
image_id=image_id
)
self.assertRaises(exception.ImageNotFound, self.store.upload_image,
self.context, self.session, self.instance,
'fake_image_uuid', ['fake_vdi_uuid'])
mock_sr_path.assert_called_once_with(self.session)
mock_extra_header.assert_called_once_with(self.context)
mock_upload.assert_called_once_with(
self.session, 3, mock.ANY, mock.ANY, 'fake_image_uuid',
'fake_sr_path', 'fake_extra_header', **params)

View File

@ -1,244 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tarfile
import mock
from nova import test
from nova.virt.xenapi.image import utils
@mock.patch.object(utils, 'IMAGE_API')
class GlanceImageTestCase(test.NoDBTestCase):
    """Tests for utils.GlanceImage.

    The class-level patch replaces utils.IMAGE_API for every test method
    and injects it as the ``mocked`` argument.
    """
    def _get_image(self):
        # Helper: a GlanceImage built from sentinel context/image ref.
        return utils.GlanceImage(mock.sentinel.context,
                                 mock.sentinel.image_ref)
    def test_meta(self, mocked):
        mocked.get.return_value = mock.sentinel.meta
        image = self._get_image()
        self.assertEqual(mock.sentinel.meta, image.meta)
        mocked.get.assert_called_once_with(mock.sentinel.context,
                                           mock.sentinel.image_ref)
    def test_download_to(self, mocked):
        mocked.download.return_value = None
        image = self._get_image()
        result = image.download_to(mock.sentinel.fobj)
        self.assertIsNone(result)
        mocked.download.assert_called_once_with(mock.sentinel.context,
                                                mock.sentinel.image_ref,
                                                mock.sentinel.fobj)
    def test_is_raw_tgz_empty_meta(self, mocked):
        mocked.get.return_value = {}
        image = self._get_image()
        self.assertFalse(image.is_raw_tgz())
    def test_is_raw_tgz_for_raw_tgz(self, mocked):
        # raw disk_format + tgz container_format is the raw-tgz signature.
        mocked.get.return_value = {
            'disk_format': 'raw',
            'container_format': 'tgz'
        }
        image = self._get_image()
        self.assertTrue(image.is_raw_tgz())
    def test_data(self, mocked):
        mocked.download.return_value = mock.sentinel.image
        image = self._get_image()
        self.assertEqual(mock.sentinel.image, image.data())
class RawImageTestCase(test.NoDBTestCase):
    """Tests for utils.RawImage, a thin wrapper around GlanceImage."""
    @mock.patch.object(utils, 'GlanceImage', spec_set=True, autospec=True)
    def test_get_size(self, mock_glance_image):
        # The size comes from the glance metadata, coerced to int.
        mock_glance_image.meta = {'size': '123'}
        raw_image = utils.RawImage(mock_glance_image)
        self.assertEqual(123, raw_image.get_size())
    @mock.patch.object(utils, 'GlanceImage', spec_set=True, autospec=True)
    def test_stream_to(self, mock_glance_image):
        # stream_to delegates to the underlying image's download_to.
        mock_glance_image.download_to.return_value = 'result'
        raw_image = utils.RawImage(mock_glance_image)
        self.assertEqual('result', raw_image.stream_to('file'))
        mock_glance_image.download_to.assert_called_once_with('file')
class TestIterableBasedFile(test.NoDBTestCase):
    """Tests for utils.IterableToFileAdapter, which exposes a read()
    interface over an arbitrary iterable of chunks.
    """
    def test_constructor(self):
        class FakeIterable(object):
            def __iter__(_self):
                return 'iterator'
        the_file = utils.IterableToFileAdapter(FakeIterable())
        self.assertEqual('iterator', the_file.iterator)
    def test_read_one_character(self):
        the_file = utils.IterableToFileAdapter([
            'chunk1', 'chunk2'
        ])
        self.assertEqual('c', the_file.read(1))
    def test_read_stores_remaining_characters(self):
        # Whatever is not returned from the current chunk is buffered.
        the_file = utils.IterableToFileAdapter([
            'chunk1', 'chunk2'
        ])
        the_file.read(1)
        self.assertEqual('hunk1', the_file.remaining_data)
    def test_read_remaining_characters(self):
        the_file = utils.IterableToFileAdapter([
            'chunk1', 'chunk2'
        ])
        self.assertEqual('c', the_file.read(1))
        self.assertEqual('h', the_file.read(1))
    def test_read_reached_end_of_file(self):
        # Exhausting the iterable yields '' like a real file at EOF.
        the_file = utils.IterableToFileAdapter([
            'chunk1', 'chunk2'
        ])
        self.assertEqual('chunk1', the_file.read(100))
        self.assertEqual('chunk2', the_file.read(100))
        self.assertEqual('', the_file.read(100))
    def test_empty_chunks(self):
        # Empty chunks are skipped, not treated as EOF.
        the_file = utils.IterableToFileAdapter([
            '', '', 'chunk2'
        ])
        self.assertEqual('chunk2', the_file.read(100))
class RawTGZTestCase(test.NoDBTestCase):
    """Tests for utils.RawTGZImage (a raw disk inside a gzipped tar)."""
    @mock.patch.object(utils.RawTGZImage, '_as_file', return_value='the_file')
    @mock.patch.object(utils.tarfile, 'open', return_value='tf')
    def test_as_tarfile(self, mock_open, mock_as_file):
        image = utils.RawTGZImage(None)
        result = image._as_tarfile()
        self.assertEqual('tf', result)
        mock_as_file.assert_called_once_with()
        # Streaming gzip read mode over the downloaded file object.
        mock_open.assert_called_once_with(mode='r|gz', fileobj='the_file')
    @mock.patch.object(utils, 'GlanceImage', spec_set=True, autospec=True)
    @mock.patch.object(utils, 'IterableToFileAdapter',
                       return_value='data-as-file')
    def test_as_file(self, mock_adapter, mock_glance_image):
        mock_glance_image.data.return_value = 'iterable-data'
        image = utils.RawTGZImage(mock_glance_image)
        result = image._as_file()
        self.assertEqual('data-as-file', result)
        mock_glance_image.data.assert_called_once_with()
        mock_adapter.assert_called_once_with('iterable-data')
    @mock.patch.object(tarfile, 'TarFile', spec_set=True, autospec=True)
    @mock.patch.object(tarfile, 'TarInfo', autospec=True)
    @mock.patch.object(utils.RawTGZImage, '_as_tarfile')
    def test_get_size(self, mock_as_tar, mock_tar_info, mock_tar_file):
        # get_size reads the size of the first tar member.
        mock_tar_file.next.return_value = mock_tar_info
        mock_tar_info.size = 124
        mock_as_tar.return_value = mock_tar_file
        image = utils.RawTGZImage(None)
        result = image.get_size()
        self.assertEqual(124, result)
        self.assertEqual(image._tar_info, mock_tar_info)
        self.assertEqual(image._tar_file, mock_tar_file)
        mock_as_tar.assert_called_once_with()
        mock_tar_file.next.assert_called_once_with()
    @mock.patch.object(tarfile, 'TarFile', spec_set=True, autospec=True)
    @mock.patch.object(tarfile, 'TarInfo', autospec=True)
    @mock.patch.object(utils.RawTGZImage, '_as_tarfile')
    def test_get_size_called_twice(self, mock_as_tar, mock_tar_info,
                                   mock_tar_file):
        # The tarfile and TarInfo are cached; a second call must not
        # reopen the archive or advance past the first member.
        mock_tar_file.next.return_value = mock_tar_info
        mock_tar_info.size = 124
        mock_as_tar.return_value = mock_tar_file
        image = utils.RawTGZImage(None)
        image.get_size()
        result = image.get_size()
        self.assertEqual(124, result)
        self.assertEqual(image._tar_info, mock_tar_info)
        self.assertEqual(image._tar_file, mock_tar_file)
        mock_as_tar.assert_called_once_with()
        mock_tar_file.next.assert_called_once_with()
    @mock.patch.object(tarfile, 'TarFile', spec_set=True, autospec=True)
    @mock.patch.object(tarfile, 'TarInfo', spec_set=True, autospec=True)
    @mock.patch.object(utils.RawTGZImage, '_as_tarfile')
    @mock.patch.object(utils.shutil, 'copyfileobj')
    def test_stream_to_without_size_retrieved(self, mock_copyfile,
                                              mock_as_tar, mock_tar_info,
                                              mock_tar_file):
        target_file = mock.create_autospec(open)
        source_file = mock.create_autospec(open)
        mock_tar_file.next.return_value = mock_tar_info
        mock_tar_file.extractfile.return_value = source_file
        mock_as_tar.return_value = mock_tar_file
        image = utils.RawTGZImage(None)
        image._image_service_and_image_id = ('service', 'id')
        image.stream_to(target_file)
        mock_as_tar.assert_called_once_with()
        mock_tar_file.next.assert_called_once_with()
        mock_tar_file.extractfile.assert_called_once_with(mock_tar_info)
        mock_copyfile.assert_called_once_with(
            source_file, target_file)
        mock_tar_file.close.assert_called_once_with()
    @mock.patch.object(tarfile, 'TarFile', spec_set=True, autospec=True)
    @mock.patch.object(tarfile, 'TarInfo', autospec=True)
    @mock.patch.object(utils.RawTGZImage, '_as_tarfile')
    @mock.patch.object(utils.shutil, 'copyfileobj')
    def test_stream_to_with_size_retrieved(self, mock_copyfile,
                                           mock_as_tar, mock_tar_info,
                                           mock_tar_file):
        # get_size() first caches the tar state; stream_to must reuse it
        # instead of reopening the archive.
        target_file = mock.create_autospec(open)
        source_file = mock.create_autospec(open)
        mock_tar_info.size = 124
        mock_tar_file.next.return_value = mock_tar_info
        mock_tar_file.extractfile.return_value = source_file
        mock_as_tar.return_value = mock_tar_file
        image = utils.RawTGZImage(None)
        image._image_service_and_image_id = ('service', 'id')
        image.get_size()
        image.stream_to(target_file)
        mock_as_tar.assert_called_once_with()
        mock_tar_file.next.assert_called_once_with()
        mock_tar_file.extractfile.assert_called_once_with(mock_tar_info)
        mock_copyfile.assert_called_once_with(
            source_file, target_file)
        mock_tar_file.close.assert_called_once_with()

View File

@ -1,149 +0,0 @@
# Copyright 2017 Citrix System
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from os_xenapi.client import exception as xenapi_except
from os_xenapi.client import image
from nova import context
from nova import exception
from nova.image.glance import API as image_api
from nova.tests.unit.virt.xenapi import stubs
from nova.virt.xenapi.image import utils
from nova.virt.xenapi.image import vdi_stream
from nova.virt.xenapi import vm_utils
class TestVdiStreamStore(stubs.XenAPITestBaseNoDB):
    """Tests for vdi_stream.VdiStreamStore, which streams VDI data
    directly between glance and the XenAPI host via os-xenapi.
    """
    def setUp(self):
        super(TestVdiStreamStore, self).setUp()
        self.store = vdi_stream.VdiStreamStore()
        self.flags(connection_url='test_url',
                   image_compression_level=5,
                   group='xenserver')
        self.session = mock.Mock()
        self.context = context.RequestContext(
            'user', 'project', auth_token='foobar')
        self.instance = {'uuid': 'e6ad57c9-115e-4b7d-a872-63cea0ac3cf2',
                         'system_metadata': [],
                         'auto_disk_config': True,
                         'os_type': 'default',
                         'xenapi_use_agent': 'true'}
    @mock.patch.object(image_api, 'download',
                       return_value='fake_data')
    @mock.patch.object(utils, 'IterableToFileAdapter',
                       return_value='fake_stream')
    @mock.patch.object(vm_utils, 'safe_find_sr',
                       return_value='fake_sr_ref')
    @mock.patch.object(image, 'stream_to_vdis')
    def test_download_image(self, stream_to, find_sr, to_file, download):
        # Happy path: glance data is adapted to a file-like stream and
        # pushed into new VDIs on the default SR.
        self.store.download_image(self.context, self.session,
                                  self.instance, 'fake_image_uuid')
        download.assert_called_once_with(self.context, 'fake_image_uuid')
        to_file.assert_called_once_with('fake_data')
        find_sr.assert_called_once_with(self.session)
        stream_to.assert_called_once_with(self.context, self.session,
                                          self.instance, 'test_url',
                                          'fake_sr_ref', 'fake_stream')
    @mock.patch.object(image_api, 'download',
                       return_value='fake_data')
    @mock.patch.object(utils, 'IterableToFileAdapter',
                       return_value='fake_stream')
    @mock.patch.object(vm_utils, 'safe_find_sr',
                       return_value='fake_sr_ref')
    @mock.patch.object(image, 'stream_to_vdis',
                       side_effect=xenapi_except.OsXenApiException)
    def test_download_image_exception(self, stream_to, find_sr, to_file,
                                      download):
        # os-xenapi errors are translated to CouldNotFetchImage.
        self.assertRaises(exception.CouldNotFetchImage,
                          self.store.download_image,
                          self.context, self.session,
                          self.instance, 'fake_image_uuid')
    @mock.patch.object(vdi_stream.VdiStreamStore, '_get_metadata',
                       return_value='fake_meta_data')
    @mock.patch.object(image, 'stream_from_vdis',
                       return_value='fake_data')
    @mock.patch.object(utils, 'IterableToFileAdapter',
                       return_value='fake_stream')
    @mock.patch.object(image_api, 'update')
    def test_upload_image(self, update, to_file, to_stream, get):
        # Upload streams the VDIs out, compressed at the configured level,
        # and pushes both data and metadata into glance.
        fake_vdi_uuids = ['fake-vdi-uuid']
        self.store.upload_image(self.context, self.session,
                                self.instance, 'fake_image_uuid',
                                fake_vdi_uuids)
        get.assert_called_once_with(self.context, self.instance,
                                    'fake_image_uuid')
        to_stream.assert_called_once_with(self.context, self.session,
                                          self.instance, 'test_url',
                                          fake_vdi_uuids, compresslevel=5)
        to_file.assert_called_once_with('fake_data')
        update.assert_called_once_with(self.context, 'fake_image_uuid',
                                       'fake_meta_data', data='fake_stream')
    @mock.patch.object(vdi_stream.VdiStreamStore, '_get_metadata')
    @mock.patch.object(image, 'stream_from_vdis',
                       side_effect=xenapi_except.OsXenApiException)
    @mock.patch.object(utils, 'IterableToFileAdapter',
                       return_value='fake_stream')
    @mock.patch.object(image_api, 'update')
    def test_upload_image_exception(self, update, to_file, to_stream, get):
        # os-xenapi errors are translated to CouldNotUploadImage.
        fake_vdi_uuids = ['fake-vdi-uuid']
        self.assertRaises(exception.CouldNotUploadImage,
                          self.store.upload_image,
                          self.context, self.session,
                          self.instance, 'fake_image_uuid',
                          fake_vdi_uuids)
    @mock.patch.object(image_api, 'get',
                       return_value={})
    def test_get_metadata(self, image_get):
        expect_metadata = {'disk_format': 'vhd',
                           'container_format': 'ovf',
                           'auto_disk_config': 'True',
                           'os_type': 'default',
                           'size': 0}
        result = self.store._get_metadata(self.context, self.instance,
                                          'fake_image_uuid')
        self.assertEqual(result, expect_metadata)
    @mock.patch.object(image_api, 'get',
                       return_value={})
    def test_get_metadata_disabled(self, image_get):
        # Verify the metadata contains auto_disk_config=disabled, when
        # image_auto_disk_config is "Disabled".
        self.instance['system_metadata'] = [
            {"key": "image_auto_disk_config",
             "value": "Disabled"}]
        expect_metadata = {'disk_format': 'vhd',
                           'container_format': 'ovf',
                           'auto_disk_config': 'disabled',
                           'os_type': 'default',
                           'size': 0}
        result = self.store._get_metadata(self.context, self.instance,
                                          'fake_image_uuid')
        self.assertEqual(result, expect_metadata)

View File

@ -1,204 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import tarfile
import eventlet
import mock
from os_xenapi.client import session as xenapi_session
import six
from nova.image import glance
from nova import test
from nova.virt.xenapi.image import vdi_through_dev
@contextlib.contextmanager
def fake_context(result=None):
    """Minimal context-manager stub: hands back *result* unchanged."""
    yield result
class TestDelegatingToCommand(test.NoDBTestCase):
    """VdiThroughDevStore delegates its work to command objects."""
    def test_upload_image_is_delegated_to_command(self):
        # upload_image must build an UploadToGlanceAsRawTgz command with
        # its own arguments and return that command's result verbatim.
        command = mock.create_autospec(vdi_through_dev.UploadToGlanceAsRawTgz,
                                       spec_set=True)
        command.upload_image.return_value = 'result'
        with mock.patch.object(vdi_through_dev, 'UploadToGlanceAsRawTgz',
                               return_value=command) as mock_upload:
            store = vdi_through_dev.VdiThroughDevStore()
            result = store.upload_image(
                'ctx', 'session', 'instance', 'image_id', 'vdis')
            self.assertEqual('result', result)
            mock_upload.assert_called_once_with(
                'ctx', 'session', 'instance', 'image_id', 'vdis')
            command.upload_image.assert_called_once_with()
class TestUploadToGlanceAsRawTgz(test.NoDBTestCase):
    """Tests for the UploadToGlanceAsRawTgz command object."""
    @mock.patch.object(vdi_through_dev.vm_utils, 'vdi_attached')
    @mock.patch.object(vdi_through_dev.utils, 'make_dev_path')
    @mock.patch.object(vdi_through_dev.utils, 'temporary_chown')
    def test_upload_image(self, mock_vdi_temp_chown,
                          mock_vdi_make_dev_path, mock_vdi_attached):
        # The first VDI is attached read-only, its device path chowned,
        # and the upload performed against that device path.
        mock_vdi_attached.return_value = fake_context('dev')
        mock_vdi_make_dev_path.return_value = 'devpath'
        mock_vdi_temp_chown.return_value = fake_context()
        store = vdi_through_dev.UploadToGlanceAsRawTgz(
            'context', 'session', 'instance', 'id', ['vdi0', 'vdi1'])
        with test.nested(
                mock.patch.object(store, '_perform_upload'),
                mock.patch.object(store, '_get_vdi_ref',
                                  return_value='vdi_ref'),
        ) as (mock_upload, mock_get_vdi):
            store.upload_image()
            mock_get_vdi.assert_called_once_with()
            mock_upload.assert_called_once_with('devpath')
            mock_vdi_attached.assert_called_once_with(
                'session', 'vdi_ref', read_only=True)
            mock_vdi_make_dev_path.assert_called_once_with('dev')
            mock_vdi_temp_chown.assert_called_once_with('devpath')
    def test__perform_upload(self):
        # Producer (tar.gz of the device) and consumer (glance update)
        # are wired together via a pipe and run in a green pool.
        producer = mock.create_autospec(vdi_through_dev.TarGzProducer,
                                        spec_set=True)
        consumer = mock.create_autospec(glance.UpdateGlanceImage,
                                        spec_set=True)
        pool = mock.create_autospec(eventlet.GreenPool,
                                    spec_set=True)
        store = vdi_through_dev.UploadToGlanceAsRawTgz(
            'context', 'session', 'instance', 'id', ['vdi0', 'vdi1'])
        with test.nested(
                mock.patch.object(store, '_create_pipe',
                                  return_value=('readfile', 'writefile')),
                mock.patch.object(store, '_get_virtual_size',
                                  return_value='324'),
                mock.patch.object(glance, 'UpdateGlanceImage',
                                  return_value=consumer),
                mock.patch.object(vdi_through_dev, 'TarGzProducer',
                                  return_value=producer),
                mock.patch.object(vdi_through_dev.eventlet, 'GreenPool',
                                  return_value=pool)
        ) as (mock_create_pipe, mock_virtual_size,
              mock_upload, mock_TarGzProducer, mock_greenpool):
            producer.get_metadata.return_value = "metadata"
            store._perform_upload('devpath')
            producer.get_metadata.assert_called_once_with()
            mock_virtual_size.assert_called_once_with()
            mock_create_pipe.assert_called_once_with()
            mock_TarGzProducer.assert_called_once_with(
                'devpath', 'writefile', '324', 'disk.raw')
            mock_upload.assert_called_once_with(
                'context', 'id', 'metadata', 'readfile')
            mock_greenpool.assert_called_once_with()
            pool.spawn.assert_has_calls([mock.call(producer.start),
                                         mock.call(consumer.start)])
            pool.waitall.assert_called_once_with()
    def test__get_vdi_ref(self):
        # Only the first VDI uuid is resolved to a ref.
        session = mock.create_autospec(xenapi_session.XenAPISession,
                                       spec_set=True)
        store = vdi_through_dev.UploadToGlanceAsRawTgz(
            'context', session, 'instance', 'id', ['vdi0', 'vdi1'])
        session.call_xenapi.return_value = 'vdi_ref'
        self.assertEqual('vdi_ref', store._get_vdi_ref())
        session.call_xenapi.assert_called_once_with(
            'VDI.get_by_uuid', 'vdi0')
    def test__get_virtual_size(self):
        session = mock.create_autospec(xenapi_session.XenAPISession,
                                       spec_set=True)
        store = vdi_through_dev.UploadToGlanceAsRawTgz(
            'context', session, 'instance', 'id', ['vdi0', 'vdi1'])
        with mock.patch.object(store, '_get_vdi_ref',
                               return_value='vdi_ref') as mock_get_vdi:
            store._get_virtual_size()
            mock_get_vdi.assert_called_once_with()
            session.call_xenapi.assert_called_once_with(
                'VDI.get_virtual_size', 'vdi_ref')
    @mock.patch.object(vdi_through_dev.os, 'pipe')
    @mock.patch.object(vdi_through_dev.greenio, 'GreenPipe')
    def test__create_pipe(self, mock_vdi_greenpipe, mock_vdi_os_pipe):
        # An OS pipe is wrapped in unbuffered GreenPipe file objects.
        store = vdi_through_dev.UploadToGlanceAsRawTgz(
            'context', 'session', 'instance', 'id', ['vdi0', 'vdi1'])
        mock_vdi_os_pipe.return_value = ('rpipe', 'wpipe')
        mock_vdi_greenpipe.side_effect = ['rfile', 'wfile']
        result = store._create_pipe()
        self.assertEqual(('rfile', 'wfile'), result)
        mock_vdi_os_pipe.assert_called_once_with()
        mock_vdi_greenpipe.assert_has_calls(
            [mock.call('rpipe', 'rb', 0),
             mock.call('wpipe', 'wb', 0)])
class TestTarGzProducer(test.NoDBTestCase):
    """Tests for vdi_through_dev.TarGzProducer."""

    def test_constructor(self):
        producer = vdi_through_dev.TarGzProducer('devpath', 'writefile',
                                                 '100', 'fname')
        self.assertEqual('devpath', producer.fpath)
        self.assertEqual('writefile', producer.output)
        self.assertEqual('100', producer.size)
        # NOTE(review): the original repeated the producer.output
        # assertion here; the duplicate was removed.  It was presumably
        # meant to verify the fourth constructor argument ('fname') --
        # confirm the attribute name before adding that assertion.

    @mock.patch.object(vdi_through_dev.tarfile, 'TarInfo')
    @mock.patch.object(vdi_through_dev.tarfile, 'open')
    def test_start(self, mock_tar_open, mock_tar_TarInfo):
        """start() writes one tar member of the advertised size."""
        outf = six.StringIO()
        producer = vdi_through_dev.TarGzProducer('fpath', outf,
                                                 '100', 'fname')
        tfile = mock.create_autospec(tarfile.TarFile, spec_set=True)
        tinfo = mock.create_autospec(tarfile.TarInfo)
        inf = mock.create_autospec(open, spec_set=True)
        mock_tar_open.return_value = fake_context(tfile)
        mock_tar_TarInfo.return_value = tinfo
        with mock.patch.object(producer, '_open_file',
                               return_value=fake_context(inf)
                               ) as mock_open_file:
            producer.start()
            self.assertEqual(100, tinfo.size)
            mock_tar_TarInfo.assert_called_once_with(name='fname')
            mock_tar_open.assert_called_once_with(fileobj=outf, mode='w|gz')
            mock_open_file.assert_called_once_with('fpath', 'rb')
            tfile.addfile.assert_called_once_with(tinfo, fileobj=inf)

    def test_get_metadata(self):
        producer = vdi_through_dev.TarGzProducer('devpath', 'writefile',
                                                 '100', 'fname')
        self.assertEqual({
            'disk_format': 'raw',
            'container_format': 'tgz'},
            producer.get_metadata())

View File

@ -1,180 +0,0 @@
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Stubouts, mocks and fixtures for the test suite."""
import pickle
import random
import sys
import fixtures
import mock
from os_xenapi.client import session
from os_xenapi.client import XenAPI
from nova import test
from nova.virt.xenapi import fake
def stubout_session(test, cls, product_version=(5, 6, 2),
                    product_brand='XenServer', platform_version=(1, 9, 0),
                    **opt_args):
    """Stubs out methods from XenAPISession.

    :param test: test case whose ``stub_out`` installs the stubs
    :param cls: fake session class instantiated instead of a real session
    :param product_version: version tuple the stubbed host reports
    :param product_brand: product brand string the stubbed host reports
    :param platform_version: platform version tuple the host reports
    :param opt_args: extra keyword arguments forwarded to ``cls``
    """
    test.stub_out('os_xenapi.client.session.XenAPISession._create_session',
                  lambda s, url: cls(url, **opt_args))
    test.stub_out('os_xenapi.client.session.XenAPISession.'
                  '_get_product_version_and_brand',
                  lambda s: (product_version, product_brand))
    test.stub_out('os_xenapi.client.session.XenAPISession.'
                  '_get_platform_version',
                  lambda s: platform_version)
def _make_fake_vdi():
    """Create a VDI on the first fake SR and return its uuid."""
    sr = fake.get_all('SR')[0]
    ref = fake.create_vdi('', sr)
    return fake.get_record('VDI', ref)['uuid']
class FakeSessionForVMTests(fake.SessionBase):
    """Stubs out a XenAPISession for VM tests."""

    def host_call_plugin(self, _1, _2, plugin, method, _5):
        """Fake a glance download by creating a VDI in the fake backend."""
        # BUG FIX: the original used plugin.rstrip('.py'), but rstrip()
        # strips any trailing run of the characters '.', 'p' and 'y',
        # which mangles names such as 'copy.py' -> 'co'.  Strip the
        # literal '.py' suffix instead.
        if plugin.endswith('.py'):
            plugin = plugin[:-3]
        if plugin == 'glance' and method == 'download_vhd2':
            root_uuid = _make_fake_vdi()
            return pickle.dumps(dict(root=dict(uuid=root_uuid)))
        else:
            return (super(FakeSessionForVMTests, self).
                    host_call_plugin(_1, _2, plugin, method, _5))

    def VM_start(self, _1, ref, _2, _3):
        """Transition a halted VM to Running, like a real host would."""
        vm = fake.get_record('VM', ref)
        if vm['power_state'] != 'Halted':
            raise XenAPI.Failure(['VM_BAD_POWER_STATE', ref, 'Halted',
                                  vm['power_state']])
        vm['power_state'] = 'Running'
        vm['is_a_template'] = False
        vm['is_control_domain'] = False
        vm['domid'] = random.randrange(1, 1 << 16)
        return vm

    def VM_start_on(self, _1, vm_ref, host_ref, _2, _3):
        """Start the VM and record the host it became resident on."""
        vm_rec = self.VM_start(_1, vm_ref, _2, _3)
        vm_rec['resident_on'] = host_ref

    def VDI_snapshot(self, session_ref, vm_ref, _1):
        # Snapshots always land on a fixed fake SR, read-only.
        sr_ref = "fakesr"
        return fake.create_vdi('fakelabel', sr_ref, read_only=True)

    def SR_scan(self, session_ref, sr_ref):
        # Scanning the fake SR is a no-op.
        pass
class ReplaceModule(fixtures.Fixture):
    """Replace a module with a fake module."""

    def __init__(self, name, new_value):
        # Module name to hijack and the replacement object.
        self.name = name
        self.new_value = new_value

    def _restore(self, old_value):
        # Cleanup hook: put the previously-registered module back.
        sys.modules[self.name] = old_value

    def setUp(self):
        super(ReplaceModule, self).setUp()
        previous = sys.modules.get(self.name)
        sys.modules[self.name] = self.new_value
        self.addCleanup(self._restore, previous)
class FakeSessionForVolumeTests(fake.SessionBase):
    """Stubs out a XenAPISession for Volume tests."""

    def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
                      _6, _7, _8, _9, _10, _11):
        # Fail with INVALID_VDI unless a VDI with this uuid already
        # exists in the fake backend.
        known_uuids = (fake.get_record('VDI', ref)['uuid']
                       for ref in fake.get_all('VDI'))
        if uuid not in known_uuids:
            raise XenAPI.Failure([['INVALID_VDI', 'session', self._session]])
class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests):
    """Stubs out a XenAPISession for Volume tests: it injects failures."""
    def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
                      _6, _7, _8, _9, _10, _11):
        # This is for testing failure
        raise XenAPI.Failure([['INVALID_VDI', 'session', self._session]])
    def PBD_unplug(self, _1, ref):
        # Mark the PBD detached without any host interaction.
        rec = fake.get_record('PBD', ref)
        rec['currently-attached'] = False
    def SR_forget(self, _1, ref):
        # Forgetting an SR is a no-op in the fake backend.
        pass
class FakeSessionForFailedMigrateTests(FakeSessionForVMTests):
    """Session stub whose migration-related calls always fail."""
    def VM_assert_can_migrate(self, session, vmref, migrate_data,
                              live, vdi_map, vif_map, options):
        raise XenAPI.Failure("XenAPI VM.assert_can_migrate failed")
    def host_migrate_receive(self, session, hostref, networkref, options):
        raise XenAPI.Failure("XenAPI host.migrate_receive failed")
    def VM_migrate_send(self, session, vmref, migrate_data, islive, vdi_map,
                        vif_map, options):
        raise XenAPI.Failure("XenAPI VM.migrate_send failed")
# FIXME(sirp): XenAPITestBase is deprecated, all tests should be converted
# over to use XenAPITestBaseNoDB
class XenAPITestBase(test.TestCase):
    """DB-backed base class for XenAPI tests; installs the fake XenAPI."""
    def setUp(self):
        super(XenAPITestBase, self).setUp()
        # Replace the real XenAPI module and reset its in-memory state.
        self.useFixture(ReplaceModule('XenAPI', fake))
        fake.reset()
    def stubout_get_this_vm_uuid(self):
        # Stub get_this_vm_uuid to return the fake control-domain's uuid.
        def f(session):
            vms = [rec['uuid'] for rec
                   in fake.get_all_records('VM').values()
                   if rec['is_control_domain']]
            return vms[0]
        self.stub_out('nova.virt.xenapi.vm_utils.get_this_vm_uuid', f)
class XenAPITestBaseNoDB(test.NoDBTestCase):
    """DB-less base class for XenAPI tests; installs the fake XenAPI."""
    def setUp(self):
        super(XenAPITestBaseNoDB, self).setUp()
        self.useFixture(ReplaceModule('XenAPI', fake))
        fake.reset()
    @staticmethod
    def get_fake_session(error=None):
        # Build a MagicMock session.  If ``error`` is given, every
        # call_xenapi raises a failure whose first detail is ``error``.
        fake_session = mock.MagicMock()
        session.apply_session_helpers(fake_session)
        if error is not None:
            class FakeException(Exception):
                # Mimics XenAPI.Failure.details
                details = [error, "a", "b", "c"]
            fake_session.XenAPI.Failure = FakeException
            fake_session.call_xenapi.side_effect = FakeException
        return fake_session

View File

@ -1,471 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import time
import mock
from os_xenapi.client import host_agent
from os_xenapi.client import XenAPI
from oslo_concurrency import processutils
from oslo_utils import uuidutils
from nova import exception
from nova import test
from nova.virt.xenapi import agent
def _get_fake_instance(**kwargs):
system_metadata = []
for k, v in kwargs.items():
system_metadata.append({
"key": k,
"value": v
})
return {
"system_metadata": system_metadata,
"uuid": "uuid",
"key_data": "ssh-rsa asdf",
"os_type": "asdf",
}
class AgentTestCaseBase(test.NoDBTestCase):
    """Common helper for building a XenAPIBasedAgent under test."""
    def _create_agent(self, instance, session="session"):
        # Stash the collaborators so tests can assert against them.
        self.session = session
        self.virtapi = "virtapi"
        self.vm_ref = "vm_ref"
        return agent.XenAPIBasedAgent(self.session, self.virtapi,
                                      instance, self.vm_ref)
class AgentImageFlagsTestCase(AgentTestCaseBase):
    """Tests for how agent.should_use_agent() combines the
    image_xenapi_use_agent property with the use_agent_default flag.
    """
    def test_agent_is_present(self):
        # An explicit image property wins over the (False) default.
        self.flags(use_agent_default=False, group='xenserver')
        instance = {"system_metadata":
                    [{"key": "image_xenapi_use_agent", "value": "true"}]}
        self.assertTrue(agent.should_use_agent(instance))
    def test_agent_is_disabled(self):
        # An explicit "false" property wins over the (True) default.
        self.flags(use_agent_default=True, group='xenserver')
        instance = {"system_metadata":
                    [{"key": "image_xenapi_use_agent", "value": "false"}]}
        self.assertFalse(agent.should_use_agent(instance))
    def test_agent_uses_deafault_when_prop_invalid(self):
        # NOTE(review): "deafault" is a typo for "default" in this test
        # name; left unchanged to avoid renaming the test.
        self.flags(use_agent_default=True, group='xenserver')
        instance = {"system_metadata":
                    [{"key": "image_xenapi_use_agent", "value": "bob"}],
                    "uuid": "uuid"}
        self.assertTrue(agent.should_use_agent(instance))
    def test_agent_default_not_present(self):
        self.flags(use_agent_default=False, group='xenserver')
        instance = {"system_metadata": []}
        self.assertFalse(agent.should_use_agent(instance))
    def test_agent_default_present(self):
        self.flags(use_agent_default=True, group='xenserver')
        instance = {"system_metadata": []}
        self.assertTrue(agent.should_use_agent(instance))
class SysMetaKeyTestBase(object):
    """Mixin exercising XenAPIBasedAgent._get_sys_meta_key for one key.

    Subclasses set ``key`` and also inherit from AgentTestCaseBase.
    """
    # System-metadata key under test; set by subclasses.
    key = None
    def _create_agent_with_value(self, value):
        kwargs = {self.key: value}
        instance = _get_fake_instance(**kwargs)
        return self._create_agent(instance)
    def test_get_sys_meta_key_true(self):
        agent = self._create_agent_with_value("true")
        self.assertTrue(agent._get_sys_meta_key(self.key))
    def test_get_sys_meta_key_false(self):
        # Parsing is case-insensitive for the false value.
        agent = self._create_agent_with_value("False")
        self.assertFalse(agent._get_sys_meta_key(self.key))
    def test_get_sys_meta_key_invalid_is_false(self):
        agent = self._create_agent_with_value("invalid")
        self.assertFalse(agent._get_sys_meta_key(self.key))
    def test_get_sys_meta_key_missing_is_false(self):
        instance = _get_fake_instance()
        agent = self._create_agent(instance)
        self.assertFalse(agent._get_sys_meta_key(self.key))
class SkipSshFlagTestCase(SysMetaKeyTestBase, AgentTestCaseBase):
    """Tests for the image_xenapi_skip_agent_inject_ssh flag."""

    key = "image_xenapi_skip_agent_inject_ssh"

    def test_skip_ssh_key_inject(self):
        # Local renamed from 'agent' so it no longer shadows the module.
        agent_inst = self._create_agent_with_value("True")
        self.assertTrue(agent_inst._skip_ssh_key_inject())
class SkipFileInjectAtBootFlagTestCase(SysMetaKeyTestBase, AgentTestCaseBase):
    """Tests for the image_xenapi_skip_agent_inject_files_at_boot flag."""

    key = "image_xenapi_skip_agent_inject_files_at_boot"

    def test_skip_inject_files_at_boot(self):
        # Local renamed from 'agent' so it no longer shadows the module.
        agent_inst = self._create_agent_with_value("True")
        self.assertTrue(agent_inst._skip_inject_files_at_boot())
class InjectSshTestCase(AgentTestCaseBase):
    """Tests for XenAPIBasedAgent.inject_ssh_key().

    Locals renamed from 'agent' to 'agent_inst' so they no longer shadow
    the imported agent module.
    """

    @mock.patch.object(agent.XenAPIBasedAgent, 'inject_file')
    def test_inject_ssh_key_succeeds(self, mock_inject_file):
        """The instance key_data is appended to root's authorized_keys."""
        instance = _get_fake_instance()
        agent_inst = self._create_agent(instance)
        agent_inst.inject_ssh_key()
        mock_inject_file.assert_called_once_with("/root/.ssh/authorized_keys",
                                                 "\n# The following ssh key "
                                                 "was injected by Nova"
                                                 "\nssh-rsa asdf\n")

    @mock.patch.object(agent.XenAPIBasedAgent, 'inject_file')
    def _test_inject_ssh_key_skipped(self, instance, mock_inject_file):
        """Assert inject_ssh_key() is a no-op for *instance*."""
        agent_inst = self._create_agent(instance)
        agent_inst.inject_ssh_key()
        mock_inject_file.assert_not_called()

    def test_inject_ssh_key_skipped_no_key_data(self):
        instance = _get_fake_instance()
        instance["key_data"] = None
        self._test_inject_ssh_key_skipped(instance)

    def test_inject_ssh_key_skipped_windows(self):
        instance = _get_fake_instance()
        instance["os_type"] = "windows"
        self._test_inject_ssh_key_skipped(instance)

    def test_inject_ssh_key_skipped_cloud_init_present(self):
        instance = _get_fake_instance(
            image_xenapi_skip_agent_inject_ssh="True")
        self._test_inject_ssh_key_skipped(instance)
class FileInjectionTestCase(AgentTestCaseBase):
    """Tests for agent-driven file injection.

    Locals renamed from 'agent' to 'agent_inst' so they no longer shadow
    the imported agent module.
    """

    @mock.patch.object(agent.XenAPIBasedAgent, '_call_agent')
    def test_inject_file(self, mock_call_agent):
        """inject_file base64-encodes path and contents for the agent."""
        instance = _get_fake_instance()
        agent_inst = self._create_agent(instance)
        b64_path = base64.b64encode(b'path')
        b64_contents = base64.b64encode(b'contents')
        agent_inst.inject_file("path", "contents")
        mock_call_agent.assert_called_once_with(host_agent.inject_file,
                                                {'b64_contents': b64_contents,
                                                 'b64_path': b64_path})

    @mock.patch.object(agent.XenAPIBasedAgent, 'inject_file')
    def test_inject_files(self, mock_inject_file):
        """inject_files forwards each (path, contents) pair in order."""
        instance = _get_fake_instance()
        agent_inst = self._create_agent(instance)
        files = [("path1", "content1"), ("path2", "content2")]
        agent_inst.inject_files(files)
        mock_inject_file.assert_has_calls(
            [mock.call("path1", "content1"), mock.call("path2", "content2")])

    @mock.patch.object(agent.XenAPIBasedAgent, 'inject_file')
    def test_inject_files_skipped_when_cloud_init_installed(self,
                                                            mock_inject_file):
        """Injection is skipped when the image opts out at boot."""
        instance = _get_fake_instance(
            image_xenapi_skip_agent_inject_files_at_boot="True")
        agent_inst = self._create_agent(instance)
        files = [("path1", "content1"), ("path2", "content2")]
        agent_inst.inject_files(files)
        mock_inject_file.assert_not_called()
class RebootRetryTestCase(AgentTestCaseBase):
    """Tests for agent retry behaviour across a guest reboot."""

    @mock.patch.object(agent, '_wait_for_new_dom_id')
    def test_retry_on_reboot(self, mock_wait):
        """A REBOOT failure waits for a new domid, then retries once."""
        mock_session = mock.Mock()
        mock_session.VM.get_domid.return_value = "fake_dom_id"
        # Local renamed from 'agent' so it no longer shadows the module.
        agent_inst = self._create_agent(None, mock_session)
        mock_method = mock.Mock().method()
        mock_method.side_effect = [XenAPI.Failure(["REBOOT: fake"]),
                                   {"returncode": '0', "message": "done"}]
        result = agent_inst._call_agent(mock_method)
        self.assertEqual("done", result)
        self.assertTrue(mock_session.VM.get_domid.called)
        self.assertEqual(2, mock_method.call_count)
        mock_wait.assert_called_once_with(mock_session, self.vm_ref,
                                          "fake_dom_id", mock_method)

    @mock.patch.object(time, 'sleep')
    @mock.patch.object(time, 'time')
    def test_wait_for_new_dom_id_found(self, mock_time, mock_sleep):
        """Return immediately when the domid has already changed."""
        mock_session = mock.Mock()
        mock_session.VM.get_domid.return_value = "new"
        agent._wait_for_new_dom_id(mock_session, "vm_ref", "old", "method")
        mock_session.VM.get_domid.assert_called_once_with("vm_ref")
        self.assertFalse(mock_sleep.called)

    @mock.patch.object(time, 'sleep')
    @mock.patch.object(time, 'time')
    def test_wait_for_new_dom_id_after_retry(self, mock_time, mock_sleep):
        """Poll past the transient '-1' domid until a new one appears."""
        self.flags(agent_timeout=3, group="xenserver")
        mock_time.return_value = 0
        mock_session = mock.Mock()
        old = "40"
        new = "42"
        mock_session.VM.get_domid.side_effect = [old, "-1", new]
        agent._wait_for_new_dom_id(mock_session, "vm_ref", old, "method")
        mock_session.VM.get_domid.assert_called_with("vm_ref")
        self.assertEqual(3, mock_session.VM.get_domid.call_count)
        self.assertEqual(2, mock_sleep.call_count)

    @mock.patch.object(time, 'sleep')
    @mock.patch.object(time, 'time')
    def test_wait_for_new_dom_id_timeout(self, mock_time, mock_sleep):
        """Raise AgentTimeout when the domid never changes in time."""
        self.flags(agent_timeout=3, group="xenserver")

        def fake_time():
            # Advance one fake second per call so the timeout expires.
            fake_time.time = fake_time.time + 1
            return fake_time.time

        fake_time.time = 0
        mock_time.side_effect = fake_time
        mock_session = mock.Mock()
        mock_session.VM.get_domid.return_value = "old"
        mock_method = mock.Mock().method()
        mock_method.__name__ = "mock_method"
        self.assertRaises(exception.AgentTimeout,
                          agent._wait_for_new_dom_id,
                          mock_session, "vm_ref", "old", mock_method)
        self.assertEqual(4, mock_session.VM.get_domid.call_count)
class SetAdminPasswordTestCase(AgentTestCaseBase):
    """Tests for password exchange between Nova and the guest agent."""

    @mock.patch.object(agent.XenAPIBasedAgent, '_call_agent')
    @mock.patch("nova.virt.xenapi.agent.SimpleDH")
    def test_exchange_key_with_agent(self, mock_simple_dh, mock_call_agent):
        """Key exchange sends our public key and derives the shared key."""
        # Local renamed from 'agent' so it no longer shadows the module.
        agent_inst = self._create_agent(None)
        instance_mock = mock_simple_dh()
        instance_mock.get_public.return_value = 4321
        mock_call_agent.return_value = "1234"
        result = agent_inst._exchange_key_with_agent()
        mock_call_agent.assert_called_once_with(host_agent.key_init,
                                                {"pub": "4321"},
                                                success_codes=['D0'],
                                                ignore_errors=False)
        result.compute_shared.assert_called_once_with(1234)

    @mock.patch.object(agent.XenAPIBasedAgent, '_call_agent')
    @mock.patch.object(agent.XenAPIBasedAgent,
                       '_save_instance_password_if_sshkey_present')
    @mock.patch.object(agent.XenAPIBasedAgent, '_exchange_key_with_agent')
    def test_set_admin_password_works(self, mock_exchange, mock_save,
                                      mock_call_agent):
        """The new password is DH-encrypted, sent, and saved."""
        mock_dh = mock.Mock(spec_set=agent.SimpleDH)
        mock_dh.encrypt.return_value = "enc_pass"
        mock_exchange.return_value = mock_dh
        agent_inst = self._create_agent(None)
        agent_inst.set_admin_password("new_pass")
        mock_dh.encrypt.assert_called_once_with("new_pass\n")
        mock_call_agent.assert_called_once_with(host_agent.password,
                                                {'enc_pass': 'enc_pass'})
        mock_save.assert_called_once_with("new_pass")

    @mock.patch.object(agent.XenAPIBasedAgent, '_add_instance_fault')
    @mock.patch.object(agent.XenAPIBasedAgent, '_exchange_key_with_agent')
    def test_set_admin_password_silently_fails(self, mock_exchange,
                                               mock_add_fault):
        """Agent timeouts are recorded as instance faults, not raised."""
        error = exception.AgentTimeout(method="fake")
        mock_exchange.side_effect = error
        agent_inst = self._create_agent(None)
        agent_inst.set_admin_password("new_pass")
        mock_add_fault.assert_called_once_with(error, mock.ANY)

    @mock.patch('oslo_concurrency.processutils.execute')
    def test_run_ssl_successful(self, mock_execute):
        """An openssl stderr warning alone must not be treated as failure."""
        mock_execute.return_value = ('0',
                                     '*** WARNING : deprecated key derivation used.'
                                     'Using -iter or -pbkdf2 would be better.')
        agent.SimpleDH()._run_ssl('foo')

    @mock.patch('oslo_concurrency.processutils.execute',
                side_effect=processutils.ProcessExecutionError(
                    exit_code=1, stderr=('ERROR: Something bad happened')))
    def test_run_ssl_failure(self, mock_execute):
        """A nonzero openssl exit raises RuntimeError."""
        self.assertRaises(RuntimeError, agent.SimpleDH()._run_ssl, 'foo')
class UpgradeRequiredTestCase(test.NoDBTestCase):
    """Tests for agent.is_upgrade_required() version comparison."""

    def test_less_than(self):
        self.assertTrue(agent.is_upgrade_required('1.2.3.4', '1.2.3.5'))

    def test_greater_than(self):
        self.assertFalse(agent.is_upgrade_required('1.2.3.5', '1.2.3.4'))

    def test_equal(self):
        self.assertFalse(agent.is_upgrade_required('1.2.3.4', '1.2.3.4'))

    def test_non_lexical(self):
        # Components compare numerically: 10 > 4 even though '10' < '4'
        # as strings.
        self.assertFalse(agent.is_upgrade_required('1.2.3.10', '1.2.3.4'))

    def test_length(self):
        # A candidate with more version components counts as newer.
        self.assertTrue(agent.is_upgrade_required('1.2.3', '1.2.3.4'))
@mock.patch.object(uuidutils, 'generate_uuid')
class CallAgentTestCase(AgentTestCaseBase):
    """Tests for the module-level agent._call_agent() helper.

    The class-level patch hands each test a mock for
    uuidutils.generate_uuid as its *mock_uuid* argument.
    """

    def test_call_agent_success(self, mock_uuid):
        """A return code listed in success_codes yields the stripped message."""
        session = mock.Mock()
        instance = {"uuid": "fake"}
        addl_args = {"foo": "bar"}
        session.VM.get_domid.return_value = '42'
        mock_uuid.return_value = 1
        mock_method = mock.Mock().method()
        mock_method.return_value = {'returncode': '4', 'message': "asdf\\r\\n"}
        mock_method.__name__ = "mock_method"
        self.assertEqual("asdf",
                         agent._call_agent(session, instance, "vm_ref",
                                           mock_method, addl_args, timeout=300,
                                           success_codes=['0', '4']))
        expected_args = {}
        expected_args.update(addl_args)
        mock_method.assert_called_once_with(session, 1, '42', 300,
                                            **expected_args)
        session.VM.get_domid.assert_called_once_with("vm_ref")

    def _call_agent_setup(self, session, mock_uuid, mock_method,
                          returncode='0', success_codes=None,
                          exception=None):
        """Drive _call_agent with canned mocks.

        If *exception* is given the agent method raises it; otherwise it
        returns *returncode* with a fixed message.
        """
        session.XenAPI.Failure = XenAPI.Failure
        instance = {"uuid": "fake"}
        addl_args = {"foo": "bar"}
        session.VM.get_domid.return_value = "42"
        mock_uuid.return_value = 1
        if exception:
            mock_method.side_effect = exception
        else:
            mock_method.return_value = {'returncode': returncode,
                                        'message': "asdf\\r\\n"}
        return agent._call_agent(session, instance, "vm_ref", mock_method,
                                 addl_args, success_codes=success_codes)

    def _assert_agent_called(self, session, mock_uuid, mock_method):
        """Check the agent method got the default timeout (30) and args."""
        expected_args = {"foo": "bar"}
        mock_uuid.assert_called_once_with()
        mock_method.assert_called_once_with(session, 1, '42', 30,
                                            **expected_args)
        session.VM.get_domid.assert_called_once_with("vm_ref")

    def test_call_agent_works_with_defaults(self, mock_uuid):
        session = mock.Mock()
        mock_method = mock.Mock().method()
        mock_method.__name__ = "mock_method"
        self._call_agent_setup(session, mock_uuid, mock_method)
        self._assert_agent_called(session, mock_uuid, mock_method)

    def test_call_agent_fails_with_timeout(self, mock_uuid):
        """A XenAPI TIMEOUT failure maps to exception.AgentTimeout."""
        session = mock.Mock()
        mock_method = mock.Mock().method()
        mock_method.__name__ = "mock_method"
        self.assertRaises(exception.AgentTimeout, self._call_agent_setup,
                          session, mock_uuid, mock_method,
                          exception=XenAPI.Failure(["TIMEOUT:fake"]))
        self._assert_agent_called(session, mock_uuid, mock_method)

    def test_call_agent_fails_with_not_implemented(self, mock_uuid):
        """A NOT IMPLEMENTED failure maps to exception.AgentNotImplemented."""
        session = mock.Mock()
        mock_method = mock.Mock().method()
        mock_method.__name__ = "mock_method"
        self.assertRaises(exception.AgentNotImplemented,
                          self._call_agent_setup,
                          session, mock_uuid, mock_method,
                          exception=XenAPI.Failure(["NOT IMPLEMENTED:"]))
        self._assert_agent_called(session, mock_uuid, mock_method)

    def test_call_agent_fails_with_other_error(self, mock_uuid):
        """Any other XenAPI failure maps to the generic AgentError."""
        session = mock.Mock()
        mock_method = mock.Mock().method()
        mock_method.__name__ = "mock_method"
        self.assertRaises(exception.AgentError, self._call_agent_setup,
                          session, mock_uuid, mock_method,
                          exception=XenAPI.Failure(["asdf"]))
        self._assert_agent_called(session, mock_uuid, mock_method)

    def test_call_agent_fails_with_returned_error(self, mock_uuid):
        """A non-success return code also raises AgentError."""
        session = mock.Mock()
        mock_method = mock.Mock().method()
        mock_method.__name__ = "mock_method"
        self.assertRaises(exception.AgentError, self._call_agent_setup,
                          session, mock_uuid, mock_method, returncode='42')
        self._assert_agent_called(session, mock_uuid, mock_method)
# NOTE(review): this test class reuses the name of the class under test
# (agent.XenAPIBasedAgent) and lacks the usual *TestCase suffix. The name
# is kept to avoid changing the externally visible interface, but it is
# worth renaming in a follow-up.
class XenAPIBasedAgent(AgentTestCaseBase):
    """Tests for XenAPIBasedAgent._call_agent error propagation."""

    @mock.patch.object(agent.XenAPIBasedAgent, "_add_instance_fault")
    @mock.patch.object(agent, "_call_agent")
    def test_call_agent_swallows_error(self, mock_call_agent,
                                       mock_add_instance_fault):
        """By default agent errors are recorded as faults, not raised."""
        fake_error = exception.AgentError(method="bob")
        mock_call_agent.side_effect = fake_error
        instance = _get_fake_instance()
        # Local renamed from 'agent' so it no longer shadows the module.
        agent_inst = self._create_agent(instance)
        agent_inst._call_agent("bob")
        mock_call_agent.assert_called_once_with(
            agent_inst.session, agent_inst.instance,
            agent_inst.vm_ref, "bob", None, None, None)
        mock_add_instance_fault.assert_called_once_with(fake_error, mock.ANY)

    @mock.patch.object(agent.XenAPIBasedAgent, "_add_instance_fault")
    @mock.patch.object(agent, "_call_agent")
    def test_call_agent_throws_error(self, mock_call_agent,
                                     mock_add_instance_fault):
        """With ignore_errors=False the error propagates and no fault is set."""
        fake_error = exception.AgentError(method="bob")
        mock_call_agent.side_effect = fake_error
        instance = _get_fake_instance()
        agent_inst = self._create_agent(instance)
        self.assertRaises(exception.AgentError, agent_inst._call_agent,
                          "bob", ignore_errors=False)
        mock_call_agent.assert_called_once_with(
            agent_inst.session, agent_inst.instance,
            agent_inst.vm_ref, "bob", None, None, None)
        self.assertFalse(mock_add_instance_fault.called)

View File

@ -1,433 +0,0 @@
# Copyright (c) 2013 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os_resource_classes as orc
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
from nova.compute import provider_tree
from nova import conf
from nova import exception
from nova.objects import fields as obj_fields
from nova.tests.unit.virt.xenapi import stubs
from nova.virt import driver
from nova.virt import fake
from nova.virt import xenapi
from nova.virt.xenapi import driver as xenapi_driver
from nova.virt.xenapi import host
CONF = conf.CONF
class XenAPIDriverTestCase(stubs.XenAPITestBaseNoDB):
    """Unit tests for Driver operations."""

    def _get_driver(self):
        """Return a XenAPIDriver wired to a stubbed session."""
        stubs.stubout_session(self, stubs.FakeSessionForVMTests)
        self.flags(connection_url='http://localhost',
                   connection_password='test_pass', group='xenserver')
        return xenapi.XenAPIDriver(fake.FakeVirtAPI(), False)

    def host_stats(self, refresh=True):
        # Canned host stats used as a side_effect for get_host_stats mocks.
        return {'host_memory_total': 3 * units.Mi,
                'host_memory_free_computed': 2 * units.Mi,
                'disk_total': 5 * units.Gi,
                'disk_used': 2 * units.Gi,
                'disk_allocated': 4 * units.Gi,
                'host_hostname': 'somename',
                'supported_instances': obj_fields.Architecture.X86_64,
                'host_cpu_info': {'cpu_count': 50},
                'cpu_model': {
                    'vendor': 'GenuineIntel',
                    'model': 'Intel(R) Xeon(R) CPU X3430 @ 2.40GHz',
                    'topology': {
                        'sockets': 1,
                        'cores': 4,
                        'threads': 1,
                    },
                    'features': [
                        'fpu', 'de', 'tsc', 'msr', 'pae', 'mce',
                        'cx8', 'apic', 'sep', 'mtrr', 'mca',
                        'cmov', 'pat', 'clflush', 'acpi', 'mmx',
                        'fxsr', 'sse', 'sse2', 'ss', 'ht',
                        'nx', 'constant_tsc', 'nonstop_tsc',
                        'aperfmperf', 'pni', 'vmx', 'est', 'ssse3',
                        'sse4_1', 'sse4_2', 'popcnt', 'hypervisor',
                        'ida', 'tpr_shadow', 'vnmi', 'flexpriority',
                        'ept', 'vpid',
                    ],
                },
                'vcpus_used': 10,
                'pci_passthrough_devices': '',
                'host_other-config': {'iscsi_iqn': 'someiqn'},
                'vgpu_stats': {
                    'c8328467-badf-43d8-8e28-0e096b0f88b1':
                    {'uuid': '6444c6ee-3a49-42f5-bebb-606b52175e67',
                     'type_name': 'Intel GVT-g',
                     'max_heads': 1,
                     'total': 7,
                     'remaining': 7,
                     },
                }}

    def test_available_resource(self):
        """Resource totals derive from host stats; sizes convert to MB/GB."""
        driver = self._get_driver()
        # (6, 8, 2) is packed as 6008002 in the hypervisor_version field.
        driver._session.product_version = (6, 8, 2)
        with mock.patch.object(driver.host_state, 'get_host_stats',
                               side_effect=self.host_stats) as mock_get:
            resources = driver.get_available_resource(None)
            self.assertEqual(6008002, resources['hypervisor_version'])
            self.assertEqual(50, resources['vcpus'])
            self.assertEqual(3, resources['memory_mb'])
            self.assertEqual(5, resources['local_gb'])
            self.assertEqual(10, resources['vcpus_used'])
            self.assertEqual(3 - 2, resources['memory_mb_used'])
            self.assertEqual(2, resources['local_gb_used'])
            self.assertEqual('XenServer', resources['hypervisor_type'])
            self.assertEqual('somename', resources['hypervisor_hostname'])
            self.assertEqual(1, resources['disk_available_least'])
            mock_get.assert_called_once_with(refresh=True)

    def test_set_bootable(self):
        """set_bootable delegates to vmops."""
        driver = self._get_driver()
        with mock.patch.object(driver._vmops,
                               'set_bootable') as mock_set_bootable:
            driver.set_bootable('inst', True)
            mock_set_bootable.assert_called_once_with('inst', True)

    def test_post_interrupted_snapshot_cleanup(self):
        """post_interrupted_snapshot_cleanup delegates to vmops."""
        driver = self._get_driver()
        fake_vmops_cleanup = mock.Mock()
        driver._vmops.post_interrupted_snapshot_cleanup = fake_vmops_cleanup
        driver.post_interrupted_snapshot_cleanup("context", "instance")
        fake_vmops_cleanup.assert_called_once_with("context", "instance")

    def test_public_api_signatures(self):
        """Driver signatures must match the ComputeDriver base class."""
        inst = self._get_driver()
        self.assertPublicAPISignatures(driver.ComputeDriver(None), inst)

    def test_get_volume_connector(self):
        """The connector reports the connection URL host and host IQN."""
        ip = '123.123.123.123'
        driver = self._get_driver()
        self.flags(connection_url='http://%s' % ip,
                   connection_password='test_pass', group='xenserver')
        with mock.patch.object(driver.host_state, 'get_host_stats',
                               side_effect=self.host_stats) as mock_get:
            connector = driver.get_volume_connector({'uuid': 'fake'})
            self.assertIn('ip', connector)
            self.assertEqual(connector['ip'], ip)
            self.assertIn('initiator', connector)
            self.assertEqual(connector['initiator'], 'someiqn')
            mock_get.assert_called_once_with(refresh=True)

    def test_get_block_storage_ip(self):
        """With default storage IP config, the connection URL host wins."""
        my_ip = '123.123.123.123'
        connection_ip = '124.124.124.124'
        driver = self._get_driver()
        self.flags(connection_url='http://%s' % connection_ip,
                   group='xenserver')
        self.flags(my_ip=my_ip, my_block_storage_ip=my_ip)
        ip = driver._get_block_storage_ip()
        self.assertEqual(connection_ip, ip)

    def test_get_block_storage_ip_conf(self):
        """An explicit my_block_storage_ip overrides the connection host."""
        driver = self._get_driver()
        my_ip = '123.123.123.123'
        my_block_storage_ip = '124.124.124.124'
        self.flags(my_ip=my_ip, my_block_storage_ip=my_block_storage_ip)
        ip = driver._get_block_storage_ip()
        self.assertEqual(my_block_storage_ip, ip)

    @mock.patch.object(xenapi_driver, 'invalid_option')
    @mock.patch.object(xenapi_driver.vm_utils, 'ensure_correct_host')
    def test_invalid_options(self, mock_ensure, mock_invalid):
        """Options incompatible with independent_compute are flagged."""
        driver = self._get_driver()
        self.flags(independent_compute=True, group='xenserver')
        self.flags(check_host=True, group='xenserver')
        self.flags(flat_injected=True)
        self.flags(default_ephemeral_format='vfat')
        driver.init_host('host')
        expected_calls = [
            mock.call('CONF.xenserver.check_host', False),
            mock.call('CONF.flat_injected', False),
            mock.call('CONF.default_ephemeral_format', 'ext3')]
        mock_invalid.assert_has_calls(expected_calls)

    @mock.patch.object(xenapi_driver.vm_utils, 'cleanup_attached_vdis')
    @mock.patch.object(xenapi_driver.vm_utils, 'ensure_correct_host')
    def test_independent_compute_no_vdi_cleanup(self, mock_ensure,
                                                mock_cleanup):
        """Independent compute skips VDI cleanup and host checks."""
        driver = self._get_driver()
        self.flags(independent_compute=True, group='xenserver')
        self.flags(check_host=False, group='xenserver')
        self.flags(flat_injected=False)
        driver.init_host('host')
        self.assertFalse(mock_cleanup.called)
        self.assertFalse(mock_ensure.called)

    @mock.patch.object(xenapi_driver.vm_utils, 'cleanup_attached_vdis')
    @mock.patch.object(xenapi_driver.vm_utils, 'ensure_correct_host')
    def test_dependent_compute_vdi_cleanup(self, mock_ensure, mock_cleanup):
        """Dependent compute performs VDI cleanup and host checks."""
        driver = self._get_driver()
        self.assertFalse(mock_cleanup.called)
        self.flags(independent_compute=False, group='xenserver')
        self.flags(check_host=True, group='xenserver')
        driver.init_host('host')
        self.assertTrue(mock_cleanup.called)
        self.assertTrue(mock_ensure.called)

    @mock.patch.object(xenapi_driver.vmops.VMOps, 'attach_interface')
    def test_attach_interface(self, mock_attach_interface):
        """attach_interface delegates to vmops (context/image dropped)."""
        driver = self._get_driver()
        driver.attach_interface('fake_context', 'fake_instance',
                                'fake_image_meta', 'fake_vif')
        mock_attach_interface.assert_called_once_with('fake_instance',
                                                      'fake_vif')

    @mock.patch.object(xenapi_driver.vmops.VMOps, 'detach_interface')
    def test_detach_interface(self, mock_detach_interface):
        """detach_interface delegates to vmops (context dropped)."""
        driver = self._get_driver()
        driver.detach_interface('fake_context', 'fake_instance', 'fake_vif')
        mock_detach_interface.assert_called_once_with('fake_instance',
                                                      'fake_vif')

    @mock.patch.object(xenapi_driver.vmops.VMOps,
                       'post_live_migration_at_source')
    def test_post_live_migration_at_source(self, mock_post_live_migration):
        """post_live_migration_at_source delegates to vmops unchanged."""
        driver = self._get_driver()
        driver.post_live_migration_at_source('fake_context', 'fake_instance',
                                             'fake_network_info')
        mock_post_live_migration.assert_called_once_with(
            'fake_context', 'fake_instance', 'fake_network_info')

    @mock.patch.object(xenapi_driver.vmops.VMOps,
                       'rollback_live_migration_at_destination')
    def test_rollback_live_migration_at_destination(self, mock_rollback):
        """Rollback delegates to vmops (context dropped)."""
        driver = self._get_driver()
        driver.rollback_live_migration_at_destination(
            'fake_context', 'fake_instance', 'fake_network_info',
            'fake_block_device')
        mock_rollback.assert_called_once_with('fake_instance',
                                              'fake_network_info',
                                              'fake_block_device')

    @mock.patch.object(host.HostState, 'get_host_stats')
    def test_update_provider_tree(self, mock_get_stats):
        """update_provider_tree reports VCPU/MEMORY_MB/DISK_GB/VGPU."""
        # Add a wrinkle such that cpu_allocation_ratio is configured to a
        # non-default value and overrides initial_cpu_allocation_ratio.
        self.flags(cpu_allocation_ratio=1.0)
        # Add a wrinkle such that reserved_host_memory_mb is set to a
        # non-default value.
        self.flags(reserved_host_memory_mb=2048)
        expected_reserved_disk = (
            xenapi_driver.XenAPIDriver._get_reserved_host_disk_gb_from_config()
        )
        expected_inv = {
            orc.VCPU: {
                'total': 50,
                'min_unit': 1,
                'max_unit': 50,
                'step_size': 1,
                'allocation_ratio': CONF.cpu_allocation_ratio,
                'reserved': CONF.reserved_host_cpus,
            },
            orc.MEMORY_MB: {
                'total': 3,
                'min_unit': 1,
                'max_unit': 3,
                'step_size': 1,
                'allocation_ratio': CONF.initial_ram_allocation_ratio,
                'reserved': CONF.reserved_host_memory_mb,
            },
            orc.DISK_GB: {
                'total': 5,
                'min_unit': 1,
                'max_unit': 5,
                'step_size': 1,
                'allocation_ratio': CONF.initial_disk_allocation_ratio,
                'reserved': expected_reserved_disk,
            },
            orc.VGPU: {
                'total': 7,
                'min_unit': 1,
                'max_unit': 1,
                'step_size': 1,
            },
        }
        mock_get_stats.side_effect = self.host_stats
        drv = self._get_driver()
        pt = provider_tree.ProviderTree()
        nodename = 'fake-node'
        pt.new_root(nodename, uuids.rp_uuid)
        drv.update_provider_tree(pt, nodename)
        inv = pt.data(nodename).inventory
        mock_get_stats.assert_called_once_with(refresh=True)
        self.assertEqual(expected_inv, inv)

    @mock.patch.object(host.HostState, 'get_host_stats')
    def test_update_provider_tree_no_vgpu(self, mock_get_stats):
        # Test when there are no vGPU resources in the inventory.
        host_stats = self.host_stats()
        host_stats.update(vgpu_stats={})
        mock_get_stats.return_value = host_stats

        drv = self._get_driver()
        pt = provider_tree.ProviderTree()
        nodename = 'fake-node'
        pt.new_root(nodename, uuids.rp_uuid)
        drv.update_provider_tree(pt, nodename)

        inv = pt.data(nodename).inventory
        # check if the inventory data does NOT contain VGPU.
        self.assertNotIn(orc.VGPU, inv)

    def test_get_vgpu_total_single_grp(self):
        # Test when only one group included in the host_stats.
        vgpu_stats = {
            'grp_uuid_1': {
                'total': 7
            }
        }

        drv = self._get_driver()
        vgpu_total = drv._get_vgpu_total(vgpu_stats)

        self.assertEqual(7, vgpu_total)

    def test_get_vgpu_total_multiple_grps(self):
        # Test when multiple groups included in the host_stats.
        vgpu_stats = {
            'grp_uuid_1': {
                'total': 7
            },
            'grp_uuid_2': {
                'total': 4
            }
        }

        drv = self._get_driver()
        vgpu_total = drv._get_vgpu_total(vgpu_stats)

        self.assertEqual(11, vgpu_total)

    def test_get_vgpu_info_no_vgpu_alloc(self):
        # no vgpu in allocation.
        alloc = {
            'rp1': {
                'resources': {
                    'VCPU': 1,
                    'MEMORY_MB': 512,
                    'DISK_GB': 1,
                }
            }
        }

        drv = self._get_driver()
        vgpu_info = drv._get_vgpu_info(alloc)

        self.assertIsNone(vgpu_info)

    @mock.patch.object(host.HostState, 'get_host_stats')
    def test_get_vgpu_info_has_vgpu_alloc(self, mock_get_stats):
        # Have vgpu in allocation.
        alloc = {
            'rp1': {
                'resources': {
                    'VCPU': 1,
                    'MEMORY_MB': 512,
                    'DISK_GB': 1,
                    'VGPU': 1,
                }
            }
        }
        # The following fake data assumes there are two GPU
        # groups both of which supply the same type of vGPUs.
        # If the 1st GPU group has no remaining available vGPUs;
        # the 2nd GPU group still has remaining available vGPUs.
        # it should return the uuid from the 2nd GPU group.
        vgpu_stats = {
            uuids.gpu_group_1: {
                'uuid': uuids.vgpu_type,
                'type_name': 'GRID K180Q',
                'max_heads': 4,
                'total': 2,
                'remaining': 0,
            },
            uuids.gpu_group_2: {
                'uuid': uuids.vgpu_type,
                'type_name': 'GRID K180Q',
                'max_heads': 4,
                'total': 2,
                'remaining': 2,
            },
        }

        host_stats = self.host_stats()
        host_stats.update(vgpu_stats=vgpu_stats)
        mock_get_stats.return_value = host_stats

        drv = self._get_driver()
        vgpu_info = drv._get_vgpu_info(alloc)

        expected_info = {'gpu_grp_uuid': uuids.gpu_group_2,
                         'vgpu_type_uuid': uuids.vgpu_type}
        self.assertEqual(expected_info, vgpu_info)

    @mock.patch.object(host.HostState, 'get_host_stats')
    def test_get_vgpu_info_has_vgpu_alloc_except(self, mock_get_stats):
        # Allocated vGPU but got exception due to no remaining vGPU.
        alloc = {
            'rp1': {
                'resources': {
                    'VCPU': 1,
                    'MEMORY_MB': 512,
                    'DISK_GB': 1,
                    'VGPU': 1,
                }
            }
        }
        vgpu_stats = {
            uuids.gpu_group: {
                'uuid': uuids.vgpu_type,
                'type_name': 'Intel GVT-g',
                'max_heads': 1,
                'total': 7,
                'remaining': 0,
            },
        }

        host_stats = self.host_stats()
        host_stats.update(vgpu_stats=vgpu_stats)
        mock_get_stats.return_value = host_stats

        drv = self._get_driver()
        self.assertRaises(exception.ComputeResourcesUnavailable,
                          drv._get_vgpu_info,
                          alloc)

View File

@ -1,76 +0,0 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import exception
from nova.tests.unit.virt.xenapi import stubs
from nova.virt.xenapi import network_utils
class NetworkUtilsTestCase(stubs.XenAPITestBaseNoDB):
    """Exercise network lookup helpers against a mocked XenAPI session."""

    def test_find_network_with_name_label_works(self):
        mock_session = mock.Mock()
        mock_session.network.get_by_name_label.return_value = ["net"]

        found = network_utils.find_network_with_name_label(
            mock_session, "label")

        self.assertEqual("net", found)
        mock_session.network.get_by_name_label.assert_called_once_with(
            "label")

    def test_find_network_with_name_returns_none(self):
        # No matching network yields None rather than an error.
        mock_session = mock.Mock()
        mock_session.network.get_by_name_label.return_value = []

        found = network_utils.find_network_with_name_label(
            mock_session, "label")

        self.assertIsNone(found)

    def test_find_network_with_name_label_raises(self):
        # An ambiguous label (two matches) is an error.
        mock_session = mock.Mock()
        mock_session.network.get_by_name_label.return_value = ["net", "net2"]

        self.assertRaises(exception.NovaException,
                          network_utils.find_network_with_name_label,
                          mock_session, "label")

    def test_find_network_with_bridge_works(self):
        mock_session = mock.Mock()
        mock_session.network.get_all_records_where.return_value = {
            "net": "asdf"}

        found = network_utils.find_network_with_bridge(
            mock_session, "bridge")

        self.assertEqual("net", found)
        expected_expr = (
            'field "name__label" = "bridge" or field "bridge" = "bridge"')
        mock_session.network.get_all_records_where.assert_called_once_with(
            expected_expr)

    def test_find_network_with_bridge_raises_too_many(self):
        mock_session = mock.Mock()
        mock_session.network.get_all_records_where.return_value = {
            "net": "asdf",
            "net2": "asdf2"
        }

        self.assertRaises(exception.NovaException,
                          network_utils.find_network_with_bridge,
                          mock_session, "bridge")

    def test_find_network_with_bridge_raises_no_networks(self):
        mock_session = mock.Mock()
        mock_session.network.get_all_records_where.return_value = {}

        self.assertRaises(exception.NovaException,
                          network_utils.find_network_with_bridge,
                          mock_session, "bridge")

View File

@ -1,206 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import test
from nova.virt.xenapi import host
class VGPUTestCase(test.NoDBTestCase):
"""Unit tests for Driver operations."""
    @mock.patch.object(host.HostState, 'update_status',
                       return_value='fake_stats_1')
    @mock.patch.object(host.HostState, '_get_vgpu_stats_in_group')
    def test_get_vgpu_stats_empty_cfg(self, mock_get, mock_update):
        """With no enabled vGPU types, stats are empty and XenAPI is unused."""
        # no vGPU type configured.
        self.flags(enabled_vgpu_types=[], group='devices')
        session = mock.Mock()
        host_obj = host.HostState(session)
        stats = host_obj._get_vgpu_stats()
        session.call_xenapi.assert_not_called()
        self.assertEqual(stats, {})
    @mock.patch.object(host.HostState, 'update_status',
                       return_value='fake_stats_1')
    @mock.patch.object(host.HostState, '_get_vgpu_stats_in_group')
    def test_get_vgpu_stats_single_type(self, mock_get, mock_update):
        """Stats are keyed by group uuid; groups without the type are skipped."""
        # configured single vGPU type
        self.flags(enabled_vgpu_types=['type_name_1'], group='devices')
        session = mock.Mock()
        # multiple GPU groups
        session.call_xenapi.side_effect = [
            ['grp_ref1', 'grp_ref2'],  # GPU_group.get_all
            'uuid_1',  # GPU_group.get_uuid
            'uuid_2',  # GPU_group.get_uuid
        ]
        # Let it return None for the 2nd GPU group for the case
        # that it doesn't have the specified vGPU type enabled.
        mock_get.side_effect = ['fake_stats_1', None]
        host_obj = host.HostState(session)
        stats = host_obj._get_vgpu_stats()
        self.assertEqual(session.call_xenapi.call_count, 3)
        self.assertEqual(mock_update.call_count, 1)
        self.assertEqual(mock_get.call_count, 2)
        self.assertEqual(stats, {'uuid_1': 'fake_stats_1'})
    @mock.patch.object(host.HostState, 'update_status',
                       return_value='fake_stats_1')
    @mock.patch.object(host.HostState, '_get_vgpu_stats_in_group')
    def test_get_vgpu_stats_multi_types(self, mock_get, mock_update):
        """Only the first configured vGPU type is used for stats."""
        # when multiple vGPU types configured, it use the first one.
        self.flags(enabled_vgpu_types=['type_name_1', 'type_name_2'],
                   group='devices')
        session = mock.Mock()
        session.call_xenapi.side_effect = [
            ['grp_ref1'],  # GPU_group.get_all
            'uuid_1',  # GPU_group.get_uuid
        ]
        mock_get.side_effect = ['fake_stats_1']
        host_obj = host.HostState(session)
        stats = host_obj._get_vgpu_stats()
        self.assertEqual(session.call_xenapi.call_count, 2)
        self.assertEqual(mock_update.call_count, 1)
        self.assertEqual(stats, {'uuid_1': 'fake_stats_1'})
        # called with the first vGPU type: 'type_name_1'
        mock_get.assert_called_with('grp_ref1', ['type_name_1'])
    @mock.patch.object(host.HostState, 'update_status',
                       return_value='fake_stats_1')
    @mock.patch.object(host.HostState, '_get_total_vgpu_in_grp',
                       return_value=7)
    def test_get_vgpu_stats_in_group(self, mock_get, mock_update):
        # Test it will return vGPU stat for the enabled vGPU type.
        enabled_vgpu_types = ['type_name_2']
        session = mock.Mock()
        # Canned XenAPI responses, consumed in call order.
        session.call_xenapi.side_effect = [
            ['type_ref_1', 'type_ref_2'],  # GPU_group.get_enabled_VGPU_types
            'type_name_1',  # VGPU_type.get_model_name
            'type_name_2',  # VGPU_type.get_model_name
            'type_uuid_2',  # VGPU_type.get_uuid
            '4',  # VGPU_type.get_max_heads
            '6',  # GPU_group.get_remaining_capacity
        ]
        host_obj = host.HostState(session)
        stats = host_obj._get_vgpu_stats_in_group('grp_ref',
                                                  enabled_vgpu_types)
        expect_stats = {'uuid': 'type_uuid_2',
                        'type_name': 'type_name_2',
                        'max_heads': 4,
                        'total': 7,
                        'remaining': 6,
                        }
        self.assertEqual(session.call_xenapi.call_count, 6)
        # It should get_uuid for the vGPU type passed via *enabled_vgpu_types*
        # (the arg for get_uuid should be 'type_ref_2').
        get_uuid_call = [mock.call('VGPU_type.get_uuid', 'type_ref_2')]
        session.call_xenapi.assert_has_calls(get_uuid_call)
        mock_get.assert_called_once()
        self.assertEqual(expect_stats, stats)
    @mock.patch.object(host.HostState, 'update_status')
    @mock.patch.object(host.HostState, '_get_total_vgpu_in_grp',
                       return_value=7)
    def test_get_vgpu_stats_in_group_multiple(self, mock_get, mock_update):
        # Test when enabled multiple vGPU types in the same group.
        # It should only return the first vGPU type's stats.
        enabled_vgpu_types = ['type_name_1', 'type_name_2']
        session = mock.Mock()
        # Canned XenAPI responses, consumed in call order.
        session.call_xenapi.side_effect = [
            ['type_ref_1', 'type_ref_2'],  # GPU_group.get_enabled_VGPU_types
            'type_name_1',  # VGPU_type.get_model_name
            'type_name_2',  # VGPU_type.get_model_name
            'type_uuid_1',  # VGPU_type.get_uuid
            '4',  # VGPU_type.get_max_heads
            '6',  # GPU_group.get_remaining_capacity
        ]
        host_obj = host.HostState(session)
        stats = host_obj._get_vgpu_stats_in_group('grp_ref',
                                                  enabled_vgpu_types)
        expect_stats = {
            'uuid': 'type_uuid_1',
            'type_name': 'type_name_1',
            'max_heads': 4,
            'total': 7,
            'remaining': 6,
        }
        self.assertEqual(session.call_xenapi.call_count, 6)
        # It should call get_uuid for the first vGPU type (the arg for get_uuid
        # should be 'type_ref_1').
        get_uuid_call = [mock.call('VGPU_type.get_uuid', 'type_ref_1')]
        session.call_xenapi.assert_has_calls(get_uuid_call)
        mock_get.assert_called_once()
        self.assertEqual(expect_stats, stats)
@mock.patch.object(host.HostState, 'update_status')
@mock.patch.object(host.HostState, '_get_total_vgpu_in_grp',
                   return_value=7)
def test_get_vgpu_stats_in_group_cfg_not_in_grp(self, mock_get,
                                                mock_update):
    """A configured type that is not in the GPU group yields None."""
    # Test when the enable_vgpu_types is not a valid
    # type belong to the GPU group. It will return None.
    enabled_vgpu_types = ['bad_type_name']
    session = mock.Mock()
    # Only the type-discovery calls happen; no uuid/capacity lookups.
    session.call_xenapi.side_effect = [
        ['type_ref_1', 'type_ref_2'],  # GPU_group.get_enabled_VGPU_types
        'type_name_1',  # VGPU_type.get_model_name
        'type_name_2',  # VGPU_type.get_model_name
    ]
    host_obj = host.HostState(session)
    stats = host_obj._get_vgpu_stats_in_group('grp_ref',
                                              enabled_vgpu_types)
    expect_stats = None
    self.assertEqual(session.call_xenapi.call_count, 3)
    mock_get.assert_not_called()
    self.assertEqual(expect_stats, stats)
@mock.patch.object(host.HostState, 'update_status')
def test_get_total_vgpu_in_grp(self, mock_update):
    """Group total is the sum of each PGPU's capacity for the type."""
    session = mock.Mock()
    # The fake PGPU records returned from call_xenapi's string function:
    # "PGPU.get_all_records_where".
    pgpu_records = {
        'pgpu_ref1': {
            'enabled_VGPU_types': ['type_ref1', 'type_ref2'],
            'supported_VGPU_max_capacities': {
                'type_ref1': '1',
                'type_ref2': '3',
            }
        },
        'pgpu_ref2': {
            'enabled_VGPU_types': ['type_ref1', 'type_ref2'],
            'supported_VGPU_max_capacities': {
                'type_ref1': '1',
                'type_ref2': '3',
            }
        }
    }
    session.call_xenapi.return_value = pgpu_records
    host_obj = host.HostState(session)
    total = host_obj._get_total_vgpu_in_grp('grp_ref', 'type_ref1')
    # The query must be scoped to the requested GPU group.
    session.call_xenapi.assert_called_with(
        'PGPU.get_all_records_where', 'field "GPU_group" = "grp_ref"')
    # The total amount of VGPUs is equal to sum of available VGPU of
    # 'type_ref1' in all PGPUs: 1 + 1 from the two records above.
    self.assertEqual(total, 2)

View File

@ -1,554 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.compute import power_state
from nova import exception
from nova import test
from nova.tests.unit.virt.xenapi import stubs
from nova.virt.xenapi import network_utils
from nova.virt.xenapi import vif
from nova.virt.xenapi import vm_utils
import os_xenapi
# Minimal Neutron-style VIF record shared by the VIF driver tests below;
# only the fields the driver code reads ('uuid', 'address', 'id') matter,
# the rest mirror the usual nova VIF model columns.
fake_vif = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': 0,
    'id': '123456789123',
    'address': '00:00:00:00:00:00',
    'network_id': 123,
    'instance_uuid': 'fake-uuid',
    'uuid': 'fake-uuid-2',
}
def fake_call_xenapi(method, *args):
    """Canned stand-in for session.call_xenapi used by the VIF tests.

    Known methods return fixed fixtures.  VIF.get_record, VIF.destroy
    and VIF.create raise for any reference other than the expected fake
    one, and an unrecognised method is echoed back as a string so any
    assertion comparing against a real result fails loudly.
    """
    if method == "VM.get_VIFs":
        return ["fake_vif_ref", "fake_vif_ref_A2"]
    if method == "VIF.get_record":
        if args[0] != "fake_vif_ref":
            raise exception.Exception("Failed get vif record")
        return {'uuid': fake_vif['uuid'],
                'MAC': fake_vif['address'],
                'network': 'fake_network',
                'other_config': {'neutron-port-id': fake_vif['id']}
                }
    if method == "VIF.unplug":
        return None
    if method == "VIF.destroy":
        if args[0] != "fake_vif_ref":
            raise exception.Exception("unplug vif failed")
        return None
    if method == "VIF.create":
        if args[0] != "fake_vif_rec":
            raise exception.Exception("VIF existed")
        return "fake_vif_ref"
    return "Unexpected call_xenapi: %s.%s" % (method, args)
class XenVIFDriverTestBase(stubs.XenAPITestBaseNoDB):
    """Shared fixture: a fake XenAPI session wired to fake_call_xenapi."""

    def setUp(self):
        super(XenVIFDriverTestBase, self).setUp()
        fake_session = mock.Mock()
        fake_session.call_xenapi.side_effect = fake_call_xenapi
        self._session = fake_session

    def mock_patch_object(self, target, attribute, return_val=None,
                          side_effect=None):
        """Patch *attribute* on *target* for the duration of one test.

        Needed because attributes such as self._session.VIF.get_record
        are created dynamically through an overridden __getattr__, so
        the usual decorator form of mocking cannot reach them.  The
        patch is reverted automatically via addCleanup.
        """
        patcher = mock.patch.object(target, attribute,
                                    return_value=return_val,
                                    side_effect=side_effect)
        patched = patcher.start()
        self.addCleanup(patcher.stop)
        return patched
class XenVIFDriverTestCase(XenVIFDriverTestBase):
    """Tests for the generic XenVIFDriver base class."""
    def setUp(self):
        super(XenVIFDriverTestCase, self).setUp()
        self.base_driver = vif.XenVIFDriver(self._session)
    def test_get_vif_ref(self):
        """_get_vif_ref returns the VIF ref whose MAC matches the vif."""
        vm_ref = "fake_vm_ref"
        vif_ref = 'fake_vif_ref'
        ret_vif_ref = self.base_driver._get_vif_ref(fake_vif, vm_ref)
        self.assertEqual(vif_ref, ret_vif_ref)
        expected = [mock.call('VM.get_VIFs', vm_ref),
                    mock.call('VIF.get_record', vif_ref)]
        self.assertEqual(expected, self._session.call_xenapi.call_args_list)
    def test_get_vif_ref_none_and_exception(self):
        """No MAC match returns None after scanning every VIF on the VM."""
        vm_ref = "fake_vm_ref"
        vif = {'address': "no_match_vif_address"}
        ret_vif_ref = self.base_driver._get_vif_ref(vif, vm_ref)
        self.assertIsNone(ret_vif_ref)
        # Both VIF refs are inspected (the second one raises inside the
        # fake session and is tolerated by the driver).
        expected = [mock.call('VM.get_VIFs', vm_ref),
                    mock.call('VIF.get_record', 'fake_vif_ref'),
                    mock.call('VIF.get_record', 'fake_vif_ref_A2')]
        self.assertEqual(expected, self._session.call_xenapi.call_args_list)
    def test_create_vif(self):
        """_create_vif passes the record straight through to VIF.create."""
        vif_rec = "fake_vif_rec"
        vm_ref = "fake_vm_ref"
        ret_vif_ref = self.base_driver._create_vif(fake_vif, vif_rec, vm_ref)
        self.assertEqual("fake_vif_ref", ret_vif_ref)
        expected = [mock.call('VIF.create', vif_rec)]
        self.assertEqual(expected, self._session.call_xenapi.call_args_list)
    def test_create_vif_exception(self):
        """A failing VIF.create is rewrapped as NovaException."""
        self.assertRaises(exception.NovaException,
                          self.base_driver._create_vif,
                          "fake_vif", "missing_vif_rec", "fake_vm_ref")
    @mock.patch.object(vif.XenVIFDriver, 'hot_unplug')
    @mock.patch.object(vif.XenVIFDriver, '_get_vif_ref',
                       return_value='fake_vif_ref')
    def test_unplug(self, mock_get_vif_ref, mock_hot_unplug):
        """unplug hot-unplugs the VIF and then destroys it."""
        instance = {'name': "fake_instance"}
        vm_ref = "fake_vm_ref"
        self.base_driver.unplug(instance, fake_vif, vm_ref)
        expected = [mock.call('VIF.destroy', 'fake_vif_ref')]
        self.assertEqual(expected, self._session.call_xenapi.call_args_list)
        mock_hot_unplug.assert_called_once_with(
            fake_vif, instance, 'fake_vm_ref', 'fake_vif_ref')
    @mock.patch.object(vif.XenVIFDriver, '_get_vif_ref',
                       return_value='missing_vif_ref')
    def test_unplug_exception(self, mock_get_vif_ref):
        """A failing VIF.destroy is rewrapped as NovaException."""
        instance = "fake_instance"
        vm_ref = "fake_vm_ref"
        self.assertRaises(exception.NovaException,
                          self.base_driver.unplug,
                          instance, fake_vif, vm_ref)
class XenAPIOpenVswitchDriverTestCase(XenVIFDriverTestBase):
    """Tests for the Open vSwitch (Neutron) flavour of the VIF driver."""
    def setUp(self):
        super(XenAPIOpenVswitchDriverTestCase, self).setUp()
        self.ovs_driver = vif.XenAPIOpenVswitchDriver(self._session)
    @mock.patch.object(vif.XenAPIOpenVswitchDriver, 'hot_plug')
    @mock.patch.object(vif.XenVIFDriver, '_create_vif',
                       return_value='fake_vif_ref')
    @mock.patch.object(vif.XenAPIOpenVswitchDriver,
                       'create_vif_interim_network')
    @mock.patch.object(vif.XenVIFDriver, '_get_vif_ref', return_value=None)
    @mock.patch.object(vif.vm_utils, 'lookup', return_value='fake_vm_ref')
    def test_plug(self, mock_lookup, mock_get_vif_ref,
                  mock_create_vif_interim_network,
                  mock_create_vif, mock_hot_plug):
        """plug() creates the interim network and VIF, then hot-plugs."""
        instance = {'name': "fake_instance_name"}
        ret_vif_ref = self.ovs_driver.plug(
            instance, fake_vif, vm_ref=None, device=1)
        self.assertTrue(mock_lookup.called)
        self.assertTrue(mock_get_vif_ref.called)
        self.assertTrue(mock_create_vif_interim_network.called)
        self.assertTrue(mock_create_vif.called)
        self.assertEqual('fake_vif_ref', ret_vif_ref)
        mock_hot_plug.assert_called_once_with(fake_vif, instance,
                                              'fake_vm_ref', 'fake_vif_ref')
    @mock.patch.object(vif.vm_utils, 'lookup', return_value=None)
    def test_plug_exception(self, mock_lookup):
        """plug() for an unknown VM raises VirtualInterfacePlugException."""
        instance = {'name': "fake_instance_name"}
        self.assertRaises(exception.VirtualInterfacePlugException,
                          self.ovs_driver.plug, instance, fake_vif,
                          vm_ref=None, device=1)
        mock_lookup.assert_called_once_with(self._session, instance['name'])
    @mock.patch.object(vif.XenAPIOpenVswitchDriver,
                       'delete_network_and_bridge')
    @mock.patch.object(network_utils, 'find_network_with_name_label',
                       return_value='fake_network')
    @mock.patch.object(vif.XenVIFDriver, 'unplug')
    def test_unplug(self, mock_super_unplug,
                    mock_find_network_with_name_label,
                    mock_delete_network_bridge):
        """unplug() delegates to the base class, then removes the bridge."""
        instance = {'name': "fake_instance"}
        vm_ref = "fake_vm_ref"
        self.ovs_driver.unplug(instance, fake_vif, vm_ref)
        self.assertTrue(mock_super_unplug.called)
        self.assertTrue(mock_delete_network_bridge.called)
    @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_delete_linux_bridge')
    @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_delete_linux_port')
    @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_br')
    @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_port')
    @mock.patch.object(network_utils, 'find_network_with_name_label')
    @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_get_network_by_vif')
    def test_delete_network_and_bridge(self, mock_get_network,
                                       mock_find_network,
                                       mock_ovs_del_port, mock_ovs_del_br,
                                       mock_delete_linux_port,
                                       mock_delete_linux_bridge):
        # Delete network and bridge
        mock_get_network.return_value = 'fake_network'
        instance = {'name': 'fake_instance'}
        self._session.network = mock.Mock()
        # No VIFs remain on the network, so both the XenAPI network and
        # all the OVS/Linux plumbing are expected to be removed.
        self._session.network.get_VIFs.return_value = None
        self.ovs_driver.delete_network_and_bridge(instance, 'fake_vif_id')
        self._session.network.get_bridge.assert_called_once_with(
            'fake_network')
        self._session.network.destroy.assert_called_once_with('fake_network')
        self.assertEqual(mock_ovs_del_port.call_count, 2)
        self.assertEqual(mock_delete_linux_port.call_count, 2)
        self.assertTrue(mock_delete_linux_bridge.called)
        self.assertTrue(mock_ovs_del_br.called)
    @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_delete_linux_bridge')
    @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_delete_linux_port')
    @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_br')
    @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_port')
    @mock.patch.object(network_utils, 'find_network_with_name_label')
    @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_get_network_by_vif')
    def test_delete_network_and_bridge_with_remote_vif_on(
            self,
            mock_get_network,
            mock_find_network,
            mock_ovs_del_port,
            mock_ovs_del_br,
            mock_delete_linux_port,
            mock_delete_linux_bridge):
        # If still has vifs attached to the network on remote hosts, delete
        # network function would not be called, while the bridge would
        # be deleted
        mock_get_network.return_value = 'fake_network'
        instance = {'name': 'fake_instance'}
        fake_local_host_ref = 'fake_host_ref'
        fake_vif_id = 'fake_vif_id'
        # The qbr bridge name is derived from the vif id.
        expected_qbr_name = 'qbr' + fake_vif_id
        self._session.host_ref = fake_local_host_ref
        self.mock_patch_object(
            self._session.network, 'get_VIFs',
            return_val=['fake_vif'])
        self.mock_patch_object(
            self._session.VIF, 'get_all_records_where',
            return_val={'rec': 'fake_rec'})
        self.mock_patch_object(
            self._session.VIF, 'get_VM',
            return_val='fake_vm_ref')
        self.mock_patch_object(
            self._session.network, 'get_bridge',
            return_val='fake_bridge')
        # The host ref which the remain vif resident on doesn't match the local
        # host
        self.mock_patch_object(
            self._session.VM, 'get_resident_on',
            return_val='fake_host_ref_remote')
        self.ovs_driver.delete_network_and_bridge(instance, fake_vif_id)
        self._session.network.get_bridge.assert_called_once_with(
            'fake_network')
        self._session.network.destroy.assert_not_called()
        self.assertEqual(2, mock_ovs_del_port.call_count)
        self.assertEqual(2, mock_delete_linux_port.call_count)
        mock_delete_linux_bridge.assert_called_once_with(expected_qbr_name)
        mock_ovs_del_br.assert_called_once_with(self._session, 'fake_bridge')
    @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_delete_linux_bridge')
    @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_delete_linux_port')
    @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_br')
    @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_port')
    @mock.patch.object(network_utils, 'find_network_with_name_label')
    @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_get_network_by_vif')
    def test_delete_network_and_bridge_abort(
            self,
            mock_get_network,
            mock_find_network,
            mock_ovs_del_port,
            mock_ovs_del_br,
            mock_delete_linux_port,
            mock_delete_linux_bridge):
        # If still has vifs attached to the network on local hosts, all the
        # operations would be abort
        mock_get_network.return_value = 'fake_network'
        instance = {'name': 'fake_instance'}
        fake_local_host_ref = 'fake_host_ref'
        self._session.host_ref = fake_local_host_ref
        self.mock_patch_object(
            self._session.network, 'get_VIFs',
            return_val=['fake_vif'])
        self.mock_patch_object(
            self._session.VIF, 'get_all_records_where',
            return_val={'rec': 'fake_rec'})
        self.mock_patch_object(
            self._session.VIF, 'get_VM',
            return_val='fake_vm_ref')
        # The host ref which the remain vif resident on match the local host
        self.mock_patch_object(
            self._session.VM, 'get_resident_on',
            return_val=fake_local_host_ref)
        self.ovs_driver.delete_network_and_bridge(instance, 'fake_vif_id')
        self._session.network.get_bridge.assert_called_once_with(
            'fake_network')
        self._session.network.destroy.assert_not_called()
        mock_ovs_del_port.assert_not_called()
        mock_delete_linux_port.assert_not_called()
        mock_delete_linux_bridge.assert_not_called()
        mock_ovs_del_br.assert_not_called()
    @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_delete_linux_bridge')
    @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_delete_linux_port')
    @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_br')
    @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_port')
    @mock.patch.object(network_utils, 'find_network_with_name_label')
    @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_get_network_by_vif')
    @mock.patch.object(vif.XenAPIOpenVswitchDriver,
                       '_get_patch_port_pair_names')
    def test_delete_network_and_bridge_del_port_exc(self, mock_get_port_name,
                                                    mock_get_network,
                                                    mock_find_network,
                                                    mock_ovs_del_port,
                                                    mock_ovs_del_br,
                                                    mock_delete_linux_port,
                                                    mock_delete_linux_bridge):
        # Get an exception when deleting the patch port pair
        mock_get_network.return_value = 'fake_network'
        instance = {'name': 'fake_instance'}
        self._session.network = mock.Mock()
        self._session.network.get_VIFs.return_value = None
        self._session.network.get_bridge.return_value = 'fake_bridge'
        mock_get_port_name.return_value = ['fake_port', 'fake_tap']
        mock_ovs_del_port.side_effect = test.TestingException
        self.assertRaises(exception.VirtualInterfaceUnplugException,
                          self.ovs_driver.delete_network_and_bridge, instance,
                          'fake_vif_id')
        # Cleanup stops at the first failing ovs_del_port call; the
        # network itself was already destroyed before that point.
        self._session.network.get_bridge.assert_called_once_with(
            'fake_network')
        self._session.network.destroy.assert_called_once_with('fake_network')
        mock_ovs_del_port.assert_called_once_with(self._session,
                                                  'fake_bridge',
                                                  'fake_port')
        mock_delete_linux_port.assert_not_called()
        mock_delete_linux_bridge.assert_not_called()
        mock_ovs_del_br.assert_not_called()
    @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_delete_linux_bridge')
    @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_delete_linux_port')
    @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_br')
    @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_port')
    @mock.patch.object(network_utils, 'find_network_with_name_label')
    @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_get_network_by_vif')
    @mock.patch.object(vif.XenAPIOpenVswitchDriver,
                       '_get_patch_port_pair_names')
    def test_delete_network_and_bridge_del_br_exc(self, mock_get_port_name,
                                                  mock_get_network,
                                                  mock_find_network,
                                                  mock_ovs_del_port,
                                                  mock_ovs_del_br,
                                                  mock_delete_linux_port,
                                                  mock_delete_linux_bridge):
        # Get an exception when deleting the bridge and the patch ports
        # existing on this bridge
        mock_get_network.return_value = 'fake_network'
        instance = {'name': 'fake_instance'}
        self._session.network = mock.Mock()
        self._session.network.get_VIFs.return_value = None
        self._session.network.get_bridge.return_value = 'fake_bridge'
        mock_get_port_name.return_value = ['fake_port', 'fake_tap']
        mock_ovs_del_br.side_effect = test.TestingException
        self.assertRaises(exception.VirtualInterfaceUnplugException,
                          self.ovs_driver.delete_network_and_bridge, instance,
                          'fake_vif_id')
        self._session.network.get_bridge.assert_called_once_with(
            'fake_network')
        self._session.network.destroy.assert_called_once_with('fake_network')
        mock_ovs_del_port.assert_called_once_with(self._session,
                                                  'fake_bridge',
                                                  'fake_port')
        mock_delete_linux_port.assert_not_called()
        mock_delete_linux_bridge.assert_not_called()
        mock_ovs_del_br.assert_called_once_with(self._session, 'fake_bridge')
    @mock.patch.object(os_xenapi.client.host_network, 'ovs_del_port')
    @mock.patch.object(network_utils, 'find_network_with_name_label',
                       return_value='fake_network')
    def test_delete_network_and_bridge_destroy_network_exception(
            self,
            mock_find_network,
            mock_ovs_del_port):
        # Get an exception when destroying the network
        instance = {'name': "fake_instance"}
        self.mock_patch_object(
            self._session.network, 'get_VIFs', return_val=None)
        self.mock_patch_object(
            self._session.network, 'get_bridge', return_val='fake_bridge')
        self.mock_patch_object(
            self._session.network, 'destroy',
            side_effect=test.TestingException)
        self.assertRaises(exception.VirtualInterfaceUnplugException,
                          self.ovs_driver.delete_network_and_bridge, instance,
                          'fake_vif_id')
        self.assertTrue(mock_find_network.called)
    @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_device_exists')
    @mock.patch.object(os_xenapi.client.host_network, 'brctl_add_if')
    @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_create_linux_bridge')
    @mock.patch.object(os_xenapi.client.host_network, 'ovs_add_port')
    def test_post_start_actions(self, mock_ovs_add_port,
                                mock_create_linux_bridge,
                                mock_brctl_add_if, mock_device_exists):
        """When the tap device is absent it gets created and wired up."""
        vif_ref = "fake_vif_ref"
        instance = {'name': 'fake_instance_name'}
        fake_vif_rec = {'uuid': fake_vif['uuid'],
                        'MAC': fake_vif['address'],
                        'network': 'fake_network',
                        'other_config': {
                            'neutron-port-id': 'fake-neutron-port-id'}
                        }
        mock_VIF_get_record = self.mock_patch_object(
            self._session.VIF, 'get_record', return_val=fake_vif_rec)
        mock_network_get_bridge = self.mock_patch_object(
            self._session.network, 'get_bridge',
            return_val='fake_bridge_name')
        mock_network_get_uuid = self.mock_patch_object(
            self._session.network, 'get_uuid',
            return_val='fake_network_uuid')
        mock_device_exists.return_value = False
        self.ovs_driver.post_start_actions(instance, vif_ref)
        self.assertTrue(mock_VIF_get_record.called)
        self.assertTrue(mock_network_get_bridge.called)
        self.assertTrue(mock_network_get_uuid.called)
        self.assertEqual(mock_ovs_add_port.call_count, 1)
        self.assertTrue(mock_brctl_add_if.called)
    @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_device_exists')
    @mock.patch.object(os_xenapi.client.host_network, 'brctl_add_if')
    @mock.patch.object(vif.XenAPIOpenVswitchDriver, '_create_linux_bridge')
    @mock.patch.object(os_xenapi.client.host_network, 'ovs_add_port')
    def test_post_start_actions_tap_exist(self, mock_ovs_add_port,
                                          mock_create_linux_bridge,
                                          mock_brctl_add_if,
                                          mock_device_exists):
        """When the tap device already exists, no wiring is repeated."""
        vif_ref = "fake_vif_ref"
        instance = {'name': 'fake_instance_name'}
        fake_vif_rec = {'uuid': fake_vif['uuid'],
                        'MAC': fake_vif['address'],
                        'network': 'fake_network',
                        'other_config': {
                            'neutron-port-id': 'fake-neutron-port-id'}
                        }
        mock_VIF_get_record = self.mock_patch_object(
            self._session.VIF, 'get_record', return_val=fake_vif_rec)
        mock_network_get_bridge = self.mock_patch_object(
            self._session.network, 'get_bridge',
            return_val='fake_bridge_name')
        mock_network_get_uuid = self.mock_patch_object(
            self._session.network, 'get_uuid',
            return_val='fake_network_uuid')
        mock_device_exists.return_value = True
        self.ovs_driver.post_start_actions(instance, vif_ref)
        self.assertTrue(mock_VIF_get_record.called)
        self.assertTrue(mock_network_get_bridge.called)
        self.assertTrue(mock_network_get_uuid.called)
        self.assertTrue(mock_create_linux_bridge.called)
        self.assertFalse(mock_brctl_add_if.called)
        self.assertFalse(mock_ovs_add_port.called)
    @mock.patch.object(network_utils, 'find_network_with_name_label',
                       return_value="exist_network_ref")
    def test_create_vif_interim_network_exist(self,
                                              mock_find_network_with_name_label):
        """An already-existing interim network is reused, not recreated."""
        mock_network_create = self.mock_patch_object(
            self._session.network, 'create', return_val='new_network_ref')
        network_ref = self.ovs_driver.create_vif_interim_network(fake_vif)
        self.assertFalse(mock_network_create.called)
        self.assertEqual(network_ref, 'exist_network_ref')
    @mock.patch.object(network_utils, 'find_network_with_name_label',
                       return_value=None)
    def test_create_vif_interim_network_new(self,
                                            mock_find_network_with_name_label):
        """A missing interim network triggers network.create."""
        mock_network_create = self.mock_patch_object(
            self._session.network, 'create', return_val='new_network_ref')
        network_ref = self.ovs_driver.create_vif_interim_network(fake_vif)
        self.assertTrue(mock_network_create.called)
        self.assertEqual(network_ref, 'new_network_ref')
    @mock.patch.object(vif.XenAPIOpenVswitchDriver, 'post_start_actions')
    @mock.patch.object(vm_utils, 'get_power_state')
    def test_hot_plug_power_on(self, mock_get_power_state,
                               mock_post_start_actions):
        """hot_plug only acts when the VM is running."""
        vif_ref = "fake_vif_ref"
        vif = "fake_vif"
        instance = "fake_instance"
        vm_ref = "fake_vm_ref"
        mock_get_power_state.return_value = power_state.RUNNING
        mock_VIF_plug = self.mock_patch_object(
            self._session.VIF, 'plug', return_val=None)
        self.ovs_driver.hot_plug(vif, instance, vm_ref, vif_ref)
        mock_VIF_plug.assert_called_once_with(vif_ref)
        mock_post_start_actions.assert_called_once_with(instance, vif_ref)
        mock_get_power_state.assert_called_once_with(self._session, vm_ref)
    @mock.patch.object(vm_utils, 'get_power_state')
    def test_hot_plug_power_off(self, mock_get_power_state):
        """hot_plug is a no-op for a powered-off VM."""
        vif_ref = "fake_vif_ref"
        vif = "fake_vif"
        instance = "fake_instance"
        vm_ref = "fake_vm_ref"
        mock_get_power_state.return_value = power_state.SHUTDOWN
        mock_VIF_plug = self.mock_patch_object(
            self._session.VIF, 'plug', return_val=None)
        self.ovs_driver.hot_plug(vif, instance, vm_ref, vif_ref)
        mock_VIF_plug.assert_not_called()
        mock_get_power_state.assert_called_once_with(self._session, vm_ref)
    @mock.patch.object(vm_utils, 'get_power_state')
    def test_hot_unplug_power_on(self, mock_get_power_state):
        """hot_unplug only acts when the VM is running."""
        vm_ref = 'fake_vm_ref'
        vif_ref = 'fake_vif_ref'
        instance = 'fake_instance'
        mock_get_power_state.return_value = power_state.RUNNING
        mock_VIF_unplug = self.mock_patch_object(
            self._session.VIF, 'unplug', return_val=None)
        self.ovs_driver.hot_unplug(fake_vif, instance, vm_ref, vif_ref)
        mock_VIF_unplug.assert_called_once_with(vif_ref)
        mock_get_power_state.assert_called_once_with(self._session, vm_ref)
    @mock.patch.object(vm_utils, 'get_power_state')
    def test_hot_unplug_power_off(self, mock_get_power_state):
        """hot_unplug is a no-op for a powered-off VM."""
        vm_ref = 'fake_vm_ref'
        vif_ref = 'fake_vif_ref'
        instance = 'fake_instance'
        mock_get_power_state.return_value = power_state.SHUTDOWN
        mock_VIF_unplug = self.mock_patch_object(
            self._session.VIF, 'unplug', return_val=None)
        self.ovs_driver.hot_unplug(fake_vif, instance, vm_ref, vif_ref)
        mock_VIF_unplug.assert_not_called()
        mock_get_power_state.assert_called_once_with(self._session, vm_ref)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,416 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import greenthread
import mock
import six
from nova import exception
from nova import test
from nova.tests.unit.virt.xenapi import stubs
from nova.virt.xenapi import volume_utils
class SROps(stubs.XenAPITestBaseNoDB):
    """Tests for the storage-repository (SR) lookup helpers."""
    def test_find_sr_valid_uuid(self):
        self.session = mock.Mock()
        self.session.call_xenapi.return_value = 'sr_ref'
        self.assertEqual(volume_utils.find_sr_by_uuid(self.session,
                                                      'sr_uuid'),
                         'sr_ref')
    def test_find_sr_invalid_uuid(self):
        # A XenAPI failure whose details flag UUID_INVALID is swallowed
        # and maps to a None result instead of propagating.
        class UUIDException(Exception):
            details = ["UUID_INVALID", "", "", ""]
        self.session = mock.Mock()
        self.session.XenAPI.Failure = UUIDException
        self.session.call_xenapi.side_effect = UUIDException
        self.assertIsNone(
            volume_utils.find_sr_by_uuid(self.session, 'sr_uuid'))
    def test_find_sr_from_vdi(self):
        vdi_ref = 'fake-ref'
        def fake_call_xenapi(method, *args):
            self.assertEqual(method, 'VDI.get_SR')
            self.assertEqual(args[0], vdi_ref)
            return args[0]
        session = mock.Mock()
        session.call_xenapi.side_effect = fake_call_xenapi
        self.assertEqual(volume_utils.find_sr_from_vdi(session, vdi_ref),
                         vdi_ref)
    def test_find_sr_from_vdi_exception(self):
        # Any other XenAPI failure is rewrapped as StorageError.
        vdi_ref = 'fake-ref'
        class FakeException(Exception):
            pass
        session = mock.Mock()
        session.XenAPI.Failure = FakeException
        session.call_xenapi.side_effect = FakeException
        self.assertRaises(exception.StorageError,
                          volume_utils.find_sr_from_vdi, session, vdi_ref)
class ISCSIParametersTestCase(stubs.XenAPITestBaseNoDB):
    """Checks for the iSCSI target host/port parsing helpers."""

    def test_target_host(self):
        # Host is whatever precedes the optional ':port' suffix.
        for given, wanted in (('host:port', 'host'), ('host', 'host')):
            self.assertEqual(wanted, volume_utils._get_target_host(given))
        # There is no default value
        self.assertIsNone(volume_utils._get_target_host(':port'))
        self.assertIsNone(volume_utils._get_target_host(None))

    def test_target_port(self):
        # An explicit port wins; otherwise the default iSCSI port is used.
        self.assertEqual('port', volume_utils._get_target_port('host:port'))
        self.assertEqual(3260, volume_utils._get_target_port('host'))
class IntroduceTestCase(stubs.XenAPITestBaseNoDB):
    """Tests for introduce_vdi's SR-scan retry behaviour."""
    @mock.patch.object(volume_utils, '_get_vdi_ref')
    @mock.patch.object(greenthread, 'sleep')
    def test_introduce_vdi_retry(self, mock_sleep, mock_get_vdi_ref):
        # The VDI only shows up on the second lookup: introduce_vdi is
        # expected to sleep once (20s) and then succeed.
        def fake_get_vdi_ref(session, sr_ref, vdi_uuid, target_lun):
            fake_get_vdi_ref.call_count += 1
            if fake_get_vdi_ref.call_count == 2:
                return 'vdi_ref'
        def fake_call_xenapi(method, *args):
            if method == 'SR.scan':
                return
            elif method == 'VDI.get_record':
                return {'managed': 'true'}
        session = mock.Mock()
        session.call_xenapi.side_effect = fake_call_xenapi
        mock_get_vdi_ref.side_effect = fake_get_vdi_ref
        fake_get_vdi_ref.call_count = 0
        self.assertEqual(volume_utils.introduce_vdi(session, 'sr_ref'),
                         'vdi_ref')
        mock_sleep.assert_called_once_with(20)
    @mock.patch.object(volume_utils, '_get_vdi_ref')
    @mock.patch.object(greenthread, 'sleep')
    def test_introduce_vdi_exception(self, mock_sleep, mock_get_vdi_ref):
        # The VDI never appears: StorageError after the single retry.
        def fake_call_xenapi(method, *args):
            if method == 'SR.scan':
                return
            elif method == 'VDI.get_record':
                return {'managed': 'true'}
        session = mock.Mock()
        session.call_xenapi.side_effect = fake_call_xenapi
        mock_get_vdi_ref.return_value = None
        self.assertRaises(exception.StorageError,
                          volume_utils.introduce_vdi, session, 'sr_ref')
        mock_sleep.assert_called_once_with(20)
class ParseVolumeInfoTestCase(stubs.XenAPITestBaseNoDB):
    """Tests for the iSCSI connection-info parsing helpers."""
    def test_mountpoint_to_number(self):
        # Mountpoint name -> expected device number; -1 marks names the
        # parser rejects.
        cases = {
            'sda': 0,
            'sdp': 15,
            'hda': 0,
            'hdp': 15,
            'vda': 0,
            'xvda': 0,
            '0': 0,
            '10': 10,
            'vdq': -1,
            'sdq': -1,
            'hdq': -1,
            'xvdq': -1,
        }
        for (input, expected) in cases.items():
            actual = volume_utils._mountpoint_to_number(input)
            self.assertEqual(actual, expected,
                '%s yielded %s, not %s' % (input, actual, expected))
    @classmethod
    def _make_connection_info(cls):
        # Canonical iSCSI connection_info dict, shaped as Cinder would
        # hand it to the driver.
        target_iqn = 'iqn.2010-10.org.openstack:volume-00000001'
        return {'driver_volume_type': 'iscsi',
                'data': {'volume_id': 1,
                         'target_iqn': target_iqn,
                         'target_portal': '127.0.0.1:3260,fake',
                         'target_lun': None,
                         'auth_method': 'CHAP',
                         'auth_username': 'username',
                         'auth_password': 'verybadpass'}}
    def test_parse_volume_info_parsing_auth_details(self):
        """CHAP credentials are carried through to the parsed params."""
        conn_info = self._make_connection_info()
        result = volume_utils._parse_volume_info(conn_info['data'])
        self.assertEqual('username', result['chapuser'])
        self.assertEqual('verybadpass', result['chappassword'])
    def test_parse_volume_info_missing_details(self):
        # Tests that a StorageError is raised if volume_id, target_host, or
        # target_iqn is missing from connection_data. Also ensures that the
        # auth_password value is not present in the StorageError message.
        for data_key_to_null in ('volume_id', 'target_portal', 'target_iqn'):
            conn_info = self._make_connection_info()
            conn_info['data'][data_key_to_null] = None
            ex = self.assertRaises(exception.StorageError,
                                   volume_utils._parse_volume_info,
                                   conn_info['data'])
            self.assertNotIn('verybadpass', six.text_type(ex))
    def test_get_device_number_raise_exception_on_wrong_mountpoint(self):
        """An unparsable mountpoint raises StorageError."""
        self.assertRaises(
            exception.StorageError,
            volume_utils.get_device_number,
            'dev/sd')
class FindVBDTestCase(stubs.XenAPITestBaseNoDB):
    """Tests for volume_utils.find_vbd_by_number."""
    def test_find_vbd_by_number_works(self):
        """The VBD whose userdevice matches the number is returned."""
        session = mock.Mock()
        session.VM.get_VBDs.return_value = ["a", "b"]
        session.VBD.get_userdevice.return_value = "1"
        result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
        self.assertEqual("a", result)
        session.VM.get_VBDs.assert_called_once_with("vm_ref")
        session.VBD.get_userdevice.assert_called_once_with("a")
    def test_find_vbd_by_number_no_matches(self):
        """Every VBD is inspected before giving up with None."""
        session = mock.Mock()
        session.VM.get_VBDs.return_value = ["a", "b"]
        session.VBD.get_userdevice.return_value = "3"
        result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
        self.assertIsNone(result)
        session.VM.get_VBDs.assert_called_once_with("vm_ref")
        expected = [mock.call("a"), mock.call("b")]
        self.assertEqual(expected,
                         session.VBD.get_userdevice.call_args_list)
    def test_find_vbd_by_number_no_vbds(self):
        """A VM with no VBDs at all short-circuits to None."""
        session = mock.Mock()
        session.VM.get_VBDs.return_value = []
        result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
        self.assertIsNone(result)
        session.VM.get_VBDs.assert_called_once_with("vm_ref")
        self.assertFalse(session.VBD.get_userdevice.called)
    def test_find_vbd_by_number_ignores_exception(self):
        """A XenAPI failure while reading one VBD is treated as no match."""
        session = mock.Mock()
        session.XenAPI.Failure = test.TestingException
        session.VM.get_VBDs.return_value = ["a"]
        session.VBD.get_userdevice.side_effect = test.TestingException
        result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
        self.assertIsNone(result)
        session.VM.get_VBDs.assert_called_once_with("vm_ref")
        session.VBD.get_userdevice.assert_called_once_with("a")
class IntroduceSRTestCase(stubs.XenAPITestBaseNoDB):
    """Tests for version-dependent backend-kind handling in introduce_sr."""
    @mock.patch.object(volume_utils, '_create_pbd')
    def test_backend_kind(self, create_pbd):
        # Pre-7.0 XenServer needs the explicit backend-kind=vbd workaround.
        session = mock.Mock()
        session.product_version = (6, 5, 0)
        session.call_xenapi.return_value = 'sr_ref'
        params = {'sr_type': 'iscsi'}
        sr_uuid = 'sr_uuid'
        label = 'label'
        expected_params = {'backend-kind': 'vbd'}
        volume_utils.introduce_sr(session, sr_uuid, label, params)
        session.call_xenapi.assert_any_call('SR.introduce', sr_uuid,
                                            label, '', 'iscsi',
                                            '', False, expected_params)
    @mock.patch.object(volume_utils, '_create_pbd')
    def test_backend_kind_upstream_fix(self, create_pbd):
        # From 7.0 onwards the hypervisor fix makes the workaround
        # unnecessary, so no extra params are passed.
        session = mock.Mock()
        session.product_version = (7, 0, 0)
        session.call_xenapi.return_value = 'sr_ref'
        params = {'sr_type': 'iscsi'}
        sr_uuid = 'sr_uuid'
        label = 'label'
        expected_params = {}
        volume_utils.introduce_sr(session, sr_uuid, label, params)
        session.call_xenapi.assert_any_call('SR.introduce', sr_uuid,
                                            label, '', 'iscsi',
                                            '', False, expected_params)
class BootedFromVolumeTestCase(stubs.XenAPITestBaseNoDB):
    """Tests for volume_utils.is_booted_from_volume."""
    def test_booted_from_volume(self):
        # userdevice 0 carrying the 'osvol' marker => boot-from-volume.
        session = mock.Mock()
        session.VM.get_VBDs.return_value = ['vbd_ref']
        session.VBD.get_userdevice.return_value = '0'
        session.VBD.get_other_config.return_value = {'osvol': True}
        booted_from_volume = volume_utils.is_booted_from_volume(session,
                                                                'vm_ref')
        self.assertTrue(booted_from_volume)
    def test_not_booted_from_volume(self):
        # Same device without the marker => not boot-from-volume.
        session = mock.Mock()
        session.VM.get_VBDs.return_value = ['vbd_ref']
        session.VBD.get_userdevice.return_value = '0'
        session.VBD.get_other_config.return_value = {}
        booted_from_volume = volume_utils.is_booted_from_volume(session,
                                                                'vm_ref')
        self.assertFalse(booted_from_volume)
class MultipleVolumesTestCase(stubs.XenAPITestBaseNoDB):
    """Behaviour when several volumes share a single iSCSI target / SR."""

    def test_sr_info_two_luns(self):
        """Two LUNs on the same target/iqn map to the same SR uuid+label."""
        data1 = {'target_portal': 'host:port',
                 'target_iqn': 'iqn',
                 'volume_id': 'vol_id_1',
                 'target_lun': 1}
        data2 = {'target_portal': 'host:port',
                 'target_iqn': 'iqn',
                 'volume_id': 'vol_id_2',
                 'target_lun': 2}
        (sr_uuid1, label1, params1) = volume_utils.parse_sr_info(data1)
        (sr_uuid2, label2, params2) = volume_utils.parse_sr_info(data2)
        self.assertEqual(sr_uuid1, sr_uuid2)
        self.assertEqual(label1, label2)

    @mock.patch.object(volume_utils, 'forget_sr')
    def test_purge_sr_no_VBDs(self, mock_forget):
        """An SR whose VDIs have no VBDs attached gets forgotten."""
        def _call_xenapi(func, *args):
            # Fake XenAPI dispatch: two VDIs, neither with a VBD.
            if func == 'SR.get_VDIs':
                return ['VDI1', 'VDI2']
            if func == 'VDI.get_VBDs':
                return []
        self.session = mock.Mock()
        self.session.call_xenapi = _call_xenapi
        volume_utils.purge_sr(self.session, 'SR')
        mock_forget.assert_called_once_with(self.session, 'SR')

    @mock.patch.object(volume_utils, 'forget_sr')
    def test_purge_sr_in_use(self, mock_forget):
        """An SR with VDIs still attached to VBDs is left alone."""
        def _call_xenapi(func, *args):
            # Fake XenAPI dispatch: every VDI still has a VBD.
            if func == 'SR.get_VDIs':
                return ['VDI1', 'VDI2']
            if func == 'VDI.get_VBDs':
                if args[0] == 'VDI1':
                    return ['VBD1']
                if args[0] == 'VDI2':
                    return ['VBD2']
        self.session = mock.Mock()
        self.session.call_xenapi = _call_xenapi
        volume_utils.purge_sr(self.session, 'SR')
        self.assertEqual([], mock_forget.mock_calls)
class TestStreamToVDI(stubs.XenAPITestBaseNoDB):
    """Tests for streaming image data into a VDI over HTTP."""

    @mock.patch.object(volume_utils, '_stream_to_vdi')
    @mock.patch.object(volume_utils, '_get_vdi_import_path',
                       return_value='vdi_import_path')
    def test_creates_task_conn(self, mock_import_path, mock_stream):
        """stream_to_vdi wraps the upload in a task and an HTTP connection,
        and closes both context managers afterwards."""
        session = self.get_fake_session()
        session.custom_task = mock.MagicMock()
        session.custom_task.return_value.__enter__.return_value = 'task'
        session.http_connection = mock.MagicMock()
        session.http_connection.return_value.__enter__.return_value = 'conn'
        instance = {'name': 'instance-name'}
        volume_utils.stream_to_vdi(session, instance, 'vhd', 'file_obj', 100,
                                   'vdi_ref')
        session.custom_task.assert_called_with('VDI_IMPORT_for_instance-name')
        mock_stream.assert_called_with('conn', 'vdi_import_path', 100,
                                       'file_obj')
        self.assertTrue(session.http_connection.return_value.__exit__.called)
        self.assertTrue(session.custom_task.return_value.__exit__.called)

    def test_stream_to_vdi_tiny(self):
        """Payloads smaller than one chunk are sent with a single read."""
        mock_file = mock.Mock()
        mock_file.read.side_effect = ['a']
        mock_conn = mock.Mock()
        resp = mock.Mock()
        resp.status = '200'
        resp.reason = 'OK'
        mock_conn.getresponse.return_value = resp
        volume_utils._stream_to_vdi(mock_conn, '/path', 1, mock_file)
        args, kwargs = mock_conn.request.call_args
        self.assertEqual(kwargs['headers']['Content-Length'], '1')
        mock_file.read.assert_called_once_with(1)
        mock_conn.send.assert_called_once_with('a')

    def test_stream_to_vdi_chunk_multiple(self):
        """Exact multiples of the 16 KiB chunk size use equal-sized reads."""
        mock_file = mock.Mock()
        mock_file.read.side_effect = ['aaaaa', 'bbbbb']
        mock_conn = mock.Mock()
        resp = mock.Mock()
        resp.status = '200'
        resp.reason = 'OK'
        mock_conn.getresponse.return_value = resp
        tot_size = 2 * 16 * 1024
        volume_utils._stream_to_vdi(mock_conn, '/path', tot_size, mock_file)
        args, kwargs = mock_conn.request.call_args
        self.assertEqual(kwargs['headers']['Content-Length'], str(tot_size))
        mock_file.read.assert_has_calls([mock.call(16 * 1024),
                                         mock.call(16 * 1024)])
        mock_conn.send.assert_has_calls([mock.call('aaaaa'),
                                         mock.call('bbbbb')])

    def test_stream_to_vdi_chunk_remaining(self):
        """A trailing partial chunk is read with the remaining byte count."""
        mock_file = mock.Mock()
        mock_file.read.side_effect = ['aaaaa', 'bb']
        mock_conn = mock.Mock()
        resp = mock.Mock()
        resp.status = '200'
        resp.reason = 'OK'
        mock_conn.getresponse.return_value = resp
        tot_size = 16 * 1024 + 1024
        volume_utils._stream_to_vdi(mock_conn, '/path', tot_size, mock_file)
        args, kwargs = mock_conn.request.call_args
        self.assertEqual(kwargs['headers']['Content-Length'], str(tot_size))
        mock_file.read.assert_has_calls([mock.call(16 * 1024),
                                         mock.call(1024)])
        mock_conn.send.assert_has_calls([mock.call('aaaaa'), mock.call('bb')])

View File

@ -1,547 +0,0 @@
# Copyright (c) 2012 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import exception
from nova import test
from nova.tests.unit.virt.xenapi import stubs
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import volumeops
class VolumeOpsTestBase(stubs.XenAPITestBaseNoDB):
    """Shared fixture: a VolumeOps instance wired to a fake XenAPI session."""

    def setUp(self):
        super(VolumeOpsTestBase, self).setUp()
        self._setup_mock_volumeops()

    def _setup_mock_volumeops(self):
        # Expose both the fake session and the ops under test to subclasses.
        fake_session = stubs.FakeSessionForVolumeTests('fake_uri')
        self.session = fake_session
        self.ops = volumeops.VolumeOps(fake_session)
class VolumeDetachTestCase(VolumeOpsTestBase):
    """Tests for the VolumeOps detach paths (detach_volume / detach_all)."""

    # NOTE: mock.patch decorators are applied bottom-up, so the mock
    # parameters below are listed in the reverse of the decorator order.
    @mock.patch.object(volumeops.vm_utils, 'lookup', return_value='vmref')
    @mock.patch.object(volumeops.volume_utils, 'find_vbd_by_number',
                       return_value='vbdref')
    @mock.patch.object(volumeops.vm_utils, 'is_vm_shutdown',
                       return_value=False)
    @mock.patch.object(volumeops.vm_utils, 'unplug_vbd')
    @mock.patch.object(volumeops.vm_utils, 'destroy_vbd')
    @mock.patch.object(volumeops.volume_utils, 'get_device_number',
                       return_value='devnumber')
    @mock.patch.object(volumeops.volume_utils, 'find_sr_from_vbd',
                       return_value='srref')
    @mock.patch.object(volumeops.volume_utils, 'purge_sr')
    def test_detach_volume_call(self, mock_purge, mock_find_sr,
                                mock_get_device_num, mock_destroy_vbd,
                                mock_unplug_vbd, mock_is_vm, mock_find_vbd,
                                mock_lookup):
        """End-to-end detach: unplug, destroy VBD, then purge the SR."""
        ops = volumeops.VolumeOps('session')
        ops.detach_volume(
            dict(driver_volume_type='iscsi', data='conn_data'),
            'instance_1', 'mountpoint')
        mock_lookup.assert_called_once_with('session', 'instance_1')
        mock_get_device_num.assert_called_once_with('mountpoint')
        mock_find_vbd.assert_called_once_with('session', 'vmref', 'devnumber')
        mock_is_vm.assert_called_once_with('session', 'vmref')
        mock_unplug_vbd.assert_called_once_with('session', 'vbdref', 'vmref')
        mock_destroy_vbd.assert_called_once_with('session', 'vbdref')
        mock_find_sr.assert_called_once_with('session', 'vbdref')
        mock_purge.assert_called_once_with('session', 'srref')

    @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
    @mock.patch.object(volume_utils, "find_vbd_by_number")
    @mock.patch.object(vm_utils, "vm_ref_or_raise")
    def test_detach_volume(self, mock_vm, mock_vbd, mock_detach):
        """Mountpoint /dev/xvdd resolves to device number 3."""
        mock_vm.return_value = "vm_ref"
        mock_vbd.return_value = "vbd_ref"
        self.ops.detach_volume({}, "name", "/dev/xvdd")
        mock_vm.assert_called_once_with(self.session, "name")
        mock_vbd.assert_called_once_with(self.session, "vm_ref", 3)
        mock_detach.assert_called_once_with("vm_ref", ["vbd_ref"])

    @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
    @mock.patch.object(volume_utils, "find_vbd_by_number")
    @mock.patch.object(vm_utils, "vm_ref_or_raise")
    def test_detach_volume_skips_error_skip_attach(self, mock_vm, mock_vbd,
                                                   mock_detach):
        """A missing VBD means nothing to detach - no error raised."""
        mock_vm.return_value = "vm_ref"
        mock_vbd.return_value = None
        self.ops.detach_volume({}, "name", "/dev/xvdd")
        self.assertFalse(mock_detach.called)

    @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
    @mock.patch.object(volume_utils, "find_vbd_by_number")
    @mock.patch.object(vm_utils, "vm_ref_or_raise")
    def test_detach_volume_raises(self, mock_vm, mock_vbd,
                                  mock_detach):
        """Failures while locating the VBD propagate to the caller."""
        mock_vm.return_value = "vm_ref"
        mock_vbd.side_effect = test.TestingException
        self.assertRaises(test.TestingException,
                          self.ops.detach_volume, {}, "name", "/dev/xvdd")
        self.assertFalse(mock_detach.called)

    @mock.patch.object(volume_utils, "purge_sr")
    @mock.patch.object(vm_utils, "destroy_vbd")
    @mock.patch.object(volume_utils, "find_sr_from_vbd")
    @mock.patch.object(vm_utils, "unplug_vbd")
    @mock.patch.object(vm_utils, "is_vm_shutdown")
    def test_detach_vbds_and_srs_not_shutdown(self, mock_shutdown, mock_unplug,
                                              mock_find_sr, mock_destroy,
                                              mock_purge):
        """A running VM needs the VBD unplugged before it is destroyed."""
        mock_shutdown.return_value = False
        mock_find_sr.return_value = "sr_ref"
        self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref"])
        mock_shutdown.assert_called_once_with(self.session, "vm_ref")
        mock_find_sr.assert_called_once_with(self.session, "vbd_ref")
        mock_unplug.assert_called_once_with(self.session, "vbd_ref", "vm_ref")
        mock_destroy.assert_called_once_with(self.session, "vbd_ref")
        mock_purge.assert_called_once_with(self.session, "sr_ref")

    @mock.patch.object(volume_utils, "purge_sr")
    @mock.patch.object(vm_utils, "destroy_vbd")
    @mock.patch.object(volume_utils, "find_sr_from_vbd")
    @mock.patch.object(vm_utils, "unplug_vbd")
    @mock.patch.object(vm_utils, "is_vm_shutdown")
    def test_detach_vbds_and_srs_is_shutdown(self, mock_shutdown, mock_unplug,
                                             mock_find_sr, mock_destroy,
                                             mock_purge):
        """A shut-down VM skips the unplug step entirely."""
        mock_shutdown.return_value = True
        mock_find_sr.return_value = "sr_ref"
        self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref_1", "vbd_ref_2"])
        expected = [mock.call(self.session, "vbd_ref_1"),
                    mock.call(self.session, "vbd_ref_2")]
        self.assertEqual(expected, mock_destroy.call_args_list)
        mock_purge.assert_called_with(self.session, "sr_ref")
        self.assertFalse(mock_unplug.called)

    @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
    @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
    def test_detach_all_no_volumes(self, mock_get_all, mock_detach):
        mock_get_all.return_value = []
        self.ops.detach_all("vm_ref")
        mock_get_all.assert_called_once_with("vm_ref")
        self.assertFalse(mock_detach.called)

    @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
    @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
    def test_detach_all_volumes(self, mock_get_all, mock_detach):
        mock_get_all.return_value = ["1"]
        self.ops.detach_all("vm_ref")
        mock_get_all.assert_called_once_with("vm_ref")
        mock_detach.assert_called_once_with("vm_ref", ["1"])

    def test_get_all_volume_vbd_refs_no_vbds(self):
        with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
            with mock.patch.object(self.session.VBD,
                                   "get_other_config") as mock_conf:
                mock_get.return_value = []
                result = self.ops._get_all_volume_vbd_refs("vm_ref")
                self.assertEqual([], list(result))
                mock_get.assert_called_once_with("vm_ref")
                self.assertFalse(mock_conf.called)

    def test_get_all_volume_vbd_refs_no_volumes(self):
        """VBDs without the 'osvol' other_config flag are not volumes."""
        with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
            with mock.patch.object(self.session.VBD,
                                   "get_other_config") as mock_conf:
                mock_get.return_value = ["1"]
                mock_conf.return_value = {}
                result = self.ops._get_all_volume_vbd_refs("vm_ref")
                self.assertEqual([], list(result))
                mock_get.assert_called_once_with("vm_ref")
                mock_conf.assert_called_once_with("1")

    def test_get_all_volume_vbd_refs_with_volumes(self):
        """VBDs flagged with 'osvol' are returned as volume VBDs."""
        with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
            with mock.patch.object(self.session.VBD,
                                   "get_other_config") as mock_conf:
                mock_get.return_value = ["1", "2"]
                mock_conf.return_value = {"osvol": True}
                result = self.ops._get_all_volume_vbd_refs("vm_ref")
                self.assertEqual(["1", "2"], list(result))
                mock_get.assert_called_once_with("vm_ref")
class AttachVolumeTestCase(VolumeOpsTestBase):
    """Tests for the VolumeOps attach paths (attach_volume and helpers)."""

    @mock.patch.object(volumeops.VolumeOps, "_attach_volume")
    @mock.patch.object(vm_utils, "vm_ref_or_raise")
    def test_attach_volume_default_hotplug(self, mock_get_vm, mock_attach):
        """hotplug defaults to True when not supplied."""
        mock_get_vm.return_value = "vm_ref"
        self.ops.attach_volume({}, "instance_name", "/dev/xvda")
        mock_attach.assert_called_once_with({}, "vm_ref", "instance_name",
                                            '/dev/xvda', True)

    @mock.patch.object(volumeops.VolumeOps, "_attach_volume")
    @mock.patch.object(vm_utils, "vm_ref_or_raise")
    def test_attach_volume_hotplug(self, mock_get_vm, mock_attach):
        """An explicit hotplug=False is forwarded unchanged."""
        mock_get_vm.return_value = "vm_ref"
        self.ops.attach_volume({}, "instance_name", "/dev/xvda", False)
        mock_attach.assert_called_once_with({}, "vm_ref", "instance_name",
                                            '/dev/xvda', False)

    @mock.patch.object(volumeops.VolumeOps, "_attach_volume")
    def test_attach_volume_default_hotplug_connect_volume(self, mock_attach):
        """connect_volume attaches with no VM - SR/VDI setup only."""
        self.ops.connect_volume({})
        mock_attach.assert_called_once_with({})

    @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
    @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
    @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
    @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
    def test_attach_volume_with_defaults(self, mock_attach, mock_hypervisor,
                                         mock_provider, mock_driver):
        """With no vm_ref the VDI is introduced but not attached to a VM."""
        connection_info = {"data": {}}
        with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
            mock_provider.return_value = ("sr_ref", "sr_uuid")
            mock_vdi.return_value = "vdi_uuid"
            result = self.ops._attach_volume(connection_info)
            self.assertEqual(result, ("sr_uuid", "vdi_uuid"))
        mock_driver.assert_called_once_with(connection_info)
        mock_provider.assert_called_once_with({}, None)
        mock_hypervisor.assert_called_once_with("sr_ref", {})
        self.assertFalse(mock_attach.called)

    @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
    @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
    @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
    @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
    def test_attach_volume_with_hot_attach(self, mock_attach, mock_hypervisor,
                                           mock_provider, mock_driver):
        """With a vm_ref the VDI is additionally attached to that VM."""
        connection_info = {"data": {}}
        with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
            mock_provider.return_value = ("sr_ref", "sr_uuid")
            mock_hypervisor.return_value = "vdi_ref"
            mock_vdi.return_value = "vdi_uuid"
            result = self.ops._attach_volume(connection_info, "vm_ref",
                                             "name", 2, True)
            self.assertEqual(result, ("sr_uuid", "vdi_uuid"))
        mock_driver.assert_called_once_with(connection_info)
        mock_provider.assert_called_once_with({}, "name")
        mock_hypervisor.assert_called_once_with("sr_ref", {})
        mock_attach.assert_called_once_with("vdi_ref", "vm_ref", "name", 2,
                                            True)

    @mock.patch.object(volume_utils, "forget_sr")
    @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
    @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
    @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
    @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
    def test_attach_volume_cleanup(self, mock_attach, mock_hypervisor,
                                   mock_provider, mock_driver, mock_forget):
        """A failed VDI introduction forgets the SR again (no leak)."""
        connection_info = {"data": {}}
        mock_provider.return_value = ("sr_ref", "sr_uuid")
        mock_hypervisor.side_effect = test.TestingException
        self.assertRaises(test.TestingException,
                          self.ops._attach_volume, connection_info)
        mock_driver.assert_called_once_with(connection_info)
        mock_provider.assert_called_once_with({}, None)
        mock_hypervisor.assert_called_once_with("sr_ref", {})
        mock_forget.assert_called_once_with(self.session, "sr_ref")
        self.assertFalse(mock_attach.called)

    def test_check_is_supported_driver_type_pass_iscsi(self):
        conn_info = {"driver_volume_type": "iscsi"}
        self.ops._check_is_supported_driver_type(conn_info)

    def test_check_is_supported_driver_type_pass_xensm(self):
        conn_info = {"driver_volume_type": "xensm"}
        self.ops._check_is_supported_driver_type(conn_info)

    def test_check_is_supported_driver_type_pass_bad(self):
        conn_info = {"driver_volume_type": "bad"}
        self.assertRaises(exception.VolumeDriverNotFound,
                          self.ops._check_is_supported_driver_type, conn_info)

    @mock.patch.object(volume_utils, "introduce_sr")
    @mock.patch.object(volume_utils, "find_sr_by_uuid")
    @mock.patch.object(volume_utils, "parse_sr_info")
    def test_connect_to_volume_provider_new_sr(self, mock_parse, mock_find_sr,
                                               mock_introduce_sr):
        """An unknown SR uuid gets introduced to the host."""
        mock_parse.return_value = ("uuid", "label", "params")
        mock_find_sr.return_value = None
        mock_introduce_sr.return_value = "sr_ref"
        ref, uuid = self.ops._connect_to_volume_provider({}, "name")
        self.assertEqual("sr_ref", ref)
        self.assertEqual("uuid", uuid)
        mock_parse.assert_called_once_with({}, "Disk-for:name")
        mock_find_sr.assert_called_once_with(self.session, "uuid")
        mock_introduce_sr.assert_called_once_with(self.session, "uuid",
                                                  "label", "params")

    @mock.patch.object(volume_utils, "introduce_sr")
    @mock.patch.object(volume_utils, "find_sr_by_uuid")
    @mock.patch.object(volume_utils, "parse_sr_info")
    def test_connect_to_volume_provider_old_sr(self, mock_parse, mock_find_sr,
                                               mock_introduce_sr):
        """An already-known SR is reused without re-introduction."""
        mock_parse.return_value = ("uuid", "label", "params")
        mock_find_sr.return_value = "sr_ref"
        ref, uuid = self.ops._connect_to_volume_provider({}, "name")
        self.assertEqual("sr_ref", ref)
        self.assertEqual("uuid", uuid)
        mock_parse.assert_called_once_with({}, "Disk-for:name")
        mock_find_sr.assert_called_once_with(self.session, "uuid")
        self.assertFalse(mock_introduce_sr.called)

    @mock.patch.object(volume_utils, "introduce_vdi")
    def test_connect_hypervisor_to_volume_regular(self, mock_intro):
        mock_intro.return_value = "vdi"
        result = self.ops._connect_hypervisor_to_volume("sr", {})
        self.assertEqual("vdi", result)
        mock_intro.assert_called_once_with(self.session, "sr")

    @mock.patch.object(volume_utils, "introduce_vdi")
    def test_connect_hypervisor_to_volume_vdi(self, mock_intro):
        """A vdi_uuid in the connection data is forwarded to introduce_vdi."""
        mock_intro.return_value = "vdi"
        conn = {"vdi_uuid": "id"}
        result = self.ops._connect_hypervisor_to_volume("sr", conn)
        self.assertEqual("vdi", result)
        mock_intro.assert_called_once_with(self.session, "sr",
                                           vdi_uuid="id")

    @mock.patch.object(volume_utils, "introduce_vdi")
    def test_connect_hypervisor_to_volume_lun(self, mock_intro):
        """A target_lun in the connection data is forwarded to introduce_vdi."""
        mock_intro.return_value = "vdi"
        conn = {"target_lun": "lun"}
        result = self.ops._connect_hypervisor_to_volume("sr", conn)
        self.assertEqual("vdi", result)
        mock_intro.assert_called_once_with(self.session, "sr",
                                           target_lun="lun")

    @mock.patch.object(volume_utils, "introduce_vdi")
    @mock.patch.object(volumeops.LOG, 'debug')
    def test_connect_hypervisor_to_volume_mask_password(self, mock_debug,
                                                        mock_intro):
        # Tests that the connection_data is scrubbed before logging.
        data = {'auth_password': 'verybadpass'}
        self.ops._connect_hypervisor_to_volume("sr", data)
        self.assertTrue(mock_debug.called, 'LOG.debug was not called')
        password_logged = False
        for call in mock_debug.call_args_list:
            # The call object is a tuple of (args, kwargs)
            if 'verybadpass' in call[0]:
                password_logged = True
                break
        self.assertFalse(password_logged, 'connection_data was not scrubbed')

    @mock.patch.object(vm_utils, "is_vm_shutdown")
    @mock.patch.object(vm_utils, "create_vbd")
    def test_attach_volume_to_vm_plug(self, mock_vbd, mock_shutdown):
        """A running VM gets the new VBD plugged immediately."""
        mock_vbd.return_value = "vbd"
        mock_shutdown.return_value = False
        with mock.patch.object(self.session.VBD, "plug") as mock_plug:
            self.ops._attach_volume_to_vm("vdi", "vm", "name", '/dev/2', True)
            mock_plug.assert_called_once_with("vbd", "vm")
        mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
                                         bootable=False, osvol=True)
        mock_shutdown.assert_called_once_with(self.session, "vm")

    @mock.patch.object(vm_utils, "is_vm_shutdown")
    @mock.patch.object(vm_utils, "create_vbd")
    def test_attach_volume_to_vm_no_plug(self, mock_vbd, mock_shutdown):
        """A shut-down VM gets the VBD created but not plugged."""
        mock_vbd.return_value = "vbd"
        mock_shutdown.return_value = True
        with mock.patch.object(self.session.VBD, "plug") as mock_plug:
            self.ops._attach_volume_to_vm("vdi", "vm", "name", '/dev/2', True)
            self.assertFalse(mock_plug.called)
        mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
                                         bootable=False, osvol=True)
        mock_shutdown.assert_called_once_with(self.session, "vm")

    @mock.patch.object(vm_utils, "is_vm_shutdown")
    @mock.patch.object(vm_utils, "create_vbd")
    def test_attach_volume_to_vm_no_hotplug(self, mock_vbd, mock_shutdown):
        """hotplug=False skips both the shutdown check and the plug."""
        mock_vbd.return_value = "vbd"
        with mock.patch.object(self.session.VBD, "plug") as mock_plug:
            self.ops._attach_volume_to_vm("vdi", "vm", "name", '/dev/2', False)
            self.assertFalse(mock_plug.called)
        mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
                                         bootable=False, osvol=True)
        self.assertFalse(mock_shutdown.called)
class FindBadVolumeTestCase(VolumeOpsTestBase):
    """Tests for VolumeOps.find_bad_volumes (detecting failed SR scans)."""

    @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
    def test_find_bad_volumes_no_vbds(self, mock_get_all):
        mock_get_all.return_value = []
        result = self.ops.find_bad_volumes("vm_ref")
        mock_get_all.assert_called_once_with("vm_ref")
        self.assertEqual([], result)

    @mock.patch.object(volume_utils, "find_sr_from_vbd")
    @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
    def test_find_bad_volumes_no_bad_vbds(self, mock_get_all, mock_find_sr):
        """SRs that scan cleanly report no bad devices."""
        mock_get_all.return_value = ["1", "2"]
        mock_find_sr.return_value = "sr_ref"
        with mock.patch.object(self.session.SR, "scan") as mock_scan:
            result = self.ops.find_bad_volumes("vm_ref")
            mock_get_all.assert_called_once_with("vm_ref")
            expected_find = [mock.call(self.session, "1"),
                             mock.call(self.session, "2")]
            self.assertEqual(expected_find, mock_find_sr.call_args_list)
            expected_scan = [mock.call("sr_ref"), mock.call("sr_ref")]
            self.assertEqual(expected_scan, mock_scan.call_args_list)
            self.assertEqual([], result)

    @mock.patch.object(volume_utils, "find_sr_from_vbd")
    @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
    def test_find_bad_volumes_bad_vbds(self, mock_get_all, mock_find_sr):
        """An SR_BACKEND_FAILURE_40 scan failure marks the device as bad."""
        mock_get_all.return_value = ["vbd_ref"]
        mock_find_sr.return_value = "sr_ref"

        class FakeException(Exception):
            details = ['SR_BACKEND_FAILURE_40', "", "", ""]
        session = mock.Mock()
        session.XenAPI.Failure = FakeException
        self.ops._session = session
        with mock.patch.object(session.SR, "scan") as mock_scan:
            with mock.patch.object(session.VBD,
                                   "get_device") as mock_get:
                mock_scan.side_effect = FakeException
                mock_get.return_value = "xvdb"
                result = self.ops.find_bad_volumes("vm_ref")
                mock_get_all.assert_called_once_with("vm_ref")
                mock_scan.assert_called_once_with("sr_ref")
                mock_get.assert_called_once_with("vbd_ref")
                self.assertEqual(["/dev/xvdb"], result)

    @mock.patch.object(volume_utils, "find_sr_from_vbd")
    @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
    def test_find_bad_volumes_raises(self, mock_get_all, mock_find_sr):
        """Scan failures other than SR_BACKEND_FAILURE_40 propagate."""
        mock_get_all.return_value = ["vbd_ref"]
        mock_find_sr.return_value = "sr_ref"

        class FakeException(Exception):
            details = ['foo', "", "", ""]
        session = mock.Mock()
        session.XenAPI.Failure = FakeException
        self.ops._session = session
        with mock.patch.object(session.SR, "scan") as mock_scan:
            with mock.patch.object(session.VBD,
                                   "get_device") as mock_get:
                mock_scan.side_effect = FakeException
                mock_get.return_value = "xvdb"
                self.assertRaises(FakeException,
                                  self.ops.find_bad_volumes, "vm_ref")
                mock_scan.assert_called_once_with("sr_ref")
class CleanupFromVDIsTestCase(VolumeOpsTestBase):
    """Tests for VolumeOps.safe_cleanup_from_vdis error tolerance."""

    def _check_find_purge_calls(self, find_sr_from_vdi, purge_sr, vdi_refs,
                                sr_refs):
        """Assert SR lookup happened per VDI and purge happened per SR."""
        find_sr_calls = [mock.call(self.ops._session, vdi_ref) for vdi_ref
                         in vdi_refs]
        find_sr_from_vdi.assert_has_calls(find_sr_calls)
        purge_sr_calls = [mock.call(self.ops._session, sr_ref) for sr_ref
                          in sr_refs]
        purge_sr.assert_has_calls(purge_sr_calls)

    @mock.patch.object(volume_utils, 'find_sr_from_vdi')
    @mock.patch.object(volume_utils, 'purge_sr')
    def test_safe_cleanup_from_vdis(self, purge_sr, find_sr_from_vdi):
        vdi_refs = ['vdi_ref1', 'vdi_ref2']
        sr_refs = ['sr_ref1', 'sr_ref2']
        find_sr_from_vdi.side_effect = sr_refs
        self.ops.safe_cleanup_from_vdis(vdi_refs)
        self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
                                     sr_refs)

    @mock.patch.object(volume_utils, 'find_sr_from_vdi',
            side_effect=[exception.StorageError(reason=''), 'sr_ref2'])
    @mock.patch.object(volume_utils, 'purge_sr')
    def test_safe_cleanup_from_vdis_handles_find_sr_exception(self, purge_sr,
            find_sr_from_vdi):
        """A failed SR lookup for one VDI does not stop cleanup of the rest."""
        vdi_refs = ['vdi_ref1', 'vdi_ref2']
        sr_refs = ['sr_ref2']
        find_sr_from_vdi.side_effect = [exception.StorageError(reason=''),
                                        sr_refs[0]]
        self.ops.safe_cleanup_from_vdis(vdi_refs)
        self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
                                     sr_refs)

    @mock.patch.object(volume_utils, 'find_sr_from_vdi')
    @mock.patch.object(volume_utils, 'purge_sr')
    def test_safe_cleanup_from_vdis_handles_purge_sr_exception(self, purge_sr,
            find_sr_from_vdi):
        """A failed purge for one SR does not stop cleanup of the rest."""
        vdi_refs = ['vdi_ref1', 'vdi_ref2']
        sr_refs = ['sr_ref1', 'sr_ref2']
        find_sr_from_vdi.side_effect = sr_refs
        purge_sr.side_effect = [test.TestingException, None]
        self.ops.safe_cleanup_from_vdis(vdi_refs)
        self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
                                     sr_refs)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,21 +0,0 @@
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`xenapi` -- Nova support for XenServer and XCP through XenAPI
==================================================================
"""
from nova.virt.xenapi import driver

# Re-export the driver class at package level so the documented
# "compute_driver=xenapi.XenAPIDriver" setting resolves.
XenAPIDriver = driver.XenAPIDriver

View File

@ -1,442 +0,0 @@
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
from distutils import version
import os
import sys
import time
from os_xenapi.client import host_agent
from os_xenapi.client import XenAPI
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_serialization import base64
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import strutils
from oslo_utils import uuidutils
from nova.api.metadata import password
from nova.compute import utils as compute_utils
import nova.conf
from nova import context
from nova import crypto
from nova import exception
from nova.i18n import _
from nova import objects
from nova import utils
# Image-property keys controlling agent behaviour; the *_SM_KEY variants
# are the same keys carried in system metadata with the image-property
# prefix applied.
USE_AGENT_KEY = "xenapi_use_agent"
USE_AGENT_SM_KEY = utils.SM_IMAGE_PROP_PREFIX + USE_AGENT_KEY
SKIP_SSH_KEY = "xenapi_skip_agent_inject_ssh"
SKIP_SSH_SM_KEY = utils.SM_IMAGE_PROP_PREFIX + SKIP_SSH_KEY
SKIP_FILES_AT_BOOT_KEY = "xenapi_skip_agent_inject_files_at_boot"
SKIP_FILES_AT_BOOT_SM_KEY = utils.SM_IMAGE_PROP_PREFIX \
    + SKIP_FILES_AT_BOOT_KEY

LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
def _call_agent(session, instance, vm_ref, method, addl_args=None,
                timeout=None, success_codes=None):
    """Abstracts out the interaction with the agent xenapi plugin.

    :param session: XenAPI session used to reach the host plugin.
    :param instance: instance the agent runs in (for logging only).
    :param vm_ref: XenAPI VM reference for that instance.
    :param method: os_xenapi host_agent function to invoke; called as
        method(session, uuid, dom_id, timeout, **addl_args).
    :param addl_args: extra keyword args forwarded to ``method``.
    :param timeout: seconds to wait; defaults to
        CONF.xenserver.agent_timeout.
    :param success_codes: agent return codes treated as success
        (['0'] by default).
    :returns: the agent response 'message' with any escaped CRLF removed.
    :raises exception.AgentTimeout: the plugin reported a timeout.
    :raises exception.AgentNotImplemented: the agent lacks the method.
    :raises exception.AgentError: any other plugin/agent failure.
    """
    if addl_args is None:
        addl_args = {}
    if timeout is None:
        timeout = CONF.xenserver.agent_timeout
    if success_codes is None:
        success_codes = ['0']

    # always fetch domid because VM may have rebooted
    dom_id = session.VM.get_domid(vm_ref)
    uuid = uuidutils.generate_uuid()
    # 'args' is only used for log messages below.
    args = {
        'id': uuid,
        'dom_id': str(dom_id),
        'timeout': str(timeout),
    }

    try:
        ret = method(session, uuid, dom_id, timeout, **addl_args)
    except XenAPI.Failure as e:
        # The plugin encodes the failure kind in the last line of the
        # last details entry; dispatch on its prefix.
        err_msg = e.details[-1].splitlines()[-1]
        if 'TIMEOUT:' in err_msg:
            LOG.error('TIMEOUT: The call to %(method)s timed out. '
                      'args=%(args)r',
                      {'method': method, 'args': args}, instance=instance)
            raise exception.AgentTimeout(method=method.__name__)
        elif 'REBOOT:' in err_msg:
            LOG.debug('REBOOT: The call to %(method)s detected a reboot. '
                      'args=%(args)r',
                      {'method': method, 'args': args}, instance=instance)
            # Wait for the new domain, then retry the whole call once the
            # VM is back (recursive, bounded by the agent timeout).
            _wait_for_new_dom_id(session, vm_ref, dom_id, method)
            return _call_agent(session, instance, vm_ref, method,
                               addl_args, timeout, success_codes)
        elif 'NOT IMPLEMENTED:' in err_msg:
            LOG.error('NOT IMPLEMENTED: The call to %(method)s is not '
                      'supported by the agent. args=%(args)r',
                      {'method': method, 'args': args}, instance=instance)
            raise exception.AgentNotImplemented(method=method.__name__)
        else:
            LOG.error('The call to %(method)s returned an error: %(e)s. '
                      'args=%(args)r',
                      {'method': method, 'args': args, 'e': e},
                      instance=instance)
            raise exception.AgentError(method=method.__name__)

    if not isinstance(ret, dict):
        # Older plugins may return a JSON string instead of a dict.
        try:
            ret = jsonutils.loads(ret)
        except TypeError:
            LOG.error('The agent call to %(method)s returned an invalid '
                      'response: %(ret)r. args=%(args)r',
                      {'method': method, 'ret': ret, 'args': args},
                      instance=instance)
            raise exception.AgentError(method=method.__name__)

    if ret['returncode'] not in success_codes:
        LOG.error('The agent call to %(method)s returned '
                  'an error: %(ret)r. args=%(args)r',
                  {'method': method, 'ret': ret, 'args': args},
                  instance=instance)
        raise exception.AgentError(method=method.__name__)

    LOG.debug('The agent call to %(method)s was successful: '
              '%(ret)r. args=%(args)r',
              {'method': method, 'ret': ret, 'args': args},
              instance=instance)

    # Some old versions of the Windows agent have a trailing \\r\\n
    # (ie CRLF escaped) for some reason. Strip that off.
    return ret['message'].replace('\\r\\n', '')
def _wait_for_new_dom_id(session, vm_ref, old_dom_id, method):
    """Poll until the VM reports a valid domid different from old_dom_id.

    Used after the agent signals a reboot: the domain id changes once the
    guest comes back up. Raises AgentTimeout if no new domid appears within
    CONF.xenserver.agent_timeout seconds.
    """
    deadline = time.time() + CONF.xenserver.agent_timeout
    while True:
        current = session.VM.get_domid(vm_ref)
        # A falsy or "-1" domid means the domain is not (yet) running.
        if current and current not in ("-1", old_dom_id):
            LOG.debug("Found new dom_id %s", current)
            return
        if time.time() > deadline:
            LOG.debug("Timed out waiting for new dom_id %s", current)
            raise exception.AgentTimeout(method=method.__name__)
        time.sleep(1)
def is_upgrade_required(current_version, available_version):
    """Return True if available_version is strictly newer than current_version.

    Agent version numbers are four-part dotted strings (e.g. '1.2.3.4').
    Each dot-separated component is compared numerically when possible;
    non-numeric components sort after numeric ones, matching the loose
    comparison semantics previously provided by LooseVersion.

    :param current_version: version string the agent currently reports
    :param available_version: version string of the candidate build
    :returns: bool, True when an upgrade should be performed
    """
    # NOTE: this previously used distutils.version.LooseVersion, but
    # distutils is deprecated (PEP 632) and LooseVersion raises TypeError
    # on Python 3 when comparing mixed numeric/alphanumeric components.
    def _parse(ver):
        parts = []
        for component in ver.split('.'):
            try:
                # Rank 0: numeric components order before alphanumeric ones.
                parts.append((0, int(component), ''))
            except ValueError:
                parts.append((1, 0, component))
        return parts

    return _parse(available_version) > _parse(current_version)
class XenAPIBasedAgent(object):
def __init__(self, session, virtapi, instance, vm_ref):
self.session = session
self.virtapi = virtapi
self.instance = instance
self.vm_ref = vm_ref
    def _add_instance_fault(self, error, exc_info):
        """Record an agent error as an instance fault, never raising.

        Fault recording is best-effort: any failure while writing the
        fault is logged and swallowed so agent problems cannot break the
        surrounding operation.

        :param error: the exception reported by the agent call
        :param exc_info: sys.exc_info() tuple captured at the raise site
        """
        LOG.warning("Ignoring error while configuring instance with agent: %s",
                    error, instance=self.instance, exc_info=True)
        try:
            ctxt = context.get_admin_context()
            compute_utils.add_instance_fault_from_exc(
                ctxt, self.instance, error, exc_info=exc_info)
        except Exception:
            # Deliberately broad: fault bookkeeping must never propagate.
            LOG.debug("Error setting instance fault.", exc_info=True)
def _call_agent(self, method, addl_args=None, timeout=None,
success_codes=None, ignore_errors=True):
try:
return _call_agent(self.session, self.instance, self.vm_ref,
method, addl_args, timeout, success_codes)
except exception.AgentError as error:
if ignore_errors:
self._add_instance_fault(error, sys.exc_info())
else:
raise
    def get_version(self):
        """Return the agent's version string, retrying until the agent is up.

        Returns None (after recording an instance fault) if the agent has
        not answered within CONF.xenserver.agent_version_timeout seconds.
        """
        LOG.debug('Querying agent version', instance=self.instance)

        # The agent can be slow to start for a variety of reasons. On Windows,
        # it will generally perform a setup process on first boot that can
        # take a couple of minutes and then reboot. On Linux, the system can
        # also take a while to boot.
        expiration = time.time() + CONF.xenserver.agent_version_timeout
        while True:
            try:
                # NOTE(johngarbutt): we can't use the xapi plugin
                # timeout, because the domid may change when
                # the server is rebooted
                return self._call_agent(host_agent.version,
                                        ignore_errors=False)
            except exception.AgentError as error:
                if time.time() > expiration:
                    self._add_instance_fault(error, sys.exc_info())
                    return
def _get_expected_build(self):
ctxt = context.get_admin_context()
agent_build = objects.Agent.get_by_triple(
ctxt, 'xen', self.instance['os_type'],
self.instance['architecture'])
if agent_build:
LOG.debug('Latest agent build for %(hypervisor)s/%(os)s'
'/%(architecture)s is %(version)s', {
'hypervisor': agent_build.hypervisor,
'os': agent_build.os,
'architecture': agent_build.architecture,
'version': agent_build.version})
else:
LOG.debug('No agent build found for %(hypervisor)s/%(os)s'
'/%(architecture)s', {
'hypervisor': 'xen',
'os': self.instance['os_type'],
'architecture': self.instance['architecture']})
return agent_build
def update_if_needed(self, version):
agent_build = self._get_expected_build()
if version and agent_build and \
is_upgrade_required(version, agent_build.version):
LOG.debug('Updating agent to %s', agent_build.version,
instance=self.instance)
self._perform_update(agent_build)
else:
LOG.debug('Skipping agent update.', instance=self.instance)
def _perform_update(self, agent_build):
args = {'url': agent_build.url, 'md5sum': agent_build.md5hash}
try:
self._call_agent(host_agent.agent_update, args)
except exception.AgentError as exc:
# Silently fail for agent upgrades
LOG.warning("Unable to update the agent due to: %(exc)s",
dict(exc=exc), instance=self.instance)
def _exchange_key_with_agent(self):
dh = SimpleDH()
args = {'pub': str(dh.get_public())}
resp = self._call_agent(host_agent.key_init, args,
success_codes=['D0'], ignore_errors=False)
agent_pub = int(resp)
dh.compute_shared(agent_pub)
return dh
def _save_instance_password_if_sshkey_present(self, new_pass):
sshkey = self.instance.get('key_data')
if sshkey and sshkey.startswith("ssh-rsa"):
ctxt = context.get_admin_context()
enc = crypto.ssh_encrypt_text(sshkey, new_pass)
self.instance.system_metadata.update(
password.convert_password(ctxt, base64.encode_as_text(enc)))
self.instance.save()
def set_admin_password(self, new_pass):
"""Set the root/admin password on the VM instance.
This is done via an agent running on the VM. Communication between nova
and the agent is done via writing xenstore records. Since communication
is done over the XenAPI RPC calls, we need to encrypt the password.
We're using a simple Diffie-Hellman class instead of a more advanced
library (such as M2Crypto) for compatibility with the agent code.
"""
LOG.debug('Setting admin password', instance=self.instance)
try:
dh = self._exchange_key_with_agent()
except exception.AgentError as error:
self._add_instance_fault(error, sys.exc_info())
return
# Some old versions of Linux and Windows agent expect trailing \n
# on password to work correctly.
enc_pass = dh.encrypt(new_pass + '\n')
args = {'enc_pass': enc_pass}
self._call_agent(host_agent.password, args)
self._save_instance_password_if_sshkey_present(new_pass)
def inject_ssh_key(self):
sshkey = self.instance.get('key_data')
if not sshkey:
return
if self.instance['os_type'] == 'windows':
LOG.debug("Skipping setting of ssh key for Windows.",
instance=self.instance)
return
if self._skip_ssh_key_inject():
LOG.debug("Skipping agent ssh key injection for this image.",
instance=self.instance)
return
sshkey = str(sshkey)
keyfile = '/root/.ssh/authorized_keys'
key_data = ''.join([
'\n',
'# The following ssh key was injected by Nova',
'\n',
sshkey.strip(),
'\n',
])
return self.inject_file(keyfile, key_data)
def inject_files(self, injected_files):
if self._skip_inject_files_at_boot():
LOG.debug("Skipping agent file injection for this image.",
instance=self.instance)
else:
for path, contents in injected_files:
self.inject_file(path, contents)
def inject_file(self, path, contents):
LOG.debug('Injecting file path: %r', path, instance=self.instance)
# Files/paths must be base64-encoded for transmission to agent
b64_path = base64.encode_as_bytes(path)
b64_contents = base64.encode_as_bytes(contents)
args = {'b64_path': b64_path, 'b64_contents': b64_contents}
return self._call_agent(host_agent.inject_file, args)
def resetnetwork(self):
LOG.debug('Resetting network', instance=self.instance)
# NOTE(johngarbutt) old FreeBSD and Gentoo agents return 500 on success
return self._call_agent(host_agent.reset_network,
timeout=CONF.xenserver.agent_resetnetwork_timeout,
success_codes=['0', '500'])
def _skip_ssh_key_inject(self):
return self._get_sys_meta_key(SKIP_SSH_SM_KEY)
def _skip_inject_files_at_boot(self):
return self._get_sys_meta_key(SKIP_FILES_AT_BOOT_SM_KEY)
def _get_sys_meta_key(self, key):
sys_meta = utils.instance_sys_meta(self.instance)
raw_value = sys_meta.get(key, 'False')
return strutils.bool_from_string(raw_value, strict=False)
def find_guest_agent(base_dir):
    """Check whether a guest agent is installed under *base_dir*.

    Returns True when the configured agent file exists (meaning the image
    can reconfigure its network from xenstore data on its own), False
    otherwise, including when the agent is disabled by configuration.
    """
    if CONF.xenserver.disable_agent:
        return False
    candidate = os.path.join(base_dir, CONF.xenserver.agent_path)
    if os.path.isfile(candidate):
        # The guest agent applies network configuration from xenstore,
        # so manipulation of files in /etc is not required.
        LOG.info('XenServer tools installed in this '
                 'image are capable of network injection. '
                 'Networking files will not be manipulated')
        return True
    daemon_path = os.path.join(base_dir, 'usr', 'sbin', 'xe-daemon')
    if os.path.isfile(daemon_path):
        LOG.info('XenServer tools are present '
                 'in this image but are not capable '
                 'of network injection')
    else:
        LOG.info('XenServer tools are not installed in this image')
    return False
def should_use_agent(instance):
    """Decide whether the guest agent should be used for *instance*.

    The per-image 'agent_present' system metadata flag wins when present
    and parseable; otherwise CONF.xenserver.use_agent_default applies.
    """
    sys_meta = utils.instance_sys_meta(instance)
    try:
        raw_flag = sys_meta[USE_AGENT_SM_KEY]
    except KeyError:
        return CONF.xenserver.use_agent_default
    try:
        return strutils.bool_from_string(raw_flag, strict=True)
    except ValueError:
        LOG.warning("Invalid 'agent_present' value. "
                    "Falling back to the default.",
                    instance=instance)
        return CONF.xenserver.use_agent_default
class SimpleDH(object):
    """Minimal Diffie-Hellman-Merkle key exchange helper.

    Implements just enough DH to agree on a shared secret with the guest
    agent, using fixed default prime and base values, plus AES
    encryption/decryption of short strings via the ``openssl`` binary
    (for compatibility with the agent implementation). A RuntimeError is
    raised when the openssl invocation fails.
    """
    def __init__(self):
        # Fixed prime/base pair shared with the agent-side implementation.
        self._prime = 162259276829213363391578010288127
        self._base = 5
        self._public = None
        self._shared = None
        self.generate_private()
    def generate_private(self):
        """Pick and store a fresh random private exponent."""
        self._private = int(binascii.hexlify(os.urandom(10)), 16)
        return self._private
    def get_public(self):
        """Derive and return our public value from the private exponent."""
        self._public = pow(self._base, self._private, self._prime)
        return self._public
    def compute_shared(self, other):
        """Fold the peer's public value into the shared secret."""
        self._shared = pow(other, self._private, self._prime)
        return self._shared
    def _run_ssl(self, text, decrypt=False):
        """Pipe *text* through ``openssl aes-128-cbc`` keyed on the shared
        secret; *decrypt* selects direction.
        """
        command = ['openssl', 'aes-128-cbc', '-A', '-a', '-pass',
                   'pass:%s' % self._shared, '-nosalt']
        if decrypt:
            command.append('-d')
        try:
            stdout, stderr = processutils.execute(
                *command,
                process_input=encodeutils.safe_encode(text),
                check_exit_code=True)
        except processutils.ProcessExecutionError as e:
            raise RuntimeError(
                _('OpenSSL errored with exit code %(exit_code)d: %(stderr)s') %
                {'exit_code': e.exit_code, 'stderr': e.stderr})
        if stderr:
            LOG.warning("OpenSSL stderr: %s", stderr)
        return stdout
    def encrypt(self, text):
        """Encrypt *text*, returning base64 with the trailing newline cut."""
        return self._run_ssl(text).strip('\n')
    def decrypt(self, text):
        """Decrypt base64 *text* produced by :meth:`encrypt`."""
        return self._run_ssl(text, decrypt=True)

View File

@ -1,861 +0,0 @@
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A driver for XenServer or Xen Cloud Platform.
**Variable Naming Scheme**
- suffix "_ref" for opaque references
- suffix "_uuid" for UUIDs
- suffix "_rec" for record objects
"""
import os_resource_classes as orc
from os_xenapi.client import session
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import units
from oslo_utils import versionutils
import six.moves.urllib.parse as urlparse
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova import objects
from nova.virt import driver
from nova.virt.xenapi import host
from nova.virt.xenapi import pool
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volumeops
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
def invalid_option(option_name, recommended_value):
    """Log and raise for a config option that is incompatible with
    CONF.xenserver.independent_compute=True.

    Always raises exception.NotSupportedWithOption.
    """
    log_values = {'option': option_name, 'recommended': recommended_value}
    LOG.exception('Current value of '
                  'CONF.xenserver.%(option)s option incompatible with '
                  'CONF.xenserver.independent_compute=True. '
                  'Consider using "%(recommended)s"', log_values)
    raise exception.NotSupportedWithOption(
        option='CONF.xenserver.independent_compute',
        operation=option_name)
class XenAPIDriver(driver.ComputeDriver):
    """A connection to XenServer or Xen Cloud Platform."""
    # Static feature flags advertised to the compute manager; see
    # nova.virt.driver.ComputeDriver for the meaning of each key.
    capabilities = {
        "has_imagecache": False,
        "supports_evacuate": False,
        "supports_migrate_to_same_host": False,
        "supports_attach_interface": True,
        "supports_device_tagging": True,
        "supports_multiattach": False,
        "supports_trusted_certs": False,
        "supports_pcpus": False,
        "supports_accelerators": False,
        # Image type support flags
        "supports_image_type_aki": False,
        "supports_image_type_ami": False,
        "supports_image_type_ari": False,
        "supports_image_type_iso": False,
        "supports_image_type_qcow2": False,
        "supports_image_type_raw": True,
        "supports_image_type_vdi": True,
        "supports_image_type_vhd": True,
        "supports_image_type_vhdx": False,
        "supports_image_type_vmdk": False,
        "supports_image_type_ploop": False,
    }
    def __init__(self, virtapi, read_only=False):
        """Open a XenAPI session and build the ops helpers.

        :raises Exception: when connection_url or connection_password is
            not configured.
        """
        super(XenAPIDriver, self).__init__(virtapi)
        url = CONF.xenserver.connection_url
        username = CONF.xenserver.connection_username
        password = CONF.xenserver.connection_password
        if not url or password is None:
            raise Exception(_('Must specify connection_url, '
                              'connection_username (optionally), and '
                              'connection_password to use '
                              'compute_driver=xenapi.XenAPIDriver'))
        self._session = session.XenAPISession(url, username, password,
                                              originator="nova")
        self._volumeops = volumeops.VolumeOps(self._session)
        # Lazily created by the host_state property.
        self._host_state = None
        self._host = host.Host(self._session, self.virtapi)
        self._vmops = vmops.VMOps(self._session, self.virtapi)
        # Cached by get_volume_connector on first use.
        self._initiator = None
        self._hypervisor_hostname = None
        self._pool = pool.ResourcePool(self._session, self.virtapi)
    @property
    def host_state(self):
        # Lazily instantiate the HostState wrapper on first access.
        if not self._host_state:
            self._host_state = host.HostState(self._session)
        return self._host_state
    def init_host(self, host):
        """Validate configuration and clean up leftover attached VDIs."""
        LOG.warning('The xenapi driver is deprecated and may be removed in a '
                    'future release. The driver is not tested by the '
                    'OpenStack project nor does it have clear maintainer(s) '
                    'and thus its quality can not be ensured. If you are '
                    'using the driver in production please let us know in '
                    'freenode IRC and/or the openstack-discuss mailing list.')
        if CONF.xenserver.independent_compute:
            # Check various options are in the correct state:
            # invalid_option logs and raises NotSupportedWithOption.
            if CONF.xenserver.check_host:
                invalid_option('CONF.xenserver.check_host', False)
            if CONF.flat_injected:
                invalid_option('CONF.flat_injected', False)
            if CONF.default_ephemeral_format and \
                    CONF.default_ephemeral_format != 'ext3':
                invalid_option('CONF.default_ephemeral_format', 'ext3')
        if CONF.xenserver.check_host:
            vm_utils.ensure_correct_host(self._session)
        if not CONF.xenserver.independent_compute:
            try:
                vm_utils.cleanup_attached_vdis(self._session)
            except Exception:
                # Best-effort cleanup; startup continues regardless.
                LOG.exception('Failure while cleaning up attached VDIs')
    def instance_exists(self, instance):
        """Checks existence of an instance on the host.

        :param instance: The instance to lookup

        Returns True if supplied instance exists on the host, False otherwise.

        NOTE(belliott): This is an override of the base method for
        efficiency.
        """
        return self._vmops.instance_exists(instance.name)
    def list_instances(self):
        """List VM instances."""
        return self._vmops.list_instances()
    def list_instance_uuids(self):
        """Get the list of nova instance uuids for VMs found on the
        hypervisor.
        """
        return self._vmops.list_instance_uuids()
def _is_vgpu_allocated(self, allocations):
# check if allocated vGPUs
if not allocations:
# If no allocations, there is no vGPU request.
return False
RC_VGPU = orc.VGPU
for rp in allocations:
res = allocations[rp]['resources']
if res and RC_VGPU in res and res[RC_VGPU] > 0:
return True
return False
    def _get_vgpu_info(self, allocations):
        """Get vGPU info basing on the allocations.

        :param allocations: Information about resources allocated to the
                            instance via placement, of the form returned by
                            SchedulerReportClient.get_allocations_for_consumer.
        :returns: Dictionary describing vGPU info if any vGPU allocated;
                  None otherwise.
        :raises: exception.ComputeResourcesUnavailable if there is no
                 available vGPUs.
        """
        if not self._is_vgpu_allocated(allocations):
            return None
        # NOTE(jianghuaw): At the moment, we associate all vGPUs resource to
        # the compute node regardless which GPU group the vGPUs belong to, so
        # we need search all GPU groups until we got one group which has
        # remaining capacity to supply one vGPU. Once we switch to the
        # nested resource providers, the allocations will contain the resource
        # provider which represents a particular GPU group. It's able to get
        # the GPU group and vGPU type directly by using the resource provider's
        # uuid. Then we can consider moving this function to vmops, as there is
        # no need to query host stats to get all GPU groups.
        host_stats = self.host_state.get_host_stats(refresh=True)
        vgpu_stats = host_stats['vgpu_stats']
        for grp_uuid in vgpu_stats:
            if vgpu_stats[grp_uuid]['remaining'] > 0:
                # NOTE(jianghuaw): As XenServer only supports single vGPU per
                # VM, we've restricted the inventory data having `max_unit` as
                # 1. If it reached here, surely only one GPU is allocated.
                # So just return the GPU group uuid and vGPU type uuid once
                # we got one group which still has remaining vGPUs.
                return dict(gpu_grp_uuid=grp_uuid,
                            vgpu_type_uuid=vgpu_stats[grp_uuid]['uuid'])
        # No remaining vGPU available: e.g. the vGPU resource has been used by
        # other instance or the vGPU has been changed to be disabled.
        raise exception.ComputeResourcesUnavailable(
            reason='vGPU resource is not available')
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, allocations, network_info=None,
              block_device_info=None, power_on=True, accel_info=None):
        """Create VM instance."""
        # Resolve the placement vGPU allocation (if any) into concrete
        # GPU group / vGPU type uuids before delegating to vmops.
        vgpu_info = self._get_vgpu_info(allocations)
        self._vmops.spawn(context, instance, image_meta, injected_files,
                          admin_password, network_info, block_device_info,
                          vgpu_info)
    def confirm_migration(self, context, migration, instance, network_info):
        """Confirms a resize, destroying the source VM."""
        self._vmops.confirm_migration(migration, instance, network_info)
    def finish_revert_migration(self, context, instance, network_info,
                                migration, block_device_info=None,
                                power_on=True):
        """Finish reverting a resize."""
        # NOTE(vish): Xen currently does not use network info.
        self._vmops.finish_revert_migration(context, instance,
                                            block_device_info,
                                            power_on)
    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         allocations, block_device_info=None, power_on=True):
        """Completes a resize, turning on the migrated instance."""
        self._vmops.finish_migration(context, migration, instance, disk_info,
                                     network_info, image_meta, resize_instance,
                                     block_device_info, power_on)
    def snapshot(self, context, instance, image_id, update_task_state):
        """Create snapshot from a running VM instance."""
        self._vmops.snapshot(context, instance, image_id, update_task_state)
    def post_interrupted_snapshot_cleanup(self, context, instance):
        """Cleans up any resources left after a failed snapshot."""
        self._vmops.post_interrupted_snapshot_cleanup(context, instance)
    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None,
               accel_info=None):
        """Reboot VM instance."""
        self._vmops.reboot(instance, reboot_type,
                           bad_volumes_callback=bad_volumes_callback)
    def set_admin_password(self, instance, new_pass):
        """Set the root/admin password on the VM instance."""
        self._vmops.set_admin_password(instance, new_pass)
    def change_instance_metadata(self, context, instance, diff):
        """Apply a diff to the instance metadata."""
        self._vmops.change_instance_metadata(instance, diff)
    def destroy(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True):
        """Destroy VM instance."""
        self._vmops.destroy(instance, network_info, block_device_info,
                            destroy_disks)
    def cleanup(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None, destroy_vifs=True):
        """Cleanup after instance being destroyed by Hypervisor."""
        # Intentionally a no-op: destroy() already tears everything down.
        pass
    def pause(self, instance):
        """Pause VM instance."""
        self._vmops.pause(instance)
    def unpause(self, instance):
        """Unpause paused VM instance."""
        self._vmops.unpause(instance)
    def migrate_disk_and_power_off(self, context, instance, dest,
                                   flavor, network_info,
                                   block_device_info=None,
                                   timeout=0, retry_interval=0):
        """Transfers the VHD of a running instance to another host, then shuts
        off the instance copies over the COW disk
        """
        # NOTE(vish): Xen currently does not use network info.
        # TODO(PhilDay): Add support for timeout (clean shutdown)
        return self._vmops.migrate_disk_and_power_off(context, instance,
            dest, flavor, block_device_info)
    def suspend(self, context, instance):
        """suspend the specified instance."""
        self._vmops.suspend(instance)
    def resume(self, context, instance, network_info, block_device_info=None):
        """resume the specified instance."""
        self._vmops.resume(instance)
    def rescue(self, context, instance, network_info, image_meta,
               rescue_password, block_device_info):
        """Rescue the specified instance."""
        self._vmops.rescue(context, instance, network_info, image_meta,
                           rescue_password)
    def set_bootable(self, instance, is_bootable):
        """Set the ability to power on/off an instance."""
        self._vmops.set_bootable(instance, is_bootable)
    def unrescue(
        self,
        context: nova_context.RequestContext,
        instance: 'objects.Instance',
    ):
        """Unrescue the specified instance."""
        self._vmops.unrescue(instance)
    def power_off(self, instance, timeout=0, retry_interval=0):
        """Power off the specified instance."""
        # TODO(PhilDay): Add support for timeout (clean shutdown)
        self._vmops.power_off(instance)
    def power_on(self, context, instance, network_info,
                 block_device_info=None, accel_info=None):
        """Power on the specified instance."""
        self._vmops.power_on(instance)
    def soft_delete(self, instance):
        """Soft delete the specified instance."""
        self._vmops.soft_delete(instance)
    def restore(self, instance):
        """Restore the specified instance."""
        self._vmops.restore(instance)
    def poll_rebooting_instances(self, timeout, instances):
        """Poll for rebooting instances."""
        self._vmops.poll_rebooting_instances(timeout, instances)
    def reset_network(self, instance):
        """reset networking for specified instance."""
        self._vmops.reset_network(instance)
    def inject_network_info(self, instance, nw_info):
        """inject network info for specified instance."""
        self._vmops.inject_network_info(instance, nw_info)
    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        self._vmops.plug_vifs(instance, network_info)
    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks."""
        self._vmops.unplug_vifs(instance, network_info)
    def get_info(self, instance, use_cache=True):
        """Return data about VM instance."""
        return self._vmops.get_info(instance)
    def get_diagnostics(self, instance):
        """Return data about VM diagnostics."""
        return self._vmops.get_diagnostics(instance)
    def get_instance_diagnostics(self, instance):
        """Return data about VM diagnostics."""
        return self._vmops.get_instance_diagnostics(instance)
def get_all_bw_counters(self, instances):
"""Return bandwidth usage counters for each interface on each
running VM.
"""
# we only care about VMs that correspond to a nova-managed
# instance:
imap = {inst['name']: inst['uuid'] for inst in instances}
bwcounters = []
# get a dictionary of instance names. values are dictionaries
# of mac addresses with values that are the bw counters:
# e.g. {'instance-001' : { 12:34:56:78:90:12 : {'bw_in': 0, ....}}
all_counters = self._vmops.get_all_bw_counters()
for instance_name, counters in all_counters.items():
if instance_name in imap:
# yes these are stats for a nova-managed vm
# correlate the stats with the nova instance uuid:
for vif_counter in counters.values():
vif_counter['uuid'] = imap[instance_name]
bwcounters.append(vif_counter)
return bwcounters
    def get_console_output(self, context, instance):
        """Return snapshot of console."""
        return self._vmops.get_console_output(instance)
    def get_vnc_console(self, context, instance):
        """Return link to instance's VNC console."""
        return self._vmops.get_vnc_console(instance)
    def get_volume_connector(self, instance):
        """Return volume connector information."""
        # The iSCSI IQN and hostname are cached after the first lookup;
        # on lookup failure the initiator stays None in the result.
        if not self._initiator or not self._hypervisor_hostname:
            stats = self.host_state.get_host_stats(refresh=True)
            try:
                self._initiator = stats['host_other-config']['iscsi_iqn']
                self._hypervisor_hostname = stats['host_hostname']
            except (TypeError, KeyError) as err:
                LOG.warning('Could not determine key: %s', err,
                            instance=instance)
                self._initiator = None
        return {
            'ip': self._get_block_storage_ip(),
            'initiator': self._initiator,
            'host': self._hypervisor_hostname
        }
    def _get_block_storage_ip(self):
        """Return the IP to use for block storage traffic."""
        # If CONF.my_block_storage_ip is set, use it.
        if CONF.my_block_storage_ip != CONF.my_ip:
            return CONF.my_block_storage_ip
        return self.get_host_ip_addr()
    def get_host_ip_addr(self):
        """Return the host part (host[:port]) of the XenServer URL."""
        xs_url = urlparse.urlparse(CONF.xenserver.connection_url)
        return xs_url.netloc
    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Attach volume storage to VM instance."""
        self._volumeops.attach_volume(connection_info,
                                      instance['name'],
                                      mountpoint)
    def detach_volume(self, context, connection_info, instance, mountpoint,
                      encryption=None):
        """Detach volume storage from VM instance."""
        self._volumeops.detach_volume(connection_info,
                                      instance['name'],
                                      mountpoint)
    def get_console_pool_info(self, console_type):
        """Return connection details for the console proxy service."""
        xs_url = urlparse.urlparse(CONF.xenserver.connection_url)
        return {'address': xs_url.netloc,
                'username': CONF.xenserver.connection_username,
                'password': CONF.xenserver.connection_password}
def _get_vgpu_total(self, vgpu_stats):
# NOTE(jianghuaw): Now we only enable one vGPU type in one
# compute node. So normally vgpu_stats should contain only
# one GPU group. If there are multiple GPU groups, they
# must contain the same vGPU type. So just add them up.
total = 0
for grp_id in vgpu_stats:
total += vgpu_stats[grp_id]['total']
return total
    def update_provider_tree(self, provider_tree, nodename, allocations=None):
        """Update a ProviderTree object with current resource provider and
        inventory information.

        :param nova.compute.provider_tree.ProviderTree provider_tree:
            A nova.compute.provider_tree.ProviderTree object representing all
            the providers in the tree associated with the compute node, and any
            sharing providers (those with the ``MISC_SHARES_VIA_AGGREGATE``
            trait) associated via aggregate with any of those providers (but
            not *their* tree- or aggregate-associated providers), as currently
            known by placement. This object is fully owned by the
            update_provider_tree method, and can therefore be modified without
            locking/concurrency considerations. In other words, the parameter
            is passed *by reference* with the expectation that the virt driver
            will modify the object. Note, however, that it may contain
            providers not directly owned/controlled by the compute host. Care
            must be taken not to remove or modify such providers inadvertently.
            In addition, providers may be associated with traits and/or
            aggregates maintained by outside agents. The
            `update_provider_tree`` method must therefore also be careful only
            to add/remove traits/aggregates it explicitly controls.
        :param nodename:
            String name of the compute node (i.e.
            ComputeNode.hypervisor_hostname) for which the caller is requesting
            updated provider information. Drivers may use this to help identify
            the compute node provider in the ProviderTree. Drivers managing
            more than one node (e.g. ironic) may also use it as a cue to
            indicate which node is being processed by the caller.
        :param allocations:
            Dict of allocation data of the form:
              { $CONSUMER_UUID: {
                    # The shape of each "allocations" dict below is identical
                    # to the return from GET /allocations/{consumer_uuid}
                    "allocations": {
                        $RP_UUID: {
                            "generation": $RP_GEN,
                            "resources": {
                                $RESOURCE_CLASS: $AMOUNT,
                                ...
                            },
                        },
                        ...
                    },
                    "project_id": $PROJ_ID,
                    "user_id": $USER_ID,
                    "consumer_generation": $CONSUMER_GEN,
                },
                ...
              }
            If None, and the method determines that any inventory needs to be
            moved (from one provider to another and/or to a different resource
            class), the ReshapeNeeded exception must be raised. Otherwise, this
            dict must be edited in place to indicate the desired final state of
            allocations. Drivers should *only* edit allocation records for
            providers whose inventories are being affected by the reshape
            operation.
        :raises ReshapeNeeded: If allocations is None and any inventory needs
            to be moved from one provider to another and/or to a different
            resource class.
        :raises: ReshapeFailed if the requested tree reshape fails for
            whatever reason.
        """
        host_stats = self.host_state.get_host_stats(refresh=True)
        vcpus = host_stats['host_cpu_info']['cpu_count']
        memory_mb = int(host_stats['host_memory_total'] / units.Mi)
        disk_gb = int(host_stats['disk_total'] / units.Gi)
        vgpus = self._get_vgpu_total(host_stats['vgpu_stats'])
        # If the inventory record does not exist, the allocation_ratio
        # will use the CONF.xxx_allocation_ratio value if xxx_allocation_ratio
        # is set, and fallback to use the initial_xxx_allocation_ratio
        # otherwise.
        inv = provider_tree.data(nodename).inventory
        ratios = self._get_allocation_ratios(inv)
        # Standard VCPU/MEMORY_MB/DISK_GB inventory for this compute node.
        result = {
            orc.VCPU: {
                'total': vcpus,
                'min_unit': 1,
                'max_unit': vcpus,
                'step_size': 1,
                'allocation_ratio': ratios[orc.VCPU],
                'reserved': CONF.reserved_host_cpus,
            },
            orc.MEMORY_MB: {
                'total': memory_mb,
                'min_unit': 1,
                'max_unit': memory_mb,
                'step_size': 1,
                'allocation_ratio': ratios[orc.MEMORY_MB],
                'reserved': CONF.reserved_host_memory_mb,
            },
            orc.DISK_GB: {
                'total': disk_gb,
                'min_unit': 1,
                'max_unit': disk_gb,
                'step_size': 1,
                'allocation_ratio': ratios[orc.DISK_GB],
                'reserved': self._get_reserved_host_disk_gb_from_config(),
            },
        }
        if vgpus > 0:
            # Only create inventory for vGPU when driver can supply vGPUs.
            # At the moment, XenAPI can support up to one vGPU per VM,
            # so max_unit is 1.
            result.update(
                {
                    orc.VGPU: {
                        'total': vgpus,
                        'min_unit': 1,
                        'max_unit': 1,
                        'step_size': 1,
                    }
                }
            )
        provider_tree.update_inventory(nodename, result)
    def get_available_resource(self, nodename):
        """Retrieve resource information.

        This method is called when nova-compute launches, and
        as part of a periodic task that records the results in the DB.

        :param nodename: ignored in this driver
        :returns: dictionary describing resources
        """
        host_stats = self.host_state.get_host_stats(refresh=True)
        # Updating host information
        # NOTE(review): true division is used here, so the MB/GB values
        # may be floats (update_provider_tree wraps similar expressions
        # in int()) — presumably consumers tolerate floats; confirm.
        total_ram_mb = host_stats['host_memory_total'] / units.Mi
        # NOTE(belliott) memory-free-computed is a value provided by XenServer
        # for gauging free memory more conservatively than memory-free.
        free_ram_mb = host_stats['host_memory_free_computed'] / units.Mi
        total_disk_gb = host_stats['disk_total'] / units.Gi
        used_disk_gb = host_stats['disk_used'] / units.Gi
        allocated_disk_gb = host_stats['disk_allocated'] / units.Gi
        hyper_ver = versionutils.convert_version_to_int(
            self._session.product_version)
        dic = {'vcpus': host_stats['host_cpu_info']['cpu_count'],
               'memory_mb': total_ram_mb,
               'local_gb': total_disk_gb,
               'vcpus_used': host_stats['vcpus_used'],
               'memory_mb_used': total_ram_mb - free_ram_mb,
               'local_gb_used': used_disk_gb,
               'hypervisor_type': 'XenServer',
               'hypervisor_version': hyper_ver,
               'hypervisor_hostname': host_stats['host_hostname'],
               'cpu_info': jsonutils.dumps(host_stats['cpu_model']),
               'disk_available_least': total_disk_gb - allocated_disk_gb,
               'supported_instances': host_stats['supported_instances'],
               'pci_passthrough_devices': jsonutils.dumps(
                   host_stats['pci_passthrough_devices']),
               'numa_topology': None}
        return dic
    def check_can_live_migrate_destination(self, context, instance,
                                           src_compute_info, dst_compute_info,
                                           block_migration=False, disk_over_commit=False):
        """Check if it is possible to execute live migration.

        :param context: security context
        :param instance: nova.db.sqlalchemy.models.Instance object
        :param block_migration: if true, prepare for block migration
        :param disk_over_commit: if true, allow disk over commit
        :returns: a XenapiLiveMigrateData object
        """
        return self._vmops.check_can_live_migrate_destination(context,
                                                              instance,
                                                              block_migration,
                                                              disk_over_commit)
    def cleanup_live_migration_destination_check(self, context,
                                                 dest_check_data):
        """Do required cleanup on dest host after check_can_live_migrate calls

        :param context: security context
        :param dest_check_data: result of check_can_live_migrate_destination
        """
        # Nothing to clean up for this driver.
        pass
    def check_can_live_migrate_source(self, context, instance,
                                      dest_check_data, block_device_info=None):
        """Check if it is possible to execute live migration.

        This checks if the live migration can succeed, based on the
        results from check_can_live_migrate_destination.

        :param context: security context
        :param instance: nova.db.sqlalchemy.models.Instance
        :param dest_check_data: result of check_can_live_migrate_destination
                                includes the block_migration flag
        :param block_device_info: result of _get_instance_block_device_info
        :returns: a XenapiLiveMigrateData object
        """
        return self._vmops.check_can_live_migrate_source(context, instance,
                                                         dest_check_data)
    def get_instance_disk_info(self, instance,
                               block_device_info=None):
        """Used by libvirt for live migration. We rely on xenapi
        checks to do this for us.
        """
        pass
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Performs the live migration of the specified instance.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager._post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, migrate VM disk.
:param migrate_data: a XenapiLiveMigrateData object
"""
self._vmops.live_migrate(context, instance, dest, post_method,
recover_method, block_migration, migrate_data)
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info,
destroy_disks=True,
migrate_data=None):
"""Performs a live migration rollback.
:param context: security context
:param instance: instance object that was being migrated
:param network_info: instance network information
:param block_device_info: instance block device information
:param destroy_disks:
if true, destroy disks at destination during cleanup
:param migrate_data: A XenapiLiveMigrateData object
"""
# NOTE(johngarbutt) Destroying the VM is not appropriate here
# and in the cases where it might make sense,
# XenServer has already done it.
# NOTE(sulo): The only cleanup we do explicitly is to forget
# any volume that was attached to the destination during
# live migration. XAPI should take care of all other cleanup.
self._vmops.rollback_live_migration_at_destination(instance,
network_info,
block_device_info)
def pre_live_migration(self, context, instance, block_device_info,
                       network_info, disk_info, migrate_data):
    """Prepare the destination host for an incoming live migration.

    :param block_device_info: must be the result of
        _get_instance_volume_bdms() at compute manager.
    :returns: a XenapiLiveMigrateData object
    """
    # Pure delegation; vmops owns the preparation logic.
    return self._vmops.pre_live_migration(
        context, instance, block_device_info, network_info, disk_info,
        migrate_data)
def post_live_migration(self, context, instance, block_device_info,
                        migrate_data=None):
    """Post operation of live migration at the source host.

    :param context: security context
    :param instance: instance object that was migrated
    :param block_device_info: instance block device information
        (unused; kept for driver-interface compatibility)
    :param migrate_data: a XenapiLiveMigrateData object
    """
    # block_device_info is intentionally not forwarded.
    self._vmops.post_live_migration(context, instance, migrate_data)
def post_live_migration_at_source(self, context, instance, network_info):
    """Unplug VIFs from networks at the source host after migration.

    :param context: security context
    :param instance: instance object reference
    :param network_info: instance network information
    """
    self._vmops.post_live_migration_at_source(
        context, instance, network_info)
def post_live_migration_at_destination(self, context, instance,
                                       network_info,
                                       block_migration=False,
                                       block_device_info=None):
    """Post operation of live migration at destination host.

    :param context: security context
    :param instance:
        nova.db.sqlalchemy.models.Instance object
        instance object that is migrated.
    :param network_info: instance network information
    :param block_migration: if true, post operation of block_migration.
    :param block_device_info: instance block device information
    """
    # BUG FIX: the original forwarded block_device_info twice, once in
    # the position of the block_migration flag (and dropped the flag
    # entirely); forward each argument exactly once.
    self._vmops.post_live_migration_at_destination(
        context, instance, network_info, block_device_info,
        block_migration)
def get_available_nodes(self, refresh=False):
    """Return the single hypervisor hostname this driver manages.

    :param refresh: if true, re-query the host stats first
    :returns: one-element list with the hypervisor hostname
    """
    host_stats = self.host_state.get_host_stats(refresh=refresh)
    return [host_stats["hypervisor_hostname"]]
def host_power_action(self, action):
    """Reboot or shut down the host.

    The only valid values for 'action' on XenServer are 'reboot' or
    'shutdown', even though the API also accepts 'startup'. As this is
    not technically possible on XenServer, since the host is the same
    physical machine as the hypervisor, if this is requested, we need
    to raise an exception.
    """
    if action not in ("reboot", "shutdown"):
        # 'startup' (or anything else) cannot be honoured.
        raise NotImplementedError(
            _("Host startup on XenServer is not supported."))
    return self._host.host_power_action(action)
def set_host_enabled(self, enabled):
    """Set whether this compute host accepts new instances.

    :param enabled: boolean, True to accept new instances
    :returns: whatever the host layer reports back
    """
    return self._host.set_host_enabled(enabled)
def get_host_uptime(self):
    """Return the result of running ``uptime`` on the target host."""
    return self._host.get_host_uptime()
def host_maintenance_mode(self, host, mode):
    """Start/stop the host maintenance window.

    On start this triggers evacuation of guest VMs; the host layer
    implements the actual logic.
    """
    return self._host.host_maintenance_mode(host, mode)
def add_to_aggregate(self, context, aggregate, host, **kwargs):
    """Add a compute host to an aggregate (delegates to the pool)."""
    return self._pool.add_to_aggregate(context, aggregate, host, **kwargs)
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
    """Remove a compute host from an aggregate (delegates to the pool)."""
    return self._pool.remove_from_aggregate(
        context, aggregate, host, **kwargs)
def undo_aggregate_operation(self, context, op, aggregate,
                             host, set_error=True):
    """Undo an aggregate operation after a pool error was raised."""
    return self._pool.undo_aggregate_operation(
        context, op, aggregate, host, set_error)
def resume_state_on_host_boot(self, context, instance, network_info,
                              block_device_info=None):
    """Resume guest state when a host is booted.

    Only a power-on is needed; network/block info is unused here.
    """
    self._vmops.power_on(instance)
def get_per_instance_usage(self):
    """Get information about instance resource usage.

    :returns: dict of nova uuid => dict of usage info
    """
    return self._vmops.get_per_instance_usage()
def attach_interface(self, context, instance, image_meta, vif):
    """Use hotplug to add a network interface to a running instance.

    The counter action to this is :func:`detach_interface`.

    :param context: The request context (unused here).
    :param nova.objects.instance.Instance instance:
        The instance which will get an additional network interface.
    :param nova.objects.ImageMeta image_meta:
        The metadata of the image of the instance (unused here).
    :param nova.network.model.VIF vif:
        The object which has the information about the interface to
        attach.
    :raise nova.exception.NovaException: If the attach fails.
    :return: None
    """
    self._vmops.attach_interface(instance, vif)
def detach_interface(self, context, instance, vif):
    """Use hotunplug to remove a network interface from a running instance.

    The counter action to this is :func:`attach_interface`.

    :param context: The request context (unused here).
    :param nova.objects.instance.Instance instance:
        The instance which gets a network interface removed.
    :param nova.network.model.VIF vif:
        The object which has the information about the interface to
        detach.
    :raise nova.exception.NovaException: If the detach fails.
    :return: None
    """
    self._vmops.detach_interface(instance, vif)

File diff suppressed because it is too large Load Diff

View File

@ -1,570 +0,0 @@
# Copyright (c) 2012 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for host-related functions (start, reboot, etc).
"""
import re
from os_xenapi.client import host_management
from os_xenapi.client import XenAPI
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import fields as obj_fields
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
# Global config and module logger shared by all helpers below.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class Host(object):
    """Implements host related operations (power, maintenance, enable)."""

    def __init__(self, session, virtapi):
        self._session = session
        self._virtapi = virtapi

    def host_power_action(self, action):
        """Reboots or shuts down the host.

        :param action: 'reboot' or 'shutdown'
        :returns: the plugin's reported power action, or the raw response
        """
        args = {"action": jsonutils.dumps(action)}
        methods = {"reboot": "host_reboot", "shutdown": "host_shutdown"}
        response = call_xenhost(self._session, methods[action], args)
        return response.get("power_action", response)

    def host_maintenance_mode(self, host, mode):
        """Start/Stop host maintenance window. On start, it triggers
        guest VMs evacuation.

        :param host: the compute host entering/leaving maintenance
        :param mode: truthy to enter maintenance, falsy to leave
        :returns: 'on_maintenance' / 'off_maintenance'
        :raises exception.NoValidHost: if not all VMs could be evacuated
        """
        if not mode:
            return 'off_maintenance'
        # Candidate destinations: every host in the pool but ourselves.
        host_list = [host_ref for host_ref in
                     self._session.host.get_all()
                     if host_ref != self._session.host_ref]
        migrations_counter = vm_counter = 0
        ctxt = context.get_admin_context()
        for vm_ref, vm_rec in vm_utils.list_vms(self._session):
            for host_ref in host_list:
                try:
                    # Ensure only guest instances are migrated
                    uuid = vm_rec['other_config'].get('nova_uuid')
                    if not uuid:
                        name = vm_rec['name_label']
                        uuid = _uuid_find(ctxt, host, name)
                        if not uuid:
                            LOG.info('Instance %(name)s running on '
                                     '%(host)s could not be found in '
                                     'the database: assuming it is a '
                                     'worker VM and skip ping migration '
                                     'to a new host',
                                     {'name': name, 'host': host})
                            continue
                    instance = objects.Instance.get_by_uuid(ctxt, uuid)
                    vm_counter = vm_counter + 1

                    aggregate = objects.AggregateList.get_by_host(
                        ctxt, host, key=pool_states.POOL_FLAG)
                    if not aggregate:
                        msg = _('Aggregate for host %(host)s count not be'
                                ' found.') % dict(host=host)
                        raise exception.NotFound(msg)

                    dest = _host_find(ctxt, self._session, aggregate[0],
                                      host_ref)
                    instance.host = dest
                    instance.task_state = task_states.MIGRATING
                    instance.save()

                    self._session.VM.pool_migrate(vm_ref, host_ref,
                                                  {"live": "true"})
                    migrations_counter = migrations_counter + 1

                    instance.vm_state = vm_states.ACTIVE
                    instance.save()

                    break
                except XenAPI.Failure:
                    # BUG FIX: the format string was '%(vm_refs from ...'
                    # (missing ')s'), which made %-interpolation against
                    # the args dict fail and the log record unusable.
                    LOG.exception(
                        'Unable to migrate VM %(vm_ref)s from %(host)s',
                        {'vm_ref': vm_ref, 'host': host},
                    )
                    instance.host = host
                    instance.vm_state = vm_states.ACTIVE
                    instance.save()

        if vm_counter == migrations_counter:
            return 'on_maintenance'
        else:
            raise exception.NoValidHost(reason=_('Unable to find suitable '
                                                 'host for VMs evacuation'))

    def set_host_enabled(self, enabled):
        """Sets the compute host's ability to accept new instances."""
        # Since capabilities are gone, use service table to disable a node
        # in scheduler
        cntxt = context.get_admin_context()
        service = objects.Service.get_by_args(cntxt, CONF.host,
                                              'nova-compute')
        service.disabled = not enabled
        service.disabled_reason = 'set by xenapi host_state'
        service.save()

        response = _call_host_management(self._session,
                                         host_management.set_host_enabled,
                                         jsonutils.dumps(enabled))
        return response.get("status", response)

    def get_host_uptime(self):
        """Returns the result of calling "uptime" on the target host."""
        response = _call_host_management(self._session,
                                         host_management.get_host_uptime)
        return response.get("uptime", response)
class HostState(object):
    """Manages information about the XenServer host this compute
    node is running on.
    """
    def __init__(self, session):
        super(HostState, self).__init__()
        self._session = session
        # Cached host stats; populated eagerly so get_host_stats()
        # always has data to serve.
        self._stats = {}
        self.update_status()

    def _get_passthrough_devices(self):
        """Get a list pci devices that are available for pci passthrough.

        We use a plugin to get the output of the lspci command runs on dom0.
        From this list we will extract pci devices that are using the pciback
        kernel driver.

        :returns: a list of pci devices on the node
        """
        def _compile_hex(pattern):
            r"""Return a compiled regular expression pattern into which we have
            replaced occurrences of hex by [\da-fA-F].
            """
            return re.compile(pattern.replace("hex", r"[\da-fA-F]"))

        def _parse_pci_device_string(dev_string):
            """Extract information from the device string about the slot, the
            vendor and the product ID. The string is as follow:
            "Slot:\tBDF\nClass:\txxxx\nVendor:\txxxx\nDevice:\txxxx\n..."
            Return a dictionary with information about the device.
            """
            slot_regex = _compile_hex(r"Slot:\t"
                                      r"((?:hex{4}:)?"  # Domain: (optional)
                                      r"hex{2}:"  # Bus:
                                      r"hex{2}\."  # Device.
                                      r"hex{1})")  # Function
            vendor_regex = _compile_hex(r"\nVendor:\t(hex+)")
            product_regex = _compile_hex(r"\nDevice:\t(hex+)")

            slot_id = slot_regex.findall(dev_string)
            vendor_id = vendor_regex.findall(dev_string)
            product_id = product_regex.findall(dev_string)

            if not slot_id or not vendor_id or not product_id:
                raise exception.NovaException(
                    _("Failed to parse information about"
                      " a pci device for passthrough"))

            type_pci = host_management.get_pci_type(self._session, slot_id[0])

            return {'label': '_'.join(['label',
                                       vendor_id[0],
                                       product_id[0]]),
                    'vendor_id': vendor_id[0],
                    'product_id': product_id[0],
                    'address': slot_id[0],
                    'dev_id': '_'.join(['pci', slot_id[0]]),
                    'dev_type': type_pci,
                    'status': 'available'}

        # Devices are separated by a blank line. That is why we
        # use "\n\n" as separator.
        lspci_out = host_management.get_pci_device_details(self._session)
        pci_list = lspci_out.split("\n\n")

        # For each device of the list, check if it uses the pciback
        # kernel driver and if it does, get information and add it
        # to the list of passthrough_devices. Ignore it if the driver
        # is not pciback.
        passthrough_devices = []

        for dev_string_info in pci_list:
            if "Driver:\tpciback" in dev_string_info:
                new_dev = _parse_pci_device_string(dev_string_info)
                passthrough_devices.append(new_dev)

        return passthrough_devices

    def _get_vgpu_stats(self):
        """Invoke XenAPI to get the stats for VGPUs.

        The return value is a dict which has GPU groups' uuid as
        the keys:
          dict(grp_uuid_1=dict_vgpu_stats_in_grp_1,
               grp_uuid_2=dict_vgpu_stats_in_grp_2,
               ...,
               grp_uuid_n=dict_vgpu_stats_in_grp_n)
        The `dict_vgpu_stats_in_grp_x` is a dict represents the
        vGPU stats in GPU group x. For details, please refer to
        the return value of the function of _get_vgpu_stats_in_group().
        """
        if not CONF.devices.enabled_vgpu_types:
            return {}
        vgpu_stats = {}

        # NOTE(jianghuaw): If there are multiple vGPU types enabled in
        # the configure option, we only choose the first one so that
        # we support only one vGPU type per compute node at the moment.
        # Once we switch to use the nested resource providers, we will
        # remove these lines to allow multiple vGPU types within multiple
        # GPU groups (each group has a different vGPU type enabled).
        if len(CONF.devices.enabled_vgpu_types) > 1:
            LOG.warning('XenAPI only supports one GPU type per compute node,'
                        ' only first type will be used.')
        cfg_enabled_types = CONF.devices.enabled_vgpu_types[:1]

        vgpu_grp_refs = self._session.call_xenapi('GPU_group.get_all')
        for ref in vgpu_grp_refs:
            grp_uuid = self._session.call_xenapi('GPU_group.get_uuid', ref)
            stat = self._get_vgpu_stats_in_group(ref, cfg_enabled_types)
            if stat:
                vgpu_stats[grp_uuid] = stat

        LOG.debug("Returning vGPU stats: %s", vgpu_stats)

        return vgpu_stats

    def _get_vgpu_stats_in_group(self, grp_ref, vgpu_types):
        """Get stats for the specified vGPU types in a GPU group.

        NOTE(Jianghuaw): In XenAPI, a GPU group is the minimal unit
        from where to create a vGPU for an instance. So here, we
        report vGPU resources for a particular GPU group. When we use
        nested resource providers to represent the vGPU resources,
        each GPU group will be a child resource provider under the
        compute node.

        The return value is a dict. For example:
        {'uuid': '6444c6ee-3a49-42f5-bebb-606b52175e67',
         'type_name': 'Intel GVT-g',
         'max_heads': 1,
         'total': 7,
         'remaining': 7,
         }
        """
        type_refs_in_grp = self._session.call_xenapi(
            'GPU_group.get_enabled_VGPU_types', grp_ref)

        # Map enabled type model-name -> type ref for this group.
        type_names_in_grp = {self._session.call_xenapi(
                                 'VGPU_type.get_model_name',
                                 type_ref): type_ref
                             for type_ref in type_refs_in_grp}
        # Get the vGPU types enabled both in this GPU group and in the
        # nova conf.
        enabled_types = set(vgpu_types) & set(type_names_in_grp)
        if not enabled_types:
            return

        stat = {}
        # Get the sorted enabled types, so that we can always choose the same
        # type when there are multiple enabled vGPU types.
        sorted_types = sorted(enabled_types)
        chosen_type = sorted_types[0]
        if len(sorted_types) > 1:
            LOG.warning('XenAPI only supports one vGPU type per GPU group,'
                        ' but enabled multiple vGPU types: %(available)s.'
                        ' Choosing the first one: %(chosen)s.',
                        dict(available=sorted_types,
                             chosen=chosen_type))
        type_ref = type_names_in_grp[chosen_type]
        type_uuid = self._session.call_xenapi('VGPU_type.get_uuid', type_ref)
        stat['uuid'] = type_uuid
        stat['type_name'] = chosen_type
        stat['max_heads'] = int(self._session.call_xenapi(
            'VGPU_type.get_max_heads', type_ref))

        stat['total'] = self._get_total_vgpu_in_grp(grp_ref, type_ref)
        stat['remaining'] = int(self._session.call_xenapi(
            'GPU_group.get_remaining_capacity',
            grp_ref,
            type_ref))

        return stat

    def _get_total_vgpu_in_grp(self, grp_ref, type_ref):
        """Get the total capacity of vGPUs in the group."""
        pgpu_recs = self._session.call_xenapi(
            'PGPU.get_all_records_where', 'field "GPU_group" = "%s"' % grp_ref)

        # Sum the per-PGPU max capacity for the chosen vGPU type.
        total = 0
        for pgpu_ref in pgpu_recs:
            pgpu_rec = pgpu_recs[pgpu_ref]
            if type_ref in pgpu_rec['enabled_VGPU_types']:
                cap = pgpu_rec['supported_VGPU_max_capacities'][type_ref]
                total += int(cap)
        return total

    def get_host_stats(self, refresh=False):
        """Return the current state of the host. If 'refresh' is
        True, run the update first.
        """
        if refresh or not self._stats:
            self.update_status()
        return self._stats

    def get_disk_used(self, sr_ref):
        """Since glance images are downloaded and snapshotted before they are
        used, only a small proportion of its VDI will be in use and it will
        never grow. We only need to count the virtual size for disks that
        are attached to a VM - every other disk can count physical.
        """
        def _vdi_attached(vdi_ref):
            try:
                vbds = self._session.VDI.get_VBDs(vdi_ref)
                for vbd in vbds:
                    if self._session.VBD.get_currently_attached(vbd):
                        return True
            except self._session.XenAPI.Failure:
                # VDI or VBD may no longer exist - in which case, it's
                # not attached
                pass
            return False

        allocated = 0
        physical_used = 0

        all_vdis = self._session.SR.get_VDIs(sr_ref)
        for vdi_ref in all_vdis:
            try:
                vdi_physical = \
                    int(self._session.VDI.get_physical_utilisation(vdi_ref))
                if _vdi_attached(vdi_ref):
                    # Attached disks can grow to their virtual size.
                    allocated += \
                        int(self._session.VDI.get_virtual_size(vdi_ref))
                else:
                    allocated += vdi_physical
                physical_used += vdi_physical
            except (ValueError, self._session.XenAPI.Failure):
                LOG.exception('Unable to get size for vdi %s', vdi_ref)

        return (allocated, physical_used)

    def update_status(self):
        """Since under Xenserver, a compute node runs on a given host,
        we can get host status information using xenapi.
        """
        LOG.debug("Updating host stats")
        data = _call_host_management(self._session,
                                     host_management.get_host_data)
        if data:
            # Augment the plugin's host data with disk, memory, CPU,
            # PCI-passthrough and vGPU details gathered via XenAPI.
            sr_ref = vm_utils.scan_default_sr(self._session)
            sr_rec = self._session.SR.get_record(sr_ref)
            total = int(sr_rec["physical_size"])
            (allocated, used) = self.get_disk_used(sr_ref)
            data["disk_total"] = total
            data["disk_used"] = used
            data["disk_allocated"] = allocated
            data["disk_available"] = total - used
            data["supported_instances"] = to_supported_instances(
                data.get("host_capabilities")
            )
            data["cpu_model"] = to_cpu_model(
                data.get("host_cpu_info")
            )
            host_memory = data.get('host_memory', None)
            if host_memory:
                # Flatten the nested memory dict into top-level keys.
                data["host_memory_total"] = host_memory.get('total', 0)
                data["host_memory_overhead"] = host_memory.get('overhead', 0)
                data["host_memory_free"] = host_memory.get('free', 0)
                data["host_memory_free_computed"] = host_memory.get(
                    'free-computed', 0)
                del data['host_memory']
            # Keep reporting the original hostname if it changed at
            # runtime; a service restart is needed to pick up a rename.
            if (data['host_hostname'] !=
                    self._stats.get('host_hostname', data['host_hostname'])):
                LOG.error('Hostname has changed from %(old)s to %(new)s. '
                          'A restart is required to take effect.',
                          {'old': self._stats['host_hostname'],
                           'new': data['host_hostname']})
                data['host_hostname'] = self._stats['host_hostname']
            data['hypervisor_hostname'] = data['host_hostname']
            vcpus_used = 0
            for vm_ref, vm_rec in vm_utils.list_vms(self._session):
                vcpus_used = vcpus_used + int(vm_rec['VCPUs_max'])
            data['vcpus_used'] = vcpus_used
            data['pci_passthrough_devices'] = self._get_passthrough_devices()
            data['vgpu_stats'] = self._get_vgpu_stats()
            self._stats = data
def to_supported_instances(host_capabilities):
    """Translate XenServer capability strings into supported instances.

    Each capability looks like "<ostype>-<version>-<arch>"; every
    parseable entry yields an (arch, HVType.XEN, vm_mode) tuple.
    Unparseable entries are logged and skipped.

    :param host_capabilities: iterable of capability strings, or None
    :returns: list of (arch, hv_type, vm_mode) tuples
    """
    if not host_capabilities:
        return []

    supported = []
    for capability in host_capabilities:
        try:
            # 'capability'is unicode but we want arch/ostype
            # to be strings to match the standard constants
            capability = str(capability)
            ostype, _version, guestarch = capability.split("-")
            supported.append((obj_fields.Architecture.canonicalize(guestarch),
                              obj_fields.HVType.XEN,
                              obj_fields.VMMode.canonicalize(ostype)))
        except ValueError:
            LOG.warning("Failed to extract instance support from %s",
                        capability)
    return supported
def to_cpu_model(host_cpu_info):
    """Convert XenAPI host CPU info into nova's cpu_info dict.

    The XenAPI driver returns data in the format

    {"physical_features": "0098e3fd-bfebfbff-00000001-28100800",
     "modelname": "Intel(R) Xeon(R) CPU X3430 @ 2.40GHz",
     "vendor": "GenuineIntel",
     "features": "0098e3fd-bfebfbff-00000001-28100800",
     "family": 6,
     "maskable": "full",
     "cpu_count": 4,
     "socket_count": "1",
     "flags": "fpu de tsc msr pae mce cx8 apic sep mtrr mca cmov ...",
     "stepping": 5,
     "model": 30,
     "features_after_reboot": "0098e3fd-bfebfbff-00000001-28100800",
     "speed": "2394.086"}

    :param host_cpu_info: raw host CPU description dict, or None
    :returns: cpu_info dict (model/vendor/topology/features),
        or None when no host CPU info is available
    """
    if host_cpu_info is None:
        return None

    cpu_info = dict()
    # TODO(berrange) the data we're putting in model is not
    # exactly comparable to what libvirt puts in model. The
    # libvirt model names are a well defined short string
    # which is really an aliass for a particular set of
    # feature flags. The Xen model names are raw printable
    # strings from the kernel with no specific semantics
    cpu_info["model"] = host_cpu_info["modelname"]
    cpu_info["vendor"] = host_cpu_info["vendor"]

    # TODO(berrange) perhaps we could fill in 'arch' field too
    # by looking at 'host_capabilities' for the Xen host ?

    topology = dict()
    topology["sockets"] = int(host_cpu_info["socket_count"])
    # BUG FIX: '/' is true division in Python 3 and produced a float
    # core count (e.g. 4 / 1 == 4.0); counts are integral, so use
    # floor division.
    topology["cores"] = (int(host_cpu_info["cpu_count"]) //
                         int(host_cpu_info["socket_count"]))
    # TODO(berrange): if 'ht' is present in the 'flags' list
    # is it possible to infer that the 'cpu_count' is in fact
    # sockets * cores * threads ? Unclear if 'ht' would remain
    # visible when threads are disabled in BIOS ?
    topology["threads"] = 1
    cpu_info["topology"] = topology

    cpu_info["features"] = host_cpu_info["flags"].split(" ")
    return cpu_info
def call_xenhost(session, method, arg_dict):
    """Invoke *method* on the xenhost dom0 plugin and decode its reply.

    Common wrapper used by the several host operations that talk to
    the xenhost plugin.

    :returns: the JSON-decoded response; '' when the plugin returned
        nothing; None when the response could not be parsed; or the
        XenAPI error detail when the call itself failed.
    """
    # Create a task ID as something that won't match any instance ID
    try:
        raw = session.call_plugin('xenhost.py', method, args=arg_dict)
        return jsonutils.loads(raw) if raw else ''
    except ValueError:
        LOG.exception("Unable to get updated status")
        return None
    except session.XenAPI.Failure as e:
        LOG.error("The call to %(method)s returned "
                  "an error: %(e)s.", {'method': method, 'e': e})
        return e.details[1]
def _call_host_management(session, method, *args):
"""There will be several methods that will need this general
handling for interacting with the dom0 plugin, so this abstracts
out that behavior. the call_xenhost will be removed once we deprecated
those functions which are not needed anymore
"""
try:
result = method(session, *args)
if not result:
return ''
return jsonutils.loads(result)
except ValueError:
LOG.exception("Unable to get updated status")
return None
except session.XenAPI.Failure as e:
LOG.error("The call to %(method)s returned an error: %(e)s.",
{'method': method.__name__, 'e': e})
return e.details[1]
def _uuid_find(context, host, name_label):
    """Return the UUID of the instance on *host* named *name_label*.

    :returns: the matching instance uuid, or None when not found
    """
    matches = (inst.uuid
               for inst in objects.InstanceList.get_by_host(context, host)
               if inst.name == name_label)
    return next(matches, None)
def _host_find(context, session, src_aggregate, host_ref):
"""Return the host from the xenapi host reference.
:param src_aggregate: the aggregate that the compute host being put in
maintenance (source of VMs) belongs to
:param host_ref: the hypervisor host reference (destination of VMs)
:return: the compute host that manages host_ref
"""
# NOTE: this would be a lot simpler if nova-compute stored
# CONF.host in the XenServer host's other-config map.
# TODO(armando-migliaccio): improve according the note above
uuid = session.host.get_uuid(host_ref)
for compute_host, host_uuid in src_aggregate.metadetails.items():
if host_uuid == uuid:
return compute_host
raise exception.NoValidHost(reason='Host %(host_uuid)s could not be found '
'from aggregate metadata: %(metadata)s.' %
{'host_uuid': uuid,
'metadata': src_aggregate.metadetails})

View File

@ -1,93 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import sys
from os_xenapi.client import exception as xenapi_exception
from os_xenapi.client import host_glance
from oslo_log import log as logging
import six
from nova.compute import utils as compute_utils
import nova.conf
from nova import exception
from nova.image import glance
from nova import utils
from nova.virt.xenapi import vm_utils
# Global config and module logger used by the Glance image store below.
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
class GlanceStore(object):
    """Image handler that moves VHDs between Glance and dom0 via plugin."""

    def _call_glance_plugin(self, context, instance, session, fn, image_id,
                            params):
        """Run a dom0 Glance plugin function *fn* with retry support.

        A fresh Glance endpoint is picked for each attempt via
        pick_glance(); each failed attempt is recorded on the instance
        through retry_cb.
        """
        glance_api_servers = glance.get_api_servers(context)
        sr_path = vm_utils.get_sr_path(session)
        extra_headers = glance.generate_identity_headers(context)

        def pick_glance(kwargs):
            # Rotate through the configured Glance API servers and fill
            # in the endpoint/api_version for this attempt.
            server = next(glance_api_servers)
            kwargs['endpoint'] = server
            kwargs['api_version'] = 2
            # NOTE(sdague): is the return significant here at all?
            return server

        def retry_cb(context, instance, exc=None):
            # Record each failed attempt as an instance fault.
            if exc:
                exc_info = sys.exc_info()
                LOG.debug(six.text_type(exc), exc_info=exc_info)
                compute_utils.add_instance_fault_from_exc(
                    context, instance, exc, exc_info)

        cb = functools.partial(retry_cb, context, instance)

        return fn(session, CONF.glance.num_retries, pick_glance, cb, image_id,
                  sr_path, extra_headers, **params)

    def download_image(self, context, session, instance, image_id):
        """Download *image_id* as VHDs; returns the plugin's VDI list.

        :raises exception.CouldNotFetchImage: when all retries fail
        """
        params = {'uuid_stack': vm_utils._make_uuid_stack()}
        try:
            vdis = self._call_glance_plugin(context, instance, session,
                                            host_glance.download_vhd, image_id,
                                            params)
        except xenapi_exception.PluginRetriesExceeded:
            raise exception.CouldNotFetchImage(image_id=image_id)

        return vdis

    def upload_image(self, context, session, instance, image_id, vdi_uuids):
        """Upload the given VDIs to Glance under *image_id*.

        :raises exception.CouldNotUploadImage: when all retries fail
        :raises exception.ImageNotFound: when the image vanished
        """
        params = {'vdi_uuids': vdi_uuids}
        props = params['properties'] = {}
        props['auto_disk_config'] = instance['auto_disk_config']
        props['os_type'] = instance.get('os_type', None) or (
            CONF.xenserver.default_os_type)

        compression_level = vm_utils.get_compression_level()
        if compression_level:
            props['xenapi_image_compression_level'] = compression_level

        # System metadata may force auto_disk_config off for this image.
        auto_disk_config = utils.get_auto_disk_config_from_instance(instance)
        if utils.is_auto_disk_config_disabled(auto_disk_config):
            props["auto_disk_config"] = "disabled"

        try:
            self._call_glance_plugin(context, instance, session,
                                     host_glance.upload_vhd, image_id, params)
        except xenapi_exception.PluginRetriesExceeded:
            raise exception.CouldNotUploadImage(image_id=image_id)
        except xenapi_exception.PluginImageNotFound:
            raise exception.ImageNotFound(image_id=image_id)

View File

@ -1,121 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import shutil
import tarfile
from oslo_utils import importutils
from nova import exception
from nova.image import glance
# VDI format identifier for raw disks, as understood by the XenAPI plugins.
_VDI_FORMAT_RAW = 1

# Module-level Glance API client shared by the image helpers below.
IMAGE_API = glance.API()

# Registry of supported image handlers: config value -> class path
# relative to the nova.virt.xenapi.image package (see get_image_handler()).
IMAGE_HANDLERS = {'direct_vhd': 'glance.GlanceStore',
                  'vdi_local_dev': 'vdi_through_dev.VdiThroughDevStore',
                  'vdi_remote_stream': 'vdi_stream.VdiStreamStore'}
def get_image_handler(handler_name):
    """Instantiate the image handler registered under *handler_name*.

    :param handler_name: a key of IMAGE_HANDLERS
    :raises exception.ImageHandlerUnsupported: for unknown names
    :returns: a new instance of the configured handler class
    """
    if handler_name not in IMAGE_HANDLERS:
        raise exception.ImageHandlerUnsupported(image_handler=handler_name)
    return importutils.import_object(
        'nova.virt.xenapi.image.%s' % IMAGE_HANDLERS[handler_name])
class GlanceImage(object):
    """Thin wrapper around a Glance image with lazily-cached metadata."""

    def __init__(self, context, image_href_or_id):
        self._context = context
        self._image_id = image_href_or_id
        self._cached_meta = None

    @property
    def meta(self):
        # Fetch image metadata from Glance once and memoize it.
        if self._cached_meta is None:
            self._cached_meta = IMAGE_API.get(self._context, self._image_id)
        return self._cached_meta

    def download_to(self, fileobj):
        """Stream the image body into *fileobj*."""
        return IMAGE_API.download(self._context, self._image_id, fileobj)

    def is_raw_tgz(self):
        """True when the image is a raw disk in a tgz container."""
        actual = [self.meta.get(key)
                  for key in ('disk_format', 'container_format')]
        return ['raw', 'tgz'] == actual

    def data(self):
        """Return an iterable over the image bytes."""
        return IMAGE_API.download(self._context, self._image_id)
class RawImage(object):
    """A raw-format image backed by a GlanceImage."""

    def __init__(self, glance_image):
        self.glance_image = glance_image

    def get_size(self):
        """Image size in bytes, taken from the Glance metadata."""
        return int(self.glance_image.meta['size'])

    def stream_to(self, fileobj):
        """Write the raw image contents into *fileobj*."""
        return self.glance_image.download_to(fileobj)
class IterableToFileAdapter(object):
    """A degenerate file-like so that an iterable could be read like a file.

    As Glance client returns an iterable, but tarfile requires a file like,
    this is the adapter between the two. This allows tarfile to access the
    glance stream.
    """

    def __init__(self, iterable):
        self.iterator = iterable.__iter__()
        self.remaining_data = ''

    def read(self, size):
        """Return up to *size* units of data; '' at end of stream."""
        # Serve buffered leftovers first; pull fresh chunks only when
        # the buffer is empty (skipping any empty chunks yielded).
        chunk = self.remaining_data
        while not chunk:
            try:
                chunk = next(self.iterator)
            except StopIteration:
                return ''
        self.remaining_data = chunk[size:]
        return chunk[0:size]
class RawTGZImage(object):
    """A raw disk image stored on Glance inside a gzipped tarball."""

    def __init__(self, glance_image):
        self.glance_image = glance_image
        self._tar_info = None
        self._tar_file = None

    def _as_file(self):
        # Wrap the glance byte iterator so tarfile can read() it.
        return IterableToFileAdapter(self.glance_image.data())

    def _as_tarfile(self):
        # Streaming gzip mode: members can only be visited in order.
        return tarfile.open(mode='r|gz', fileobj=self._as_file())

    def _ensure_member(self):
        """Open the streamed tarball and position at its first member."""
        if self._tar_file is None:
            self._tar_file = self._as_tarfile()
            self._tar_info = self._tar_file.next()

    def get_size(self):
        """Size in bytes of the first tar member (the disk image)."""
        self._ensure_member()
        return self._tar_info.size

    def stream_to(self, target_file):
        """Copy the disk member into *target_file* and close the tar."""
        self._ensure_member()
        source_file = self._tar_file.extractfile(self._tar_info)
        shutil.copyfileobj(source_file, target_file)
        self._tar_file.close()

View File

@ -1,85 +0,0 @@
# Copyright 2017 Citrix Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" This class will stream image data directly between glance and VDI.
"""
from os_xenapi.client import exception as xenapi_exception
from os_xenapi.client import image as xenapi_image
from oslo_log import log as logging
import nova.conf
from nova import exception
from nova.image import glance
from nova import utils as nova_utils
from nova.virt.xenapi.image import utils
from nova.virt.xenapi import vm_utils
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)

# Shared Glance API client used for both download and upload streaming.
IMAGE_API = glance.API()
class VdiStreamStore(object):
    """Streams image data directly between Glance and VDIs."""

    def download_image(self, context, session, instance, image_id):
        """Stream *image_id* from Glance into newly created VDIs.

        :raises exception.CouldNotFetchImage: on any os-xenapi failure
        :returns: the VDIs produced by the stream
        """
        try:
            host_url = CONF.xenserver.connection_url
            image_stream = utils.IterableToFileAdapter(
                IMAGE_API.download(context, image_id))
            sr_ref = vm_utils.safe_find_sr(session)
            vdis = xenapi_image.stream_to_vdis(context, session,
                                               instance, host_url,
                                               sr_ref, image_stream)
        except xenapi_exception.OsXenApiException as e:
            LOG.error("Image download failed with exception: %s", e)
            raise exception.CouldNotFetchImage(image_id=image_id)
        return vdis

    def _get_metadata(self, context, instance, image_id):
        """Build the Glance metadata dict for an upload from *instance*."""
        metadata = IMAGE_API.get(context, image_id)
        metadata['disk_format'] = 'vhd'
        metadata['container_format'] = 'ovf'
        metadata['auto_disk_config'] = str(instance['auto_disk_config'])
        metadata['os_type'] = instance.get('os_type') or (
            CONF.xenserver.default_os_type)
        # Set size as zero, so that it will update the size in the end
        # based on the uploaded image data.
        metadata['size'] = 0

        # Adjust the auto_disk_config value basing on instance's
        # system metadata.
        # TODO(mriedem): Consider adding an abstract base class for the
        # various image handlers to contain common code like this.
        auto_disk = nova_utils.get_auto_disk_config_from_instance(instance)
        if nova_utils.is_auto_disk_config_disabled(auto_disk):
            metadata['auto_disk_config'] = "disabled"

        return metadata

    def upload_image(self, context, session, instance, image_id, vdi_uuids):
        """Stream the given VDIs up to Glance as image *image_id*.

        :raises exception.CouldNotUploadImage: on any os-xenapi failure
        """
        try:
            host_url = CONF.xenserver.connection_url
            level = vm_utils.get_compression_level()
            image_chunks = xenapi_image.stream_from_vdis(
                context, session, instance, host_url, vdi_uuids,
                compresslevel=level)
            image_stream = utils.IterableToFileAdapter(image_chunks)
            IMAGE_API.update(context, image_id,
                             self._get_metadata(context, instance, image_id),
                             data=image_stream)
        except xenapi_exception.OsXenApiException as e:
            LOG.error("Image upload failed with exception: %s", e)
            raise exception.CouldNotUploadImage(image_id=image_id)

View File

@ -1,108 +0,0 @@
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import os
import tarfile
import eventlet
from eventlet import greenio
from nova.image import glance
from nova import utils
from nova.virt.xenapi import vm_utils
class VdiThroughDevStore(object):
    """Handle virtual disks by attaching them to the OS domU.

    Currently only upload to Glance is supported; the upload format is a
    raw disk packed inside a tgz archive.
    """

    def upload_image(self, context, session, instance, image_id, vdi_uuids):
        """Upload the given VDIs to glance as a raw disk inside a tgz."""
        uploader = UploadToGlanceAsRawTgz(context, session, instance,
                                          image_id, vdi_uuids)
        return uploader.upload_image()

    def download_image(self, context, session, instance, image_id):
        """Not supported by this store yet."""
        # TODO(matelakat) Move through-dev image download functionality to this
        # method.
        raise NotImplementedError()
class UploadToGlanceAsRawTgz(object):
    """One-shot command: attach a VDI to this domU, tar+gzip its raw
    contents and stream the result to glance.

    The producer (TarGzProducer) and consumer (UpdateGlanceImage) run as
    two greenthreads connected by an OS pipe, so the image is never
    materialised on local disk.
    """
    def __init__(self, context, session, instance, image_id, vdi_uuids):
        # NOTE(review): 'instance' is accepted for interface parity with
        # other image handlers but is not stored or used here.
        self.context = context
        self.image_id = image_id
        self.session = session
        self.vdi_uuids = vdi_uuids
    def _get_virtual_size(self):
        # Virtual size of the first (root) VDI; used as the tar member size.
        return self.session.call_xenapi(
            'VDI.get_virtual_size', self._get_vdi_ref())
    def _get_vdi_ref(self):
        # Only the first VDI is uploaded - this handler supports a single
        # raw disk per image.
        return self.session.call_xenapi('VDI.get_by_uuid', self.vdi_uuids[0])
    def _perform_upload(self, devpath):
        # Wire producer -> pipe -> consumer and wait for both to finish.
        # The producer closes the write end when done, which lets the
        # consumer's read() see EOF and complete the glance upload.
        readfile, writefile = self._create_pipe()
        size = self._get_virtual_size()
        producer = TarGzProducer(devpath, writefile, size, 'disk.raw')
        consumer = glance.UpdateGlanceImage(
            self.context, self.image_id, producer.get_metadata(), readfile)
        pool = eventlet.GreenPool()
        pool.spawn(producer.start)
        pool.spawn(consumer.start)
        pool.waitall()
    def _create_pipe(self):
        # Unbuffered green pipes so neither side blocks the hub.
        rpipe, wpipe = os.pipe()
        rfile = greenio.GreenPipe(rpipe, 'rb', 0)
        wfile = greenio.GreenPipe(wpipe, 'wb', 0)
        return rfile, wfile
    def upload_image(self):
        """Attach the VDI read-only and stream its device to glance."""
        vdi_ref = self._get_vdi_ref()
        with vm_utils.vdi_attached(self.session, vdi_ref,
                                   read_only=True) as dev:
            devpath = utils.make_dev_path(dev)
            # temporary_chown: the block device is root-owned; give the
            # service user read access for the duration of the upload.
            with utils.temporary_chown(devpath):
                self._perform_upload(devpath)
class TarGzProducer(object):
    """Pack a single file into a gzipped tar stream.

    Reads ``devpath`` and writes a tgz archive containing it (under the
    member name ``fname``) to the ``writefile`` file object, closing that
    object when finished so the reader sees EOF.
    """

    def __init__(self, devpath, writefile, size, fname):
        self.fpath = devpath
        self.output = writefile
        self.size = size
        self.fname = fname

    def get_metadata(self):
        """Glance metadata describing the produced archive."""
        return {'disk_format': 'raw',
                'container_format': 'tgz'}

    def start(self):
        """Stream the source file into the output as a tgz archive."""
        with contextlib.closing(self.output):
            entry = tarfile.TarInfo(name=self.fname)
            # tar frames each member with its size up front, so the size
            # must be known before any data is streamed.
            entry.size = int(self.size)
            with tarfile.open(fileobj=self.output, mode='w|gz') as archive:
                with self._open_file(self.fpath, 'rb') as source:
                    archive.addfile(entry, fileobj=source)

    def _open_file(self, *args):
        # Kept as a seam so tests can substitute a fake file factory.
        return open(*args)

View File

@ -1,52 +0,0 @@
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of network
records and their attributes like bridges, PIFs, QoS, as well as
their lookup functions.
"""
from nova import exception
from nova.i18n import _
def find_network_with_name_label(session, name_label):
    """Return the unique network whose name label matches, if any.

    Returns None when nothing matches; raises NovaException when the
    label is ambiguous (more than one network carries it).
    """
    matches = session.network.get_by_name_label(name_label)
    if not matches:
        return None
    if len(matches) > 1:
        raise exception.NovaException(
            _('Found non-unique network for name_label %s') %
            name_label)
    return matches[0]
def find_network_with_bridge(session, bridge):
    """Return the network on which the bridge is attached, if found.

    The bridge is defined in the nova db and can be found either in the
    'bridge' or 'name_label' fields of the XenAPI network record.
    Raises NovaException when zero or more than one network matches.
    """
    expr = ('field "name__label" = "%s" or field "bridge" = "%s"' %
            (bridge, bridge))
    records = session.network.get_all_records_where(expr)
    count = len(records)
    if count > 1:
        raise exception.NovaException(
            _('Found non-unique network for bridge %s') % bridge)
    if count == 0:
        raise exception.NovaException(
            _('Found no network for bridge %s') % bridge)
    return list(records.keys())[0]

View File

@ -1,240 +0,0 @@
# Copyright (c) 2012 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for Pool-related functions (join, eject, etc).
"""
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
import six.moves.urllib.parse as urlparse
from nova.compute import rpcapi as compute_rpcapi
import nova.conf
from nova import exception
from nova.i18n import _
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class ResourcePool(object):
    """Implements resource pool operations."""
    def __init__(self, session, virtapi):
        # Cache identity details of the local hypervisor host; they are
        # reused when joining/ejecting hosts and when building the
        # slave_info payload handed to the pool master.
        host_rec = session.host.get_record(session.host_ref)
        self._host_name = host_rec['hostname']
        self._host_addr = host_rec['address']
        self._host_uuid = host_rec['uuid']
        self._session = session
        self._virtapi = virtapi
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
    def undo_aggregate_operation(self, context, op, aggregate,
                                 host, set_error):
        """Undo aggregate operation when pool error raised."""
        try:
            if set_error:
                metadata = {pool_states.KEY: pool_states.ERROR}
                aggregate.update_metadata(metadata)
            op(host)
        except Exception:
            # Best-effort rollback: never propagate, just record that the
            # aggregate ended up in an unrecoverable state.
            LOG.exception('Aggregate %(aggregate_id)s: unrecoverable '
                          'state during operation on %(host)s',
                          {'aggregate_id': aggregate.id, 'host': host})
    def add_to_aggregate(self, context, aggregate, host, slave_info=None):
        """Add a compute host to an aggregate.

        First host becomes the pool master; later hosts either pool-join
        locally (when this host is the master) or forward the request to
        the master over RPC.
        """
        if not pool_states.is_hv_pool(aggregate.metadata):
            return
        if CONF.xenserver.independent_compute:
            raise exception.NotSupportedWithOption(
                operation='adding to a XenServer pool',
                option='CONF.xenserver.independent_compute')
        # Aggregate states in which an add request cannot be honoured.
        invalid = {pool_states.CHANGING: _('setup in progress'),
                   pool_states.DISMISSED: _('aggregate deleted'),
                   pool_states.ERROR: _('aggregate in error')}
        if (aggregate.metadata[pool_states.KEY] in invalid.keys()):
            raise exception.InvalidAggregateActionAdd(
                aggregate_id=aggregate.id,
                reason=invalid[aggregate.metadata[pool_states.KEY]])
        if (aggregate.metadata[pool_states.KEY] == pool_states.CREATED):
            aggregate.update_metadata({pool_states.KEY: pool_states.CHANGING})
        if len(aggregate.hosts) == 1:
            # this is the first host of the pool -> make it master
            self._init_pool(aggregate.id, aggregate.name)
            # save metadata so that we can find the master again
            metadata = {'master_compute': host,
                        host: self._host_uuid,
                        pool_states.KEY: pool_states.ACTIVE}
            aggregate.update_metadata(metadata)
        else:
            # the pool is already up and running, we need to figure out
            # whether we can serve the request from this host or not.
            master_compute = aggregate.metadata['master_compute']
            if master_compute == CONF.host and master_compute != host:
                # this is the master -> do a pool-join
                # To this aim, nova compute on the slave has to go down.
                # NOTE: it is assumed that ONLY nova compute is running now
                self._join_slave(aggregate.id, host,
                                 slave_info.get('compute_uuid'),
                                 slave_info.get('url'), slave_info.get('user'),
                                 slave_info.get('passwd'))
                metadata = {host: slave_info.get('xenhost_uuid'), }
                aggregate.update_metadata(metadata)
            elif master_compute and master_compute != host:
                # send rpc cast to master, asking to add the following
                # host with specified credentials.
                slave_info = self._create_slave_info()
                self.compute_rpcapi.add_aggregate_host(
                    context, host, aggregate, master_compute, slave_info)
    def remove_from_aggregate(self, context, aggregate, host, slave_info=None):
        """Remove a compute host from an aggregate.

        The master ejects slaves directly; a slave forwards the request
        to the master; the master can only remove itself when it is the
        last host in the pool.
        """
        slave_info = slave_info or dict()
        if not pool_states.is_hv_pool(aggregate.metadata):
            return
        # Aggregate states in which a removal request cannot be honoured.
        invalid = {pool_states.CREATED: _('no hosts to remove'),
                   pool_states.CHANGING: _('setup in progress'),
                   pool_states.DISMISSED: _('aggregate deleted')}
        if aggregate.metadata[pool_states.KEY] in invalid.keys():
            raise exception.InvalidAggregateActionDelete(
                aggregate_id=aggregate.id,
                reason=invalid[aggregate.metadata[pool_states.KEY]])
        master_compute = aggregate.metadata['master_compute']
        if master_compute == CONF.host and master_compute != host:
            # this is the master -> instruct it to eject a host from the pool
            host_uuid = aggregate.metadata[host]
            self._eject_slave(aggregate.id,
                              slave_info.get('compute_uuid'), host_uuid)
            aggregate.update_metadata({host: None})
        elif master_compute == host:
            # Remove master from its own pool -> destroy pool only if the
            # master is on its own, otherwise raise fault. Destroying a
            # pool made only by master is fictional
            if len(aggregate.hosts) > 1:
                # NOTE: this could be avoided by doing a master
                # re-election, but this is simpler for now.
                raise exception.InvalidAggregateActionDelete(
                    aggregate_id=aggregate.id,
                    reason=_('Unable to eject %s '
                             'from the pool; pool not empty')
                    % host)
            self._clear_pool(aggregate.id)
            aggregate.update_metadata({'master_compute': None, host: None})
        elif master_compute and master_compute != host:
            # A master exists -> forward pool-eject request to master
            slave_info = self._create_slave_info()
            self.compute_rpcapi.remove_aggregate_host(
                context, host, aggregate.id, master_compute, slave_info)
        else:
            # this shouldn't have happened
            raise exception.AggregateError(aggregate_id=aggregate.id,
                                           action='remove_from_aggregate',
                                           reason=_('Unable to eject %s '
                                                    'from the pool; No master found')
                                           % host)
    def _join_slave(self, aggregate_id, host, compute_uuid, url, user, passwd):
        """Joins a slave into a XenServer resource pool."""
        try:
            args = {'compute_uuid': compute_uuid,
                    'url': url,
                    'user': user,
                    'password': passwd,
                    'force': jsonutils.dumps(CONF.xenserver.use_join_force),
                    'master_addr': self._host_addr,
                    'master_user': CONF.xenserver.connection_username,
                    'master_pass': CONF.xenserver.connection_password, }
            self._session.call_plugin('xenhost.py', 'host_join', args)
        except self._session.XenAPI.Failure as e:
            LOG.error("Pool-Join failed: %s", e)
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='add_to_aggregate',
                                           reason=_('Unable to join %s '
                                                    'in the pool') % host)
    def _eject_slave(self, aggregate_id, compute_uuid, host_uuid):
        """Eject a slave from a XenServer resource pool."""
        try:
            # shutdown nova-compute; if there are other VMs running, e.g.
            # guest instances, the eject will fail. That's a precaution
            # to deal with the fact that the admin should evacuate the host
            # first. The eject wipes out the host completely.
            vm_ref = self._session.VM.get_by_uuid(compute_uuid)
            self._session.VM.clean_shutdown(vm_ref)
            host_ref = self._session.host.get_by_uuid(host_uuid)
            self._session.pool.eject(host_ref)
        except self._session.XenAPI.Failure as e:
            LOG.error("Pool-eject failed: %s", e)
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='remove_from_aggregate',
                                           reason=six.text_type(e.details))
    def _init_pool(self, aggregate_id, aggregate_name):
        """Set the name label of a XenServer pool."""
        try:
            pool_ref = self._session.pool.get_all()[0]
            self._session.pool.set_name_label(pool_ref, aggregate_name)
        except self._session.XenAPI.Failure as e:
            LOG.error("Unable to set up pool: %s.", e)
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='add_to_aggregate',
                                           reason=six.text_type(e.details))
    def _clear_pool(self, aggregate_id):
        """Clear the name label of a XenServer pool."""
        try:
            pool_ref = self._session.pool.get_all()[0]
            self._session.pool.set_name_label(pool_ref, '')
        except self._session.XenAPI.Failure as e:
            LOG.error("Pool-set_name_label failed: %s", e)
            raise exception.AggregateError(aggregate_id=aggregate_id,
                                           action='remove_from_aggregate',
                                           reason=six.text_type(e.details))
    def _create_slave_info(self):
        """XenServer specific info needed to join the hypervisor pool."""
        # replace the address from the xenapi connection url
        # because this might be 169.254.0.1, i.e. xenapi
        # NOTE: password in clear is not great, but it'll do for now
        sender_url = swap_xapi_host(
            CONF.xenserver.connection_url, self._host_addr)
        return {
            "url": sender_url,
            "user": CONF.xenserver.connection_username,
            "passwd": CONF.xenserver.connection_password,
            "compute_uuid": vm_utils.get_this_vm_uuid(None),
            "xenhost_uuid": self._host_uuid,
        }
def swap_xapi_host(url, host_addr):
    """Replace the XenServer address present in 'url' with 'host_addr'.

    Credentials, port, path and query components are preserved; only the
    host portion of the network location is swapped.

    :param url: a XenAPI connection URL, e.g. ``http://169.254.0.1/``
    :param host_addr: address that should replace the URL's host
    :returns: the rewritten URL string
    """
    parsed = urlparse.urlparse(url)
    # NOTE: a plain str.replace() on parsed.hostname is unsafe here:
    # urlparse lower-cases the hostname (so a mixed-case host in the URL
    # would never match), and the hostname string could also occur in
    # the userinfo, path or query. Rebuild the netloc explicitly instead.
    netloc = ''
    if parsed.username:
        netloc = parsed.username
        if parsed.password:
            netloc += ':' + parsed.password
        netloc += '@'
    netloc += host_addr
    if parsed.port:
        netloc += ':%d' % parsed.port
    return parsed._replace(netloc=netloc).geturl()

View File

@ -1,51 +0,0 @@
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Possible states for xen resource pools.
A pool may be 'created', in which case the admin has triggered its
creation, but the underlying hypervisor pool has not actually being set up
yet. A pool may be 'changing', meaning that the underlying hypervisor
pool is being setup. A pool may be 'active', in which case the underlying
hypervisor pool is up and running. A pool may be 'dismissed' when it has
no hosts and it has been deleted. A pool may be in 'error' in all other
cases.
A 'created' pool becomes 'changing' during the first request of
adding a host. During a 'changing' status no other requests will be accepted;
this is to allow the hypervisor layer to instantiate the underlying pool
without any potential race condition that may incur in master/slave-based
configurations. The pool goes into the 'active' state when the underlying
pool has been correctly instantiated.
All other operations (e.g. add/remove hosts) that succeed will keep the
pool in the 'active' state. If a number of continuous requests fail,
an 'active' pool goes into an 'error' state. To recover from such a state,
admin intervention is required. Currently an error state is irreversible,
that is, in order to recover from it a pool must be deleted.
"""
# Pool lifecycle states; see the module docstring for the transitions.
CREATED = 'created'
CHANGING = 'changing'
ACTIVE = 'active'
ERROR = 'error'
DISMISSED = 'dismissed'

# Metadata keys
KEY = 'operational_state'
POOL_FLAG = 'hypervisor_pool'


def is_hv_pool(metadata):
    """Checks if aggregate is a hypervisor_pool."""
    # Membership test directly on the mapping - no need to materialize
    # the keys view.
    return POOL_FLAG in metadata

View File

@ -1,443 +0,0 @@
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
# Copyright (C) 2011 Nicira, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""VIF drivers for XenAPI."""
from os_xenapi.client import host_network
from oslo_log import log as logging
from nova.compute import power_state
import nova.conf
from nova import exception
from nova.i18n import _
from nova.network import model as network_model
from nova.virt.xenapi import network_utils
from nova.virt.xenapi import vm_utils
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
class XenVIFDriver(object):
    """Base class for XenAPI VIF drivers: common create/unplug logic plus
    no-op hooks that concrete drivers override.
    """
    def __init__(self, xenapi_session):
        self._session = xenapi_session
    def _get_vif_ref(self, vif, vm_ref):
        """Return the VIF ref on vm_ref whose MAC matches vif, or None."""
        vif_refs = self._session.call_xenapi("VM.get_VIFs", vm_ref)
        for vif_ref in vif_refs:
            try:
                vif_rec = self._session.call_xenapi('VIF.get_record', vif_ref)
                # Match by MAC address: that is the stable identifier
                # shared between the nova VIF model and the XenAPI record.
                if vif_rec['MAC'] == vif['address']:
                    return vif_ref
            except Exception:
                # When got exception here, maybe the vif is removed during the
                # loop, ignore this vif and continue
                continue
        return None
    def _create_vif(self, vif, vif_rec, vm_ref):
        """Create a XenAPI VIF from vif_rec; raise NovaException on failure."""
        try:
            vif_ref = self._session.call_xenapi('VIF.create', vif_rec)
        except Exception as e:
            LOG.warning("Failed to create vif, exception:%(exception)s, "
                        "vif:%(vif)s", {'exception': e, 'vif': vif})
            raise exception.NovaException(
                reason=_("Failed to create vif %s") % vif)
        LOG.debug("create vif %(vif)s for vm %(vm_ref)s successfully",
                  {'vif': vif, 'vm_ref': vm_ref})
        return vif_ref
    def unplug(self, instance, vif, vm_ref):
        """Hot-unplug (if attached) and destroy the matching XenAPI VIF.

        A missing VIF is treated as already unplugged; any other failure
        is wrapped in NovaException.
        """
        try:
            LOG.debug("unplug vif, vif:%(vif)s, vm_ref:%(vm_ref)s",
                      {'vif': vif, 'vm_ref': vm_ref}, instance=instance)
            vif_ref = self._get_vif_ref(vif, vm_ref)
            if not vif_ref:
                LOG.debug("vif didn't exist, no need to unplug vif %s",
                          vif, instance=instance)
                return
            # hot unplug the VIF first
            self.hot_unplug(vif, instance, vm_ref, vif_ref)
            self._session.call_xenapi('VIF.destroy', vif_ref)
        except Exception as e:
            LOG.warning(
                "Fail to unplug vif:%(vif)s, exception:%(exception)s",
                {'vif': vif, 'exception': e}, instance=instance)
            raise exception.NovaException(
                reason=_("Failed to unplug vif %s") % vif)
    def get_vif_interim_net_name(self, vif_id):
        # Name is truncated to the kernel's interface-name length limit.
        return ("net-" + vif_id)[:network_model.NIC_NAME_LEN]
    def hot_plug(self, vif, instance, vm_ref, vif_ref):
        """hotplug virtual interface to running instance.
        :param nova.network.model.VIF vif:
            The object which has the information about the interface to attach.
        :param nova.objects.instance.Instance instance:
            The instance which will get an additional network interface.
        :param string vm_ref:
            The instance's reference from hypervisor's point of view.
        :param string vif_ref:
            The interface's reference from hypervisor's point of view.
        :return: None
        """
        pass
    def hot_unplug(self, vif, instance, vm_ref, vif_ref):
        """hot unplug virtual interface from running instance.
        :param nova.network.model.VIF vif:
            The object which has the information about the interface to detach.
        :param nova.objects.instance.Instance instance:
            The instance which will remove additional network interface.
        :param string vm_ref:
            The instance's reference from hypervisor's point of view.
        :param string vif_ref:
            The interface's reference from hypervisor's point of view.
        :return: None
        """
        pass
    def post_start_actions(self, instance, vif_ref):
        """post actions when the instance is power on.
        :param nova.objects.instance.Instance instance:
            The instance which will execute extra actions after power on
        :param string vif_ref:
            The interface's reference from hypervisor's point of view.
        :return: None
        """
        pass
    def create_vif_interim_network(self, vif):
        """Hook: create a per-VIF interim network. No-op in the base class."""
        pass
    def delete_network_and_bridge(self, instance, vif_id):
        """Hook: tear down the per-VIF network. No-op in the base class."""
        pass
class XenAPIOpenVswitchDriver(XenVIFDriver):
    """VIF driver for Open vSwitch with XenAPI."""
    def plug(self, instance, vif, vm_ref=None, device=None):
        """create an interim network for this vif; and build
        the vif_rec which will be used by xapi to create VM vif
        """
        if not vm_ref:
            vm_ref = vm_utils.lookup(self._session, instance['name'])
        if not vm_ref:
            raise exception.VirtualInterfacePlugException(
                "Cannot find instance %s, discard vif plug" % instance['name'])
        # if VIF already exists, return this vif_ref directly
        vif_ref = self._get_vif_ref(vif, vm_ref)
        if vif_ref:
            LOG.debug("VIF %s already exists when plug vif",
                      vif_ref, instance=instance)
            return vif_ref
        if not device:
            device = 0
        # Create an interim network for each VIF, so dom0 has a single
        # bridge for each device (the emulated and PV ethernet devices
        # will both be on this bridge.
        network_ref = self.create_vif_interim_network(vif)
        vif_rec = {}
        vif_rec['device'] = str(device)
        vif_rec['network'] = network_ref
        vif_rec['VM'] = vm_ref
        vif_rec['MAC'] = vif['address']
        vif_rec['MTU'] = '1500'
        vif_rec['qos_algorithm_type'] = ''
        vif_rec['qos_algorithm_params'] = {}
        # neutron-port-id lets post_start_actions/teardown map the VIF
        # back to its neutron port.
        vif_rec['other_config'] = {'neutron-port-id': vif['id']}
        vif_ref = self._create_vif(vif, vif_rec, vm_ref)
        # call XenAPI to plug vif
        self.hot_plug(vif, instance, vm_ref, vif_ref)
        return vif_ref
    def unplug(self, instance, vif, vm_ref):
        """Unplug the VIF, then tear down its interim network and bridges."""
        super(XenAPIOpenVswitchDriver, self).unplug(instance, vif, vm_ref)
        self.delete_network_and_bridge(instance, vif['id'])
    def delete_network_and_bridge(self, instance, vif_id):
        """Delete network and bridge:
        1. delete the patch port pair between the integration bridge and
           the qbr linux bridge(if exist) and the interim network.
        2. destroy the interim network
        3. delete the OVS bridge service for the interim network
        4. delete linux bridge qbr and related ports if exist
        """
        network = self._get_network_by_vif(vif_id)
        if not network:
            return
        vifs = self._session.network.get_VIFs(network)
        bridge_name = self._session.network.get_bridge(network)
        if vifs:
            # Still has vifs attached to this network
            for remain_vif in vifs:
                # if the remain vifs are on the local server, give up all the
                # operations. If the remain vifs are on the remote hosts, keep
                # the network and delete the bridge
                if self._get_host_by_vif(remain_vif) == self._session.host_ref:
                    return
        else:
            # No vif left, delete the network
            try:
                self._session.network.destroy(network)
            except Exception as e:
                LOG.warning("Failed to destroy network for vif (id=%(if)s), "
                            "exception:%(exception)s",
                            {'if': vif_id, 'exception': e}, instance=instance)
                raise exception.VirtualInterfaceUnplugException(
                    reason=_("Failed to destroy network"))
        # Two cases:
        # 1) No vif left, just delete the bridge
        # 2) For resize/intra-pool migrate, vifs on both of the
        #    source and target VM will be connected to the same
        #    interim network. If the VM is resident on a remote host,
        #    linux bridge on current host will be deleted.
        self.delete_bridge(instance, vif_id, bridge_name)
    def delete_bridge(self, instance, vif_id, bridge_name):
        """Remove the patch-port pair, the OVS bridge and any qbr linux
        bridge left behind for this VIF on the local host.
        """
        LOG.debug('destroying patch port pair for vif id: vif_id=%(vif_id)s',
                  {'vif_id': vif_id})
        patch_port1, tap_name = self._get_patch_port_pair_names(vif_id)
        try:
            # delete the patch port pair
            host_network.ovs_del_port(self._session, bridge_name, patch_port1)
        except Exception as e:
            LOG.warning("Failed to delete patch port pair for vif id %(if)s,"
                        " exception:%(exception)s",
                        {'if': vif_id, 'exception': e}, instance=instance)
            raise exception.VirtualInterfaceUnplugException(
                reason=_("Failed to delete patch port pair"))
        LOG.debug('destroying bridge: bridge=%(br)s', {'br': bridge_name})
        try:
            # delete bridge if it still exists.
            # As there are patch ports existing on this bridge when
            # destroying won't be destroyed automatically by XAPI, let's
            # destroy it at here.
            host_network.ovs_del_br(self._session, bridge_name)
            qbr_name = self._get_qbr_name(vif_id)
            qvb_name, qvo_name = self._get_veth_pair_names(vif_id)
            if self._device_exists(qbr_name):
                # delete tap port, qvb port and qbr
                LOG.debug(
                    "destroy linux bridge %(qbr)s when unplug vif id"
                    " %(vif_id)s", {'qbr': qbr_name, 'vif_id': vif_id})
                self._delete_linux_port(qbr_name, tap_name)
                self._delete_linux_port(qbr_name, qvb_name)
                self._delete_linux_bridge(qbr_name)
            host_network.ovs_del_port(self._session,
                                      CONF.xenserver.ovs_integration_bridge,
                                      qvo_name)
        except Exception as e:
            LOG.warning("Failed to delete bridge for vif id %(if)s, "
                        "exception:%(exception)s",
                        {'if': vif_id, 'exception': e}, instance=instance)
            raise exception.VirtualInterfaceUnplugException(
                reason=_("Failed to delete bridge"))
    def _get_network_by_vif(self, vif_id):
        """Return the interim network record for vif_id, or None."""
        net_name = self.get_vif_interim_net_name(vif_id)
        network = network_utils.find_network_with_name_label(
            self._session, net_name)
        if network is None:
            LOG.debug("Failed to find network for vif id %(if)s",
                      {'if': vif_id})
            return
        return network
    def _get_host_by_vif(self, vif_id):
        """Return the host ref on which the VM owning vif_id resides."""
        network = self._get_network_by_vif(vif_id)
        if not network:
            return
        vif_info = self._session.VIF.get_all_records_where(
            'field "network" = "%s"' % network)
        # Exactly one VIF is expected on the interim network per host.
        if not vif_info or len(vif_info) != 1:
            raise exception.NovaException(
                "Couldn't find vif id information in network %s"
                % network)
        vm_ref = self._session.VIF.get_VM(list(vif_info.keys())[0])
        return self._session.VM.get_resident_on(vm_ref)
    def hot_plug(self, vif, instance, vm_ref, vif_ref):
        """Plug the VIF into a running VM and run post-start wiring."""
        # hot plug vif only when VM's power state is running
        LOG.debug("Hot plug vif, vif: %s", vif, instance=instance)
        state = vm_utils.get_power_state(self._session, vm_ref)
        if state != power_state.RUNNING:
            LOG.debug("Skip hot plug VIF, VM is not running, vif: %s", vif,
                      instance=instance)
            return
        self._session.VIF.plug(vif_ref)
        self.post_start_actions(instance, vif_ref)
    def hot_unplug(self, vif, instance, vm_ref, vif_ref):
        """Unplug the VIF from a running VM; no-op when it is not running."""
        # hot unplug vif only when VM's power state is running
        LOG.debug("Hot unplug vif, vif: %s", vif, instance=instance)
        state = vm_utils.get_power_state(self._session, vm_ref)
        if state != power_state.RUNNING:
            LOG.debug("Skip hot unplug VIF, VM is not running, vif: %s", vif,
                      instance=instance)
            return
        self._session.VIF.unplug(vif_ref)
    def _get_qbr_name(self, iface_id):
        # Linux bridge name for the neutron security-group wiring.
        return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN]
    def _get_veth_pair_names(self, iface_id):
        # (qvb, qvo) veth pair connecting qbr to the integration bridge.
        return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN],
                ("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN])
    def _device_exists(self, device):
        """Check if ethernet device exists."""
        try:
            host_network.ip_link_get_dev(self._session, device)
            return True
        except Exception:
            # Swallow exception from plugin, since this indicates the device
            # doesn't exist
            return False
    def _delete_net_dev(self, dev):
        """Delete a network device only if it exists."""
        if self._device_exists(dev):
            LOG.debug("delete network device '%s'", dev)
            host_network.ip_link_del_dev(self._session, dev)
    def _create_veth_pair(self, dev1_name, dev2_name):
        """Create a pair of veth devices with the specified names,
        deleting any previous devices with those names.
        """
        LOG.debug("Create veth pair, port1:%(qvb)s, port2:%(qvo)s",
                  {'qvb': dev1_name, 'qvo': dev2_name})
        for dev in [dev1_name, dev2_name]:
            self._delete_net_dev(dev)
        host_network.ip_link_add_veth_pair(self._session, dev1_name, dev2_name)
        for dev in [dev1_name, dev2_name]:
            host_network.ip_link_set_dev(self._session, dev, 'up')
            host_network.ip_link_set_promisc(self._session, dev, 'on')
    def _create_linux_bridge(self, vif_rec):
        """create a qbr linux bridge for neutron security group
        """
        iface_id = vif_rec['other_config']['neutron-port-id']
        linux_br_name = self._get_qbr_name(iface_id)
        if not self._device_exists(linux_br_name):
            LOG.debug("Create linux bridge %s", linux_br_name)
            host_network.brctl_add_br(self._session, linux_br_name)
            host_network.brctl_set_fd(self._session, linux_br_name, '0')
            host_network.brctl_set_stp(self._session, linux_br_name, 'off')
            host_network.ip_link_set_dev(self._session, linux_br_name, 'up')
        qvb_name, qvo_name = self._get_veth_pair_names(iface_id)
        if not self._device_exists(qvo_name):
            self._create_veth_pair(qvb_name, qvo_name)
            host_network.brctl_add_if(self._session, linux_br_name, qvb_name)
            host_network.ovs_create_port(
                self._session, CONF.xenserver.ovs_integration_bridge,
                qvo_name, iface_id, vif_rec['MAC'], 'active')
        return linux_br_name
    def _delete_linux_port(self, qbr_name, port_name):
        """Best-effort removal of a port from a linux bridge."""
        try:
            # delete port in linux bridge
            host_network.brctl_del_if(self._session, qbr_name, port_name)
            self._delete_net_dev(port_name)
        except Exception:
            LOG.debug("Fail to delete linux port %(port_name)s on bridge "
                      "%(qbr_name)s",
                      {'port_name': port_name, 'qbr_name': qbr_name})
    def _delete_linux_bridge(self, qbr_name):
        """Best-effort removal of a qbr linux bridge."""
        try:
            # delete linux bridge qbrxxx
            host_network.ip_link_set_dev(self._session, qbr_name, 'down')
            host_network.brctl_del_br(self._session, qbr_name)
        except Exception:
            LOG.debug("Fail to delete linux bridge %s", qbr_name)
    def post_start_actions(self, instance, vif_ref):
        """Do needed actions post vif start:
        plug the interim ovs bridge to the integration bridge;
        set external_ids to the int-br port which will service
        for this vif.
        """
        vif_rec = self._session.VIF.get_record(vif_ref)
        network_ref = vif_rec['network']
        bridge_name = self._session.network.get_bridge(network_ref)
        network_uuid = self._session.network.get_uuid(network_ref)
        iface_id = vif_rec['other_config']['neutron-port-id']
        patch_port1, tap_name = self._get_patch_port_pair_names(iface_id)
        LOG.debug('plug_ovs_bridge: port1=%(port1)s, port2=%(port2)s,'
                  'network_uuid=%(uuid)s, bridge_name=%(bridge_name)s',
                  {'port1': patch_port1, 'port2': tap_name,
                   'uuid': network_uuid, 'bridge_name': bridge_name})
        if bridge_name is None:
            raise exception.VirtualInterfacePlugException(
                _("Failed to find bridge for vif"))
        # Create Linux bridge qbrXXX
        linux_br_name = self._create_linux_bridge(vif_rec)
        if not self._device_exists(tap_name):
            LOG.debug("create veth pair for interim bridge %(interim_bridge)s "
                      "and linux bridge %(linux_bridge)s",
                      {'interim_bridge': bridge_name,
                       'linux_bridge': linux_br_name})
            self._create_veth_pair(tap_name, patch_port1)
            host_network.brctl_add_if(self._session, linux_br_name, tap_name)
            # Add port to interim bridge
            host_network.ovs_add_port(self._session, bridge_name, patch_port1)
    def create_vif_interim_network(self, vif):
        """Create (or reuse) the per-VIF interim XenAPI network."""
        net_name = self.get_vif_interim_net_name(vif['id'])
        # In a pooled environment, make the network shared in order to ensure
        # it can also be used in the target host while live migrating.
        # "assume_network_is_shared" flag does not affect environments where
        # storage pools are not used.
        network_rec = {'name_label': net_name,
                       'name_description': "interim network for vif[%s]"
                       % vif['id'],
                       'other_config': {'assume_network_is_shared': 'true'}}
        network_ref = network_utils.find_network_with_name_label(
            self._session, net_name)
        if network_ref:
            # already exist, just return
            # in some scenarios: e..g resize/migrate, it won't create new
            # interim network.
            return network_ref
        try:
            network_ref = self._session.network.create(network_rec)
        except Exception as e:
            LOG.warning("Failed to create interim network for vif %(if)s, "
                        "exception:%(exception)s",
                        {'if': vif, 'exception': e})
            raise exception.VirtualInterfacePlugException(
                _("Failed to create the interim network for vif"))
        return network_ref
    def _get_patch_port_pair_names(self, iface_id):
        # (vif, tap) patch-port pair linking the interim bridge to qbr.
        return (("vif%s" % iface_id)[:network_model.NIC_NAME_LEN],
                ("tap%s" % iface_id)[:network_model.NIC_NAME_LEN])

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,394 +0,0 @@
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of volumes,
and storage repositories
"""
import re
import uuid
from eventlet import greenthread
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import versionutils
import nova.conf
from nova import exception
from nova.i18n import _
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
# Namespace for SRs so we can reliably generate a UUID
# Generated from uuid.uuid5(uuid.UUID(int=0), 'volume_utils-SR_UUID')
SR_NAMESPACE = uuid.UUID("3cca4135-a809-5bb3-af62-275fbfe87178")
def parse_sr_info(connection_data, description=''):
    """Derive (sr_uuid, label, params) for an SR from connection data.

    When no 'sr_uuid' is supplied, a deterministic UUID is generated from
    the iSCSI target identity so the same volume always maps onto the
    same SR.  Mutates *connection_data* by popping 'name_label'.
    """
    params = {}
    if 'sr_uuid' in connection_data:
        sr_uuid = connection_data['sr_uuid']
        # Copy through any extra keys the backend asked us to introduce.
        for key in connection_data.get('introduce_sr_keys', {}):
            params[key] = connection_data[key]
    else:
        params = _parse_volume_info(connection_data)
        identity = "%s/%s/%s" % (params['target'], params['port'],
                                 params['targetIQN'])
        sr_uuid = str(uuid.uuid5(SR_NAMESPACE, identity))
    label = connection_data.pop('name_label', 'tempSR-%s' % sr_uuid)
    params['name_description'] = connection_data.get('name_description',
                                                     description)
    return (sr_uuid, label, params)
def _parse_volume_info(connection_data):
    """Parse device_path and mountpoint as they can be used by XenAPI.

    In particular, the mountpoint (e.g. /dev/sdc) must be translated
    into a numeric literal.

    :raises: exception.StorageError when volume id, host or IQN are
        missing from the connection data.
    """
    volume_id = connection_data['volume_id']
    portal = connection_data['target_portal']
    host = _get_target_host(portal)
    port = _get_target_port(portal)
    iqn = connection_data['target_iqn']

    LOG.debug('(vol_id,host,port,iqn): '
              '(%(vol_id)s,%(host)s,%(port)s,%(iqn)s)',
              {"vol_id": volume_id, "host": host,
               "port": port, "iqn": iqn})

    if volume_id is None or host is None or iqn is None:
        # Mask credentials before they can end up in the error message.
        raise exception.StorageError(
            reason=_('Unable to obtain target information %s') %
            strutils.mask_password(connection_data))

    volume_info = {
        'id': volume_id,
        'target': host,
        'port': port,
        'targetIQN': iqn,
    }
    # CHAP credentials, when present, are passed through for the SR.
    if connection_data.get('auth_method') == 'CHAP':
        volume_info['chapuser'] = connection_data['auth_username']
        volume_info['chappassword'] = connection_data['auth_password']
    return volume_info
def _get_target_host(iscsi_string):
"""Retrieve target host."""
if iscsi_string:
host = iscsi_string.split(':')[0]
if len(host) > 0:
return host
return CONF.xenserver.target_host
def _get_target_port(iscsi_string):
"""Retrieve target port."""
if iscsi_string and ':' in iscsi_string:
return iscsi_string.split(':')[1]
return CONF.xenserver.target_port
def introduce_sr(session, sr_uuid, label, params):
    """Introduce an existing SR to this host and make it usable.

    Introduces the SR record, creates and plugs a PBD for the local
    host, then rescans the SR so its VDIs become visible.

    :returns: the XenAPI reference of the introduced SR.
    """
    LOG.debug('Introducing SR %s', label)
    sr_type, sr_desc = _handle_sr_params(params)
    # Newer XenServer needs an explicit backend kind for iSCSI SRs
    # (see _requires_backend_kind, Bug #1502929).
    if _requires_backend_kind(session.product_version) and sr_type == 'iscsi':
        params['backend-kind'] = 'vbd'
    sr_ref = session.call_xenapi('SR.introduce', sr_uuid, label, sr_desc,
                                 sr_type, '', False, params)

    LOG.debug('Creating PBD for SR')
    pbd_ref = _create_pbd(session, sr_ref, params)

    LOG.debug('Plugging SR')
    # The PBD must be plugged before the SR can be scanned.
    session.call_xenapi("PBD.plug", pbd_ref)

    session.call_xenapi("SR.scan", sr_ref)
    return sr_ref
def _requires_backend_kind(version):
    """Return True if *version* (a sequence of ints) is 6.5 or later.

    Such versions need an explicit 'backend-kind' for iSCSI SRs
    (fix for Bug #1502929).
    """
    dotted = '.'.join(str(part) for part in version)
    return versionutils.is_compatible('6.5', dotted)
def _handle_sr_params(params):
if 'id' in params:
del params['id']
sr_type = params.pop('sr_type', 'iscsi')
sr_desc = params.pop('name_description', '')
return sr_type, sr_desc
def _create_pbd(session, sr_ref, params):
pbd_rec = {}
pbd_rec['host'] = session.host_ref
pbd_rec['SR'] = sr_ref
pbd_rec['device_config'] = params
pbd_ref = session.call_xenapi("PBD.create", pbd_rec)
return pbd_ref
def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
    """Introduce VDI in the host.

    Locates the VDI within the SR (by uuid, by LUN id, or the first VDI
    when neither is given) and, if it is not already managed, introduces
    it to the host.

    :returns: the XenAPI reference of the VDI.
    :raises: exception.StorageError if the VDI cannot be found or
        introduced.
    """
    try:
        vdi_ref = _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun)
        if vdi_ref is None:
            # The VDI may not be visible yet: wait, rescan and retry once.
            greenthread.sleep(CONF.xenserver.introduce_vdi_retry_wait)
            session.call_xenapi("SR.scan", sr_ref)
            vdi_ref = _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun)
    except session.XenAPI.Failure:
        LOG.exception('Unable to introduce VDI on SR')
        raise exception.StorageError(
            reason=_('Unable to introduce VDI on SR %s') % sr_ref)

    if not vdi_ref:
        raise exception.StorageError(
            reason=_('VDI not found on SR %(sr)s (vdi_uuid '
                     '%(vdi_uuid)s, target_lun %(target_lun)s)') %
            {'sr': sr_ref, 'vdi_uuid': vdi_uuid,
             'target_lun': target_lun})

    try:
        vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
        LOG.debug(vdi_rec)
    except session.XenAPI.Failure:
        LOG.exception('Unable to get record of VDI')
        raise exception.StorageError(
            reason=_('Unable to get record of VDI %s on') % vdi_ref)

    if vdi_rec['managed']:
        # We do not need to introduce the vdi
        return vdi_ref

    try:
        # Re-introduce the VDI with its existing metadata so the host
        # starts managing it.
        return session.call_xenapi("VDI.introduce",
                                   vdi_rec['uuid'],
                                   vdi_rec['name_label'],
                                   vdi_rec['name_description'],
                                   vdi_rec['SR'],
                                   vdi_rec['type'],
                                   vdi_rec['sharable'],
                                   vdi_rec['read_only'],
                                   vdi_rec['other_config'],
                                   vdi_rec['location'],
                                   vdi_rec['xenstore_data'],
                                   vdi_rec['sm_config'])
    except session.XenAPI.Failure:
        LOG.exception('Unable to introduce VDI for SR')
        raise exception.StorageError(
            reason=_('Unable to introduce VDI for SR %s') % sr_ref)
def _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun):
if vdi_uuid:
LOG.debug("vdi_uuid: %s", vdi_uuid)
return session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
elif target_lun:
vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref)
for curr_ref in vdi_refs:
curr_rec = session.call_xenapi("VDI.get_record", curr_ref)
if ('sm_config' in curr_rec and
'LUNid' in curr_rec['sm_config'] and
curr_rec['sm_config']['LUNid'] == str(target_lun)):
return curr_ref
else:
return (session.call_xenapi("SR.get_VDIs", sr_ref))[0]
return None
def purge_sr(session, sr_ref):
    """Forget *sr_ref* unless any of its VDIs still has a VBD attached."""
    # Make sure no VBDs are referencing the SR's VDIs before forgetting.
    for vdi_ref in session.call_xenapi("SR.get_VDIs", sr_ref):
        if session.call_xenapi("VDI.get_VBDs", vdi_ref):
            LOG.warning('Cannot purge SR with referenced VDIs')
            return
    forget_sr(session, sr_ref)
def forget_sr(session, sr_ref):
    """Forgets the storage repository without destroying the VDIs within."""
    LOG.debug('Forgetting SR...')
    # PBDs must be unplugged before SR.forget will succeed.
    _unplug_pbds(session, sr_ref)
    session.call_xenapi("SR.forget", sr_ref)
def _unplug_pbds(session, sr_ref):
try:
pbds = session.call_xenapi("SR.get_PBDs", sr_ref)
except session.XenAPI.Failure as exc:
LOG.warning('Ignoring exception %(exc)s when getting PBDs'
' for %(sr_ref)s', {'exc': exc, 'sr_ref': sr_ref})
return
for pbd in pbds:
try:
session.call_xenapi("PBD.unplug", pbd)
except session.XenAPI.Failure as exc:
LOG.warning('Ignoring exception %(exc)s when unplugging'
' PBD %(pbd)s', {'exc': exc, 'pbd': pbd})
def get_device_number(mountpoint):
    """Translate *mountpoint* to a device number, raising on failure."""
    number = _mountpoint_to_number(mountpoint)
    if number >= 0:
        return number
    raise exception.StorageError(
        reason=_('Unable to obtain target information %s') % mountpoint)
def _mountpoint_to_number(mountpoint):
"""Translate a mountpoint like /dev/sdc into a numeric."""
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
if re.match('^[hs]d[a-p]$', mountpoint):
return (ord(mountpoint[2:3]) - ord('a'))
elif re.match('^x?vd[a-p]$', mountpoint):
return (ord(mountpoint[-1]) - ord('a'))
elif re.match('^[0-9]+$', mountpoint):
return int(mountpoint, 10)
else:
LOG.warning('Mountpoint cannot be translated: %s', mountpoint)
return -1
def find_sr_by_uuid(session, sr_uuid):
    """Return the storage repository given a uuid, or None if unknown."""
    try:
        return session.call_xenapi("SR.get_by_uuid", sr_uuid)
    except session.XenAPI.Failure as exc:
        # UUID_INVALID simply means no such SR; anything else propagates.
        if exc.details[0] != 'UUID_INVALID':
            raise
        return None
def find_sr_from_vbd(session, vbd_ref):
    """Find the SR reference from the VBD reference."""
    try:
        vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
        return session.call_xenapi("VDI.get_SR", vdi_ref)
    except session.XenAPI.Failure:
        LOG.exception('Unable to find SR from VBD')
        raise exception.StorageError(
            reason=_('Unable to find SR from VBD %s') % vbd_ref)
def find_sr_from_vdi(session, vdi_ref):
    """Find the SR reference from the VDI reference."""
    try:
        return session.call_xenapi("VDI.get_SR", vdi_ref)
    except session.XenAPI.Failure:
        LOG.exception('Unable to find SR from VDI')
        raise exception.StorageError(
            reason=_('Unable to find SR from VDI %s') % vdi_ref)
def find_vbd_by_number(session, vm_ref, dev_number):
    """Get the VBD reference from the device number.

    Returns None when the VM has no VBD with that userdevice; lookup
    failures for individual VBDs are logged and skipped.
    """
    wanted = str(dev_number)
    for vbd_ref in session.VM.get_VBDs(vm_ref) or []:
        try:
            if session.VBD.get_userdevice(vbd_ref) == wanted:
                return vbd_ref
        except session.XenAPI.Failure:
            LOG.debug("Error looking up VBD %s for %s" % (vbd_ref, vm_ref),
                      exc_info=True)
def is_booted_from_volume(session, vm_ref, user_device=0):
    """Determine if the given user device is backed by a volume."""
    # TODO(bkaminski): We have opened the scope of this method to accept
    # userdevice. We should rename this method and its references for
    # clarity.
    vbd_ref = find_vbd_by_number(session, vm_ref, user_device)
    other_config = session.VBD.get_other_config(vbd_ref)
    # Volume-backed VBDs are tagged with 'osvol' at attach time.
    return bool(other_config.get('osvol', False))
def _get_vdi_import_path(session, task_ref, vdi_ref, disk_format):
session_id = session.get_session_id()
str_fmt = '/import_raw_vdi?session_id={}&task_id={}&vdi={}&format={}'
return str_fmt.format(session_id, task_ref, vdi_ref, disk_format)
def _stream_to_vdi(conn, vdi_import_path, file_size, file_obj):
    """PUT exactly *file_size* bytes from *file_obj* to the import path."""
    CHUNK_SIZE = 16 * 1024
    headers = {'Content-Type': 'application/octet-stream',
               'Content-Length': '%s' % file_size}

    LOG.debug('Initialising PUT request to %s (Headers: %s)',
              vdi_import_path, headers)
    conn.request('PUT', vdi_import_path, headers=headers)

    # Send full chunks first, then whatever remains.
    remaining = file_size
    while remaining >= CHUNK_SIZE:
        conn.send(file_obj.read(CHUNK_SIZE))
        remaining -= CHUNK_SIZE
    if remaining:
        conn.send(file_obj.read(remaining))

    resp = conn.getresponse()
    LOG.debug("Connection response status:reason is "
              "%(status)s:%(reason)s",
              {'status': resp.status, 'reason': resp.reason})
def stream_to_vdi(session, instance, disk_format,
                  file_obj, file_size, vdi_ref):
    """Stream *file_obj* into *vdi_ref* over the XenAPI import interface."""
    task_name_label = 'VDI_IMPORT_for_' + instance['name']
    with session.custom_task(task_name_label) as task_ref:
        path = _get_vdi_import_path(session, task_ref, vdi_ref, disk_format)
        with session.http_connection() as conn:
            try:
                _stream_to_vdi(conn, path, file_size, file_obj)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    LOG.error('Streaming disk to VDI failed with error: %s',
                              e, instance=instance)

View File

@ -1,226 +0,0 @@
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for Storage-related functions (attach, detach, etc).
"""
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import strutils
from nova import exception
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)
class VolumeOps(object):
    """Management class for Volume-related tasks."""

    def __init__(self, session):
        # XenAPI session used for all host/hypervisor calls.
        self._session = session

    def attach_volume(self, connection_info, instance_name, mountpoint,
                      hotplug=True):
        """Attach volume to VM instance.

        :returns: (sr_uuid, vdi_uuid) for the attached volume.
        """
        vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
        # NOTE(review): mountpoint is passed into the dev_number slot of
        # _attach_volume; it is only translated to a device number later,
        # in _attach_volume_to_vm.
        return self._attach_volume(connection_info, vm_ref,
                                   instance_name, mountpoint, hotplug)

    def connect_volume(self, connection_info):
        """Attach volume to hypervisor, but not the VM."""
        return self._attach_volume(connection_info)

    def _attach_volume(self, connection_info, vm_ref=None, instance_name=None,
                       dev_number=None, hotplug=False):
        """Connect the volume's SR/VDI and optionally attach it to a VM.

        :returns: (sr_uuid, vdi_uuid) for the connected volume.
        """
        self._check_is_supported_driver_type(connection_info)

        connection_data = connection_info['data']
        sr_ref, sr_uuid = self._connect_to_volume_provider(connection_data,
                                                           instance_name)
        try:
            vdi_ref = self._connect_hypervisor_to_volume(sr_ref,
                                                         connection_data)
            vdi_uuid = self._session.VDI.get_uuid(vdi_ref)
            LOG.info('Connected volume (vdi_uuid): %s', vdi_uuid)
            if vm_ref:
                self._attach_volume_to_vm(vdi_ref, vm_ref, instance_name,
                                          dev_number, hotplug)
            return (sr_uuid, vdi_uuid)
        except Exception:
            with excutils.save_and_reraise_exception():
                # NOTE(sirp): Forgetting the SR will have the effect of
                # cleaning up the VDI and VBD records, so no need to handle
                # that explicitly.
                volume_utils.forget_sr(self._session, sr_ref)

    def _check_is_supported_driver_type(self, connection_info):
        # Only iSCSI and XenSM-backed Cinder volumes are supported.
        driver_type = connection_info['driver_volume_type']
        if driver_type not in ['iscsi', 'xensm']:
            raise exception.VolumeDriverNotFound(driver_type=driver_type)

    def _connect_to_volume_provider(self, connection_data, instance_name):
        # Find the SR if already present; otherwise introduce it.
        sr_uuid, sr_label, sr_params = volume_utils.parse_sr_info(
            connection_data, 'Disk-for:%s' % instance_name)
        sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
        if not sr_ref:
            # introduce SR because not already present
            sr_ref = volume_utils.introduce_sr(
                self._session, sr_uuid, sr_label, sr_params)
        return (sr_ref, sr_uuid)

    def _connect_hypervisor_to_volume(self, sr_ref, connection_data):
        # connection_data can have credentials in it so make sure to scrub
        # those before logging.
        LOG.debug("Connect volume to hypervisor: %s",
                  strutils.mask_password(connection_data))
        if 'vdi_uuid' in connection_data:
            vdi_ref = volume_utils.introduce_vdi(
                self._session, sr_ref,
                vdi_uuid=connection_data['vdi_uuid'])
        elif 'target_lun' in connection_data:
            vdi_ref = volume_utils.introduce_vdi(
                self._session, sr_ref,
                target_lun=connection_data['target_lun'])
        else:
            # NOTE(sirp): This will introduce the first VDI in the SR
            vdi_ref = volume_utils.introduce_vdi(self._session, sr_ref)
        return vdi_ref

    def _attach_volume_to_vm(self, vdi_ref, vm_ref, instance_name, mountpoint,
                             hotplug):
        # NOTE(review): despite the caller's argument name, `mountpoint`
        # here receives a mountpoint string which is converted to a device
        # number below.
        LOG.debug('Attach_volume vdi: %(vdi_ref)s vm: %(vm_ref)s',
                  {'vdi_ref': vdi_ref, 'vm_ref': vm_ref})

        dev_number = volume_utils.get_device_number(mountpoint)
        # osvol is added to the vbd so we can spot which vbds are volumes
        vbd_ref = vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
                                      dev_number, bootable=False,
                                      osvol=True)
        if hotplug:
            # NOTE(johngarbutt) can only call VBD.plug on a running vm
            running = not vm_utils.is_vm_shutdown(self._session, vm_ref)
            if running:
                LOG.debug("Plugging VBD: %s", vbd_ref)
                self._session.VBD.plug(vbd_ref, vm_ref)

        LOG.info('Dev %(dev_number)s attached to'
                 ' instance %(instance_name)s',
                 {'instance_name': instance_name, 'dev_number': dev_number})

    def detach_volume(self, connection_info, instance_name, mountpoint):
        """Detach volume storage to VM instance."""
        LOG.debug("Detach_volume: %(instance_name)s, %(mountpoint)s",
                  {'instance_name': instance_name, 'mountpoint': mountpoint})

        vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)

        device_number = volume_utils.get_device_number(mountpoint)
        vbd_ref = volume_utils.find_vbd_by_number(self._session, vm_ref,
                                                  device_number)

        if vbd_ref is None:
            # NOTE(sirp): If we don't find the VBD then it must have been
            # detached previously.
            LOG.warning('Skipping detach because VBD for %s was not found',
                        instance_name)
        else:
            self._detach_vbds_and_srs(vm_ref, [vbd_ref])
            LOG.info('Mountpoint %(mountpoint)s detached from instance'
                     ' %(instance_name)s',
                     {'instance_name': instance_name,
                      'mountpoint': mountpoint})

    def _detach_vbds_and_srs(self, vm_ref, vbd_refs):
        # Unplug (if running) and destroy each VBD, then disconnect the SR
        # if nothing else is using it.
        is_vm_shutdown = vm_utils.is_vm_shutdown(self._session, vm_ref)

        for vbd_ref in vbd_refs:
            # find sr before we destroy the vbd
            sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)

            if not is_vm_shutdown:
                vm_utils.unplug_vbd(self._session, vbd_ref, vm_ref)

            vm_utils.destroy_vbd(self._session, vbd_ref)
            # Forget (i.e. disconnect) SR only if not in use
            volume_utils.purge_sr(self._session, sr_ref)

    def detach_all(self, vm_ref):
        """Detach all cinder volumes."""
        vbd_refs = self._get_all_volume_vbd_refs(vm_ref)
        if vbd_refs:
            self._detach_vbds_and_srs(vm_ref, vbd_refs)

    def _get_all_volume_vbd_refs(self, vm_ref):
        """Return VBD refs for all Nova/Cinder volumes."""
        vbd_refs = self._session.VM.get_VBDs(vm_ref)
        for vbd_ref in vbd_refs:
            # Volume VBDs are tagged with 'osvol' in _attach_volume_to_vm.
            other_config = self._session.VBD.get_other_config(vbd_ref)
            if other_config.get('osvol'):
                yield vbd_ref

    def find_bad_volumes(self, vm_ref):
        """Find any volumes with their connection severed.

        Certain VM operations (e.g. `VM.start`, `VM.reboot`, etc.) will not
        work when a VBD is present that points to a non-working volume. To
        work around this, we scan for non-working volumes and detach them
        before retrying a failed operation.

        :returns: list of device paths (e.g. '/dev/sdc') for bad volumes.
        """
        bad_devices = []
        vbd_refs = self._get_all_volume_vbd_refs(vm_ref)
        for vbd_ref in vbd_refs:
            sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)

            try:
                # TODO(sirp): bug1152401 This relies on a 120 sec timeout
                # within XenServer, update this to fail-fast when this is
                # fixed upstream
                self._session.SR.scan(sr_ref)
            except self._session.XenAPI.Failure as exc:
                if exc.details[0] == 'SR_BACKEND_FAILURE_40':
                    device = self._session.VBD.get_device(vbd_ref)
                    bad_devices.append('/dev/%s' % device)
                else:
                    raise

        return bad_devices

    def safe_cleanup_from_vdis(self, vdi_refs):
        # A helper method to detach volumes that are not associated with an
        # instance
        for vdi_ref in vdi_refs:
            try:
                sr_ref = volume_utils.find_sr_from_vdi(self._session, vdi_ref)
            except exception.StorageError as exc:
                LOG.debug(exc.format_message())
                continue
            try:
                # Forget (i.e. disconnect) SR only if not in use
                volume_utils.purge_sr(self._session, sr_ref)
            except Exception:
                LOG.debug('Ignoring error while purging sr: %s', sr_ref,
                          exc_info=True)

View File

@ -0,0 +1,46 @@
---
upgrade:
- |
    The ``XenAPI`` driver, which was deprecated in the 20.0.0 (Train)
    release, has now been removed.
- |
    The following config options only applied when using the ``XenAPI``
    virt driver, which has now been removed. These config options have
    therefore also been removed.
* ``[xenserver] agent_timeout``
* ``[xenserver] agent_version_timeout``
* ``[xenserver] agent_resetnetwork_timeout``
* ``[xenserver] agent_path``
* ``[xenserver] disable_agent``
* ``[xenserver] use_agent_default``
* ``[xenserver] login_timeout``
* ``[xenserver] connection_concurrent``
* ``[xenserver] cache_images``
* ``[xenserver] image_compression_level``
* ``[xenserver] default_os_type``
* ``[xenserver] block_device_creation_timeout``
* ``[xenserver] max_kernel_ramdisk_size``
* ``[xenserver] sr_matching_filter``
* ``[xenserver] sparse_copy``
* ``[xenserver] num_vbd_unplug_retries``
* ``[xenserver] ipxe_network_name``
* ``[xenserver] ipxe_boot_menu_url``
* ``[xenserver] ipxe_mkisofs_cmd``
* ``[xenserver] connection_url``
* ``[xenserver] connection_username``
* ``[xenserver] connection_password``
* ``[xenserver] vhd_coalesce_poll_interval``
* ``[xenserver] check_host``
* ``[xenserver] vhd_coalesce_max_attempts``
* ``[xenserver] sr_base_path``
* ``[xenserver] target_host``
* ``[xenserver] target_port``
* ``[xenserver] independent_compute``
* ``[xenserver] running_timeout``
* ``[xenserver] image_upload_handler``
* ``[xenserver] image_handler``
* ``[xenserver] introduce_vdi_retry_wait``
* ``[xenserver] ovs_integration_bridge``
* ``[xenserver] use_join_force``
* ``[xenserver] console_public_hostname``