# OpenStack Compute (Nova) — nova/virt/libvirt/driver.py
# (Repository-browser chrome from the original page scrape removed.)
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A connection to a hypervisor through libvirt.

Supports KVM, LXC, QEMU, UML, XEN and Parallels.
"""
# Standard library imports.
import binascii
import collections
from collections import deque
import contextlib
import errno
import functools
import glob
import itertools
import operator
import os
import pwd
import random
import shutil
import tempfile
import time
import uuid
# Third-party imports.
from castellan import key_manager
from copy import deepcopy
import eventlet
from eventlet import greenthread
from eventlet import tpool
from lxml import etree
from os_brick import encryptors
from os_brick.encryptors import luks as luks_encryptor
from os_brick import exception as brick_exception
from os_brick.initiator import connector
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_serialization import base64
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import importutils
from oslo_utils import netutils as oslo_netutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
import six
from six.moves import range
# Nova imports.
from nova.api.metadata import base as instance_metadata
from nova.api.metadata import password
from nova import block_device
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova.console import serial as serial_console
from nova.console import type as ctype
from nova import context as nova_context
from nova import crypto
from nova import exception
from nova.i18n import _
from nova import image
# NOTE(review): module path was lost during extraction; restored to the
# upstream form of this import.
from nova.network import model as network_model
from nova import objects
from nova.objects import diagnostics as diagnostics_obj
from nova.objects import fields
from nova.objects import migrate_data as migrate_data_obj
from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
import nova.privsep.libvirt
import nova.privsep.path
import nova.privsep.utils
from nova import rc_fields
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt.disk import api as disk_api
from nova.virt.disk.vfs import guestfs
from nova.virt import driver
from nova.virt import firewall
from nova.virt import hardware
from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import designer
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import instancejobtracker
from nova.virt.libvirt import migration as libvirt_migrate
# NOTE(review): module paths were lost during extraction on the next three
# imports; restored to the upstream forms.
from nova.virt.libvirt.storage import dmcrypt
from nova.virt.libvirt.storage import lvm
from nova.virt.libvirt.storage import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt import vif as libvirt_vif
from nova.virt.libvirt.volume import mount
from nova.virt.libvirt.volume import remotefs
from nova.virt import netutils
from nova.volume import cinder
# Lazily-imported libvirt module handle; populated in LibvirtDriver.__init__
# via importutils so tests can substitute a fake before first use.
libvirt = None
# Tracks whether the UEFI-support message has already been emitted.
uefi_logged = False
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
# NOTE(review): the mapping head these two entries belong to (per-arch
# default UEFI loader paths, e.g. DEFAULT_UEFI_LOADER_PATH = {) appears to
# be missing from this copy — restore from upstream.
"x86_64": "/usr/share/OVMF/OVMF_CODE.fd",
"aarch64": "/usr/share/AAVMF/AAVMF_CODE.fd"
# Cap on console log data returned to callers (100 KiB).
MAX_CONSOLE_BYTES = 100 * units.Ki
# The libvirt driver will prefix any disable reason codes with this string.
# Disable reason for the service which was enabled or disabled without reason
# Guest config console string
CONSOLE = "console=tty0 console=ttyS0 console=hvc0"
# Immutable bundle of the four per-guest NUMA-related configuration pieces
# computed by the driver: the vCPU set, the <cputune> config, the guest
# NUMA topology config and the <numatune> memory-placement config.
GuestNumaConfig = collections.namedtuple(
    'GuestNumaConfig', 'cpuset cputune numaconfig numatune')
class InjectionInfo(collections.namedtuple(
        'InjectionInfo', ['network_info', 'files', 'admin_pass'])):
    """Data to inject into a guest image: network info, files, admin password.

    ``__repr__`` deliberately masks the admin password so that it can never
    leak into log output.
    """
    __slots__ = ()

    def __repr__(self):
        template = ('InjectionInfo(network_info=%r, files=%r, '
                    'admin_pass=<SANITIZED>)')
        return template % (self.network_info, self.files)
libvirt_volume_drivers = [
def patch_tpool_proxy():
    """eventlet.tpool.Proxy doesn't work with old-style class in __str__()
    or __repr__() calls. See bug #962840 for details.

    We perform a monkey patch to replace those two instance methods.
    """
    def str_method(self):
        # Delegate to the wrapped object's __str__.
        return str(self._obj)

    def repr_method(self):
        # Delegate to the wrapped object's __repr__.
        return repr(self._obj)

    tpool.Proxy.__str__ = str_method
    tpool.Proxy.__repr__ = repr_method
# For information about when MIN_LIBVIRT_VERSION and
# NEXT_MIN_LIBVIRT_VERSION can be changed, consult
# Currently this is effectively the min version for i686/x86_64
# + KVM/QEMU, as other architectures/hypervisors require newer
# versions. Over time, this will become a common min version
# for all architectures/hypervisors, as this value rises to
# meet them.
# NOTE(review): the MIN_LIBVIRT_VERSION assignment this comment describes
# appears to be missing from this copy — restore from upstream.
MIN_QEMU_VERSION = (2, 5, 0)
# TODO(berrange): Re-evaluate this at start of each release cycle
# to decide if we want to plan a future min version bump.
# MIN_LIBVIRT_VERSION can be updated to match this after
# NEXT_MIN_LIBVIRT_VERSION has been at a higher value for
# one cycle
# NOTE(review): the version-gate constants referenced by methods below
# (NEXT_MIN_LIBVIRT_VERSION, NEXT_MIN_QEMU_VERSION, MIN_VIRTUOZZO_VERSION,
# MIN_LIBVIRT_OTHER_ARCH, MIN_LIBVIRT_VIRTLOGD, MIN_LIBVIRT_POSTCOPY_VERSION,
# MIN_LIBVIRT_MDEV_SUPPORT, MIN_LIBVIRT_MULTIATTACH, MIN_LIBVIRT_LUKS_VERSION)
# appear to have been dropped from this copy; only their comments remain.
# Virtuozzo driver support
# Ability to set the user guest password with parallels
# libvirt < 1.3 reported virt_functions capability
# only when VFs are enabled.
# libvirt 1.3 fix f391889f4e942e22b9ef8ecca492de05106ce41e
# Use the "logd" backend for handling stdout/stderr from QEMU processes.
# aarch64 architecture with KVM
# 'chardev' support got sorted out in 3.6.0
# Names of the types that do not get compressed during migration
# number of serial console limit
# Qemu supports 4 serial consoles, we remove 1 because of the PTY one defined
# libvirt postcopy support
# perf events support
# NOTE(review): these two entries belong to a perf-event-name mapping whose
# head (e.g. PERF_EVENTS_CPU_FLAG_MAP = {) is missing from this copy.
'mbml': 'mbm_local',
'mbmt': 'mbm_total',
# Mediated devices support
# libvirt>=3.10 is required for volume multiattach if qemu<2.10.
# See
# for details.
# If the host has this libvirt version, then we skip the retry loop of
# instance destroy() call, as libvirt itself increased the wait time
# before the SIGKILL signal takes effect.
# Name of the semaphore serialising vGPU resource allocation in this driver.
VGPU_RESOURCE_SEMAPHORE = "vgpu_resources"
# libvirt >= v1.3.4 introduced VIR_MIGRATE_PARAM_PERSIST_XML that needs to be
# provided when the VIR_MIGRATE_PERSIST_DEST flag is used to ensure the updated
# domain XML is persisted on the destination.
# Driver entry point: implements the nova.virt.driver.ComputeDriver contract
# on top of libvirt.
class LibvirtDriver(driver.ComputeDriver):
# Static capability flags advertised to the compute manager; the
# multiattach flag is flipped at runtime by _set_multiattach_support().
capabilities = {
"has_imagecache": True,
"supports_evacuate": True,
"supports_migrate_to_same_host": False,
"supports_attach_interface": True,
"supports_device_tagging": True,
"supports_tagged_attach_interface": True,
"supports_tagged_attach_volume": True,
"supports_extend_volume": True,
# Multiattach support is conditional on qemu and libvirt versions
# determined in init_host.
"supports_multiattach": False,
"supports_trusted_certs": True,
# NOTE(review): the dict's closing brace appears to be missing here.
def __init__(self, virtapi, read_only=False):
"""Initialise driver state, volume drivers, image backend and config.

:param virtapi: VirtAPI instance passed through to the base driver
:param read_only: open the libvirt connection read-only when True
"""
super(LibvirtDriver, self).__init__(virtapi)
# Import libvirt lazily so tests can stub it out before first use.
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
libvirt_migrate.libvirt = libvirt
# NOTE(review): call truncated in this copy — upstream also passes the
# lifecycle/connection event handlers to host.Host().
self._host = host.Host(self._uri(), read_only,
self._initiator = None
self._fc_wwnns = None
self._fc_wwpns = None
self._caps = None
self._supported_perf_events = []
# NOTE(review): call truncated — upstream passes the configured firewall
# driver name and the host object.
self.firewall_driver = firewall.load_driver(
self.vif_driver = libvirt_vif.LibvirtGenericVIFDriver()
# TODO(mriedem): Long-term we should load up the volume drivers on
# demand as needed rather than doing this on startup, as there might
# be unsupported volume drivers in this list based on the underlying
# platform.
self.volume_drivers = self._get_volume_drivers()
# Computed lazily by the disk_cachemode property.
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
self.image_backend = imagebackend.Backend(CONF.use_cow_images)
# Per-disk-type cache mode overrides parsed from config below.
self.disk_cachemodes = {}
# NOTE(review): list truncated — upstream enumerates all valid cache
# modes ("none", "writethrough", "writeback", "directsync", "unsafe").
self.valid_cachemodes = ["default",
# NOTE(review): expression truncated — upstream includes 'qemu' too.
self._conn_supports_start_paused = CONF.libvirt.virt_type in ('kvm',
# Parse "disk_type=cache_mode" pairs from configuration; invalid modes
# are warned about and skipped.
for mode_str in CONF.libvirt.disk_cachemodes:
disk_type, sep, cache_mode = mode_str.partition('=')
if cache_mode not in self.valid_cachemodes:
LOG.warning('Invalid cachemode %(cache_mode)s specified '
'for disk type %(disk_type)s.',
{'cache_mode': cache_mode, 'disk_type': disk_type})
# NOTE(review): upstream 'continue's after the warning; that line
# appears missing from this copy.
self.disk_cachemodes[disk_type] = cache_mode
self._volume_api = cinder.API()
self._image_api = image.API()
# Strategy table mapping the configured sysinfo_serial option to the
# function that produces the host sysinfo serial.
sysinfo_serial_funcs = {
'none': lambda: None,
'hardware': self._get_host_sysinfo_serial_hardware,
'os': self._get_host_sysinfo_serial_os,
'auto': self._get_host_sysinfo_serial_auto,
# NOTE(review): closing brace and the .get() argument are truncated.
self._sysinfo_serial_func = sysinfo_serial_funcs.get(
self.job_tracker = instancejobtracker.InstanceJobTracker()
self._remotefs = remotefs.RemoteFilesystem()
self._live_migration_flags = self._block_migration_flags = 0
self.active_migrations = {}
# Compute reserved hugepages from conf file at the very
# beginning to ensure any syntax error will be reported and
# avoid any re-calculation when computing resources.
self._reserved_hugepages = hardware.numa_get_reserved_huge_pages()
def _get_volume_drivers(self):
"""Build a mapping of volume type -> instantiated volume driver.

Each libvirt_volume_drivers entry has the form '<type>=<class path>';
drivers whose connector protocol is unsupported on this host are
skipped with a debug message.
"""
driver_registry = dict()
for driver_str in libvirt_volume_drivers:
driver_type, _sep, driver = driver_str.partition('=')
driver_class = importutils.import_class(driver)
# NOTE(review): the 'try:' that pairs with the except below appears to
# be missing from this copy; it should wrap the instantiation.
driver_registry[driver_type] = driver_class(self._host)
except brick_exception.InvalidConnectorProtocol:
LOG.debug('Unable to load volume driver %s. It is not '
'supported on this host.', driver)
return driver_registry
# NOTE(review): upstream declares this with @property; the decorator line
# appears to be missing from this copy.
def disk_cachemode(self):
"""Default disk cache mode, computed once and memoised."""
if self._disk_cachemode is None:
# We prefer 'none' for consistent performance, host crash
# safety & migration correctness by avoiding host page cache.
# Some filesystems don't support O_DIRECT though. For those we
# fallback to 'writethrough' which gives host crash safety, and
# is safe for migration provided the filesystem is cache coherent
# (cluster filesystems typically are, but things like NFS are not).
self._disk_cachemode = "none"
if not nova.privsep.utils.supports_direct_io(CONF.instances_path):
self._disk_cachemode = "writethrough"
return self._disk_cachemode
def _set_cache_mode(self, conf):
"""Set cache mode on LibvirtConfigGuestDisk object."""
# NOTE(review): the 'try:' pairing with the except below appears to be
# missing from this copy.
source_type = conf.source_type
driver_cache = conf.driver_cache
except AttributeError:
# NOTE(review): upstream returns early here; that line appears missing.
# Shareable disks like for a multi-attach volume need to have the
# driver cache disabled.
if getattr(conf, 'shareable', False):
conf.driver_cache = 'none'
# NOTE(review): call truncated — upstream falls back to the memoised
# self.disk_cachemode default and returns early for shareable disks.
cache_mode = self.disk_cachemodes.get(source_type,
conf.driver_cache = cache_mode
def _do_quality_warnings(self):
"""Warn about potential configuration issues.

This will log a warning message for things such as untested driver or
host arch configurations in order to indicate potential issues to
administrators.
"""
caps = self._host.get_capabilities()
# NOTE(review): assignment truncated — upstream reads caps.host.cpu.arch.
hostarch =
# NOTE(review): tuple truncated — upstream also lists X86_64.
if (CONF.libvirt.virt_type not in ('qemu', 'kvm') or
hostarch not in (fields.Architecture.I686,
LOG.warning('The libvirt driver is not tested on '
'%(type)s/%(arch)s by the OpenStack project and '
'thus its quality can not be ensured. For more '
'information, see:'
{'type': CONF.libvirt.virt_type, 'arch': hostarch})
if CONF.vnc.keymap:
LOG.warning('The option "[vnc] keymap" has been deprecated '
'in favor of configuration within the guest. '
'Update nova.conf to address this change and '
'refer to bug #1682020 for more information.')
if CONF.spice.keymap:
LOG.warning('The option "[spice] keymap" has been deprecated '
'in favor of configuration within the guest. '
'Update nova.conf to address this change and '
'refer to bug #1682020 for more information.')
def _handle_conn_event(self, enabled, reason):
    """React to a libvirt connection state-change event.

    Logs the transition and mirrors it into the compute service's
    enabled/disabled state via _set_host_enabled().

    :param enabled: truthy when the libvirt connection is usable
    :param reason: human-readable reason string for the transition
    """
    # NOTE(review): the LOG.info( call head was fused onto the def line in
    # this copy; restored here around the original message and arguments.
    LOG.info("Connection event '%(enabled)d' reason '%(reason)s'",
             {'enabled': enabled, 'reason': reason})
    self._set_host_enabled(enabled, reason)
def init_host(self, host):
"""Validate host/hypervisor versions and prepare driver state.

Raises InternalError when the host's libvirt/QEMU/Virtuozzo versions
are below the supported minimums; logs deprecation warnings when the
next-cycle minimums are not met.
"""
self._supported_perf_events = self._get_supported_perf_events()
if (CONF.libvirt.virt_type == 'lxc' and
not (CONF.libvirt.uid_maps and CONF.libvirt.gid_maps)):
LOG.warning("Running libvirt-lxc without user namespaces is "
"dangerous. Containers spawned by Nova will be run "
"as the host's root user. It is highly suggested "
"that user namespaces be used in a public or "
"multi-tenant environment.")
# Stop libguestfs using KVM unless we're also configured
# to use this. This solves problem where people need to
# stop Nova use of KVM because nested-virt is broken
if CONF.libvirt.virt_type != "kvm":
# NOTE(review): upstream sets guestfs.force_tcg() here; line missing.
if not self._host.has_min_version(MIN_LIBVIRT_VERSION):
raise exception.InternalError(
# NOTE(review): the version-format argument is truncated here.
_('Nova requires libvirt version %s or greater.') %
if CONF.libvirt.virt_type in ("qemu", "kvm"):
if self._host.has_min_version(hv_ver=MIN_QEMU_VERSION):
# "qemu-img info" calls are version dependent, so we need to
# store the version in the images module.
images.QEMU_VERSION = self._host.get_connection().getVersion()
# NOTE(review): upstream has an 'else:' before this raise; missing here.
raise exception.InternalError(
_('Nova requires QEMU version %s or greater.') %
if CONF.libvirt.virt_type == 'parallels':
if not self._host.has_min_version(hv_ver=MIN_VIRTUOZZO_VERSION):
raise exception.InternalError(
_('Nova requires Virtuozzo version %s or greater.') %
# Give the cloud admin a heads up if we are intending to
# change the MIN_LIBVIRT_VERSION in the next release.
if not self._host.has_min_version(NEXT_MIN_LIBVIRT_VERSION):
LOG.warning('Running Nova with a libvirt version less than '
'%(version)s is deprecated. The required minimum '
'version of libvirt will be raised to %(version)s '
'in the next release.',
{'version': libvirt_utils.version_to_string(
if (CONF.libvirt.virt_type in ("qemu", "kvm") and
not self._host.has_min_version(hv_ver=NEXT_MIN_QEMU_VERSION)):
LOG.warning('Running Nova with a QEMU version less than '
'%(version)s is deprecated. The required minimum '
'version of QEMU will be raised to %(version)s '
'in the next release.',
{'version': libvirt_utils.version_to_string(
kvm_arch = fields.Architecture.from_host()
if (CONF.libvirt.virt_type in ('kvm', 'qemu') and
kvm_arch in MIN_LIBVIRT_OTHER_ARCH and
not self._host.has_min_version(
raise exception.InternalError(
_('Running Nova with qemu/kvm virt_type on %(arch)s '
'requires libvirt version %(libvirt_ver)s or greater') %
{'arch': kvm_arch,
'libvirt_ver': libvirt_utils.version_to_string(
# TODO(sbauza): Remove this code once mediated devices are persisted
# across reboots.
if self._host.has_min_version(MIN_LIBVIRT_MDEV_SUPPORT):
# NOTE(review): upstream calls
# self._recreate_assigned_mediated_devices() here; the call and the
# rest of init_host appear to be missing from this copy.
# NOTE(review): upstream declares this as a @staticmethod; the decorator
# line appears to be missing from this copy.
def _is_existing_mdev(uuid):
"""Return True if a mediated device with this UUID exists in sysfs."""
# FIXME(sbauza): Some kernel can have a uevent race meaning that the
# libvirt daemon won't know when a mediated device is created unless
# you restart that daemon. Until all kernels we support are not having
# that possible race, check the sysfs directly instead of asking the
# libvirt API.
# See for ref.
return os.path.exists('/sys/bus/mdev/devices/{0}'.format(uuid))
def _recreate_assigned_mediated_devices(self):
"""Recreate assigned mdevs that could have disappeared if we reboot
the host.
"""
mdevs = self._get_all_assigned_mediated_devices()
requested_types = self._get_supported_vgpu_types()
for (mdev_uuid, instance_uuid) in six.iteritems(mdevs):
# Recreate only the devices that no longer exist in sysfs.
if not self._is_existing_mdev(mdev_uuid):
self._create_new_mediated_device(requested_types, mdev_uuid)
def _set_multiattach_support(self):
"""Flip the 'supports_multiattach' capability based on host versions."""
# Check to see if multiattach is supported. Based on bugzilla
# and related
# clones, the shareable flag on a disk device will only work with
# qemu<2.10 or libvirt>=3.10. So check those versions here and set
# the capability appropriately.
if (self._host.has_min_version(lv_ver=MIN_LIBVIRT_MULTIATTACH) or
not self._host.has_min_version(hv_ver=(2, 10, 0))):
self.capabilities['supports_multiattach'] = True
# NOTE(review): upstream has an 'else:' before this debug log; the line
# appears to be missing from this copy.
LOG.debug('Volume multiattach is not supported based on current '
'versions of QEMU and libvirt. QEMU must be less than '
'2.10 or libvirt must be greater than or equal to 3.10.')
def _check_file_backed_memory_support(self):
"""Validate that file_backed_memory config is usable on this host.

Raises InternalError when file_backed_memory is enabled with an
incompatible virt_type, insufficient libvirt/QEMU versions, or a
ram_allocation_ratio other than 1.0.
"""
if CONF.libvirt.file_backed_memory:
# file_backed_memory is only compatible with qemu/kvm virts
if CONF.libvirt.virt_type not in ("qemu", "kvm"):
raise exception.InternalError(
_('Running Nova with file_backed_memory and virt_type '
'%(type)s is not supported. file_backed_memory is only '
'supported with qemu and kvm types.') %
{'type': CONF.libvirt.virt_type})
# Check needed versions for file_backed_memory
# NOTE(review): call truncated — upstream passes the file-backed-memory
# minimum libvirt and QEMU version constants.
if not self._host.has_min_version(
raise exception.InternalError(
_('Running Nova with file_backed_memory requires libvirt '
'version %(libvirt)s and qemu version %(qemu)s') %
{'libvirt': libvirt_utils.version_to_string(
'qemu': libvirt_utils.version_to_string(
# file-backed memory doesn't work with memory overcommit.
# Block service startup if file-backed memory is enabled and
# ram_allocation_ratio is not 1.0
if CONF.ram_allocation_ratio != 1.0:
raise exception.InternalError(
'Running Nova with file_backed_memory requires '
'ram_allocation_ratio configured to 1.0')
def _check_my_ip(self):
    """Warn when CONF.my_ip is not bound to any local interface."""
    machine_ips = compute_utils.get_machine_ips()
    if CONF.my_ip in machine_ips:
        return
    LOG.warning('my_ip address (%(my_ip)s) was not found on '
                'any of the interfaces: %(ifaces)s',
                {'my_ip': CONF.my_ip, 'ifaces': ", ".join(machine_ips)})
def _prepare_migration_flags(self):
    """Assemble the base libvirt flag sets for live and block migration.

    :returns: (live_migration_flags, block_migration_flags) tuple
    """
    common_flags = libvirt.VIR_MIGRATE_LIVE
    # Adding p2p flag only if xen is not in use, because xen does not
    # support p2p migrations
    if CONF.libvirt.virt_type != 'xen':
        common_flags |= libvirt.VIR_MIGRATE_PEER2PEER
    # Adding VIR_MIGRATE_UNDEFINE_SOURCE because, without it, migrated
    # instance will remain defined on the source host
    common_flags |= libvirt.VIR_MIGRATE_UNDEFINE_SOURCE
    # Adding VIR_MIGRATE_PERSIST_DEST to persist the VM on the
    # destination host
    common_flags |= libvirt.VIR_MIGRATE_PERSIST_DEST
    # Adding VIR_MIGRATE_NON_SHARED_INC, otherwise all block-migrations
    # will be live-migrations instead
    block_flags = common_flags | libvirt.VIR_MIGRATE_NON_SHARED_INC
    return (common_flags, block_flags)
def _handle_live_migration_tunnelled(self, migration_flags):
    """Add VIR_MIGRATE_TUNNELLED to the flags when tunnelling is enabled."""
    if not CONF.libvirt.live_migration_tunnelled:
        return migration_flags
    return migration_flags | libvirt.VIR_MIGRATE_TUNNELLED
def _is_post_copy_available(self):
    """Whether the host's libvirt supports post-copy live migration."""
    required_version = MIN_LIBVIRT_POSTCOPY_VERSION
    return self._host.has_min_version(lv_ver=required_version)
def _is_virtlogd_available(self):
"""Whether the host supports the virtlogd guest logging backend."""
# NOTE(review): call truncated — upstream also passes the matching
# MIN_QEMU_VIRTLOGD hypervisor version.
return self._host.has_min_version(MIN_LIBVIRT_VIRTLOGD,
def _is_native_luks_available(self):
"""Whether the host supports QEMU-native LUKS decryption."""
# NOTE(review): call truncated — upstream also passes the matching
# MIN_QEMU_LUKS_VERSION hypervisor version.
return self._host.has_min_version(MIN_LIBVIRT_LUKS_VERSION,
def _handle_live_migration_post_copy(self, migration_flags):
    """Add VIR_MIGRATE_POSTCOPY when configured and supported.

    When live_migration_permit_post_copy is set but the host does not
    support post-copy, an informational message is logged instead.
    """
    if CONF.libvirt.live_migration_permit_post_copy:
        if self._is_post_copy_available():
            migration_flags |= libvirt.VIR_MIGRATE_POSTCOPY
        else:
            # NOTE(review): the LOG.info( call head was fused onto the
            # 'else:' line in this copy; restored here.
            LOG.info('The live_migration_permit_post_copy is set '
                     'to True, but it is not supported.')
    return migration_flags
def _handle_live_migration_auto_converge(self, migration_flags):
    """Add VIR_MIGRATE_AUTO_CONVERGE unless post-copy is already in use.

    Post-copy and auto-converge are mutually exclusive strategies; when
    post-copy is available and already present in the flags it wins.
    """
    if (self._is_post_copy_available() and
            (migration_flags & libvirt.VIR_MIGRATE_POSTCOPY) != 0):
        # NOTE(review): the LOG.info( call head was fused onto the
        # condition line in this copy; restored here.
        LOG.info('The live_migration_permit_post_copy is set to '
                 'True and post copy live migration is available '
                 'so auto-converge will not be in use.')
    elif CONF.libvirt.live_migration_permit_auto_converge:
        migration_flags |= libvirt.VIR_MIGRATE_AUTO_CONVERGE
    return migration_flags
def _parse_migration_flags(self):
"""Compute and cache the final live/block migration flag sets."""
# NOTE(review): the '(live_migration_flags,' half of this tuple unpack
# and the argument lists of the calls below are truncated in this copy.
block_migration_flags) = self._prepare_migration_flags()
live_migration_flags = self._handle_live_migration_tunnelled(
block_migration_flags = self._handle_live_migration_tunnelled(
live_migration_flags = self._handle_live_migration_post_copy(
block_migration_flags = self._handle_live_migration_post_copy(
live_migration_flags = self._handle_live_migration_auto_converge(
block_migration_flags = self._handle_live_migration_auto_converge(
self._live_migration_flags = live_migration_flags
self._block_migration_flags = block_migration_flags
# TODO(sahid): This method is targeted for removal when the tests
# have been updated to avoid its use
# All libvirt API calls on the libvirt.Connect object should be
# encapsulated by methods on the
# object, rather than directly invoking the libvirt APIs. The goal
# is to avoid a direct dependency on the libvirt API from the
# file.
def _get_connection(self):
# Delegate to the Host object, which owns connection/reconnect logic.
return self._host.get_connection()
# Legacy alias kept for tests that access the connection directly.
_conn = property(_get_connection)
# NOTE(review): upstream declares this as a @staticmethod; the decorator
# line appears to be missing from this copy.
def _uri():
"""Return the libvirt connection URI for the configured virt_type,
honouring CONF.libvirt.connection_uri when set."""
if CONF.libvirt.virt_type == 'uml':
uri = CONF.libvirt.connection_uri or 'uml:///system'
elif CONF.libvirt.virt_type == 'xen':
uri = CONF.libvirt.connection_uri or 'xen:///'
elif CONF.libvirt.virt_type == 'lxc':
uri = CONF.libvirt.connection_uri or 'lxc:///'
elif CONF.libvirt.virt_type == 'parallels':
uri = CONF.libvirt.connection_uri or 'parallels:///system'
# NOTE(review): upstream has an 'else:' before this fallback; the line
# appears to be missing from this copy.
uri = CONF.libvirt.connection_uri or 'qemu:///system'
return uri
# NOTE(review): upstream declares this as a @staticmethod; the decorator
# line appears to be missing from this copy.
def _live_migration_uri(dest):
"""Build the per-virt-type live-migration URI for host ``dest``.

Honours the deprecated CONF.libvirt.live_migration_uri override;
raises LiveMigrationURINotAvailable for unsupported virt types.
"""
uris = {
'kvm': 'qemu+%s://%s/system',
'qemu': 'qemu+%s://%s/system',
'xen': 'xenmigr://%s/system',
'parallels': 'parallels+tcp://%s/system',
# NOTE(review): the dict's closing brace appears to be missing here.
dest = oslo_netutils.escape_ipv6(dest)
virt_type = CONF.libvirt.virt_type
# TODO(pkoniszewski): Remove fetching live_migration_uri in Pike
uri = CONF.libvirt.live_migration_uri
if uri:
return uri % dest
uri = uris.get(virt_type)
if uri is None:
raise exception.LiveMigrationURINotAvailable(virt_type=virt_type)
# kvm/qemu templates take (scheme, dest); the others take only dest.
str_format = (dest,)
if virt_type in ('kvm', 'qemu'):
scheme = CONF.libvirt.live_migration_scheme or 'tcp'
str_format = (scheme, dest)
return uris.get(virt_type) % str_format
def _migrate_uri(dest):
    """Return the tcp:// migrate URI for QEMU/KVM hosts, else None.

    :param dest: destination host name or IP (IPv6 is escaped)
    """
    migrate_uri = None
    dest = oslo_netutils.escape_ipv6(dest)
    # Only QEMU live migrations supports migrate-uri parameter
    if CONF.libvirt.virt_type in ('qemu', 'kvm'):
        # QEMU accept two schemes: tcp and rdma. By default
        # libvirt build the URI using the remote hostname and the
        # tcp schema.
        migrate_uri = 'tcp://%s' % dest
    # Because dest might be of type unicode, here we might return value of
    # type unicode as well which is not acceptable by libvirt python
    # binding when Python 2.7 is in use, so let's convert it explicitly
    # back to string. When Python 3.x is in use, libvirt python binding
    # accepts unicode type so it is completely fine to do a no-op str(uri)
    # conversion which will return value of type unicode.
    return migrate_uri and str(migrate_uri)
def instance_exists(self, instance):
"""Efficient override of base instance_exists method."""
# NOTE(review): the 'try:' and the self._host.get_guest(instance) call
# that pair with the except below appear to be missing from this copy.
return True
except (exception.InternalError, exception.InstanceNotFound):
return False
def estimate_instance_overhead(self, instance_info):
"""Add one vCPU of overhead when emulator threads are isolated.

:param instance_info: a Flavor object, an Instance object or a dict
:returns: overhead dict from the base driver, with 'vcpus' bumped by
one when the emulator-thread-isolate policy applies
"""
# NOTE(review): call truncated — upstream passes instance_info through.
overhead = super(LibvirtDriver, self).estimate_instance_overhead(
if isinstance(instance_info, objects.Flavor):
# A flavor object is passed during case of migrate
# NOTE(review): call truncated — upstream passes flavor/image meta.
emu_policy = hardware.get_emulator_thread_policy_constraint(
if emu_policy == fields.CPUEmulatorThreadsPolicy.ISOLATE:
overhead['vcpus'] += 1
# NOTE(review): upstream has an 'else:' branch header here; missing.
# An instance object is passed during case of spawing or a
# dict is passed when computing resource for an instance
numa_topology = hardware.instance_topology_from_instance(
if numa_topology and numa_topology.emulator_threads_isolated:
overhead['vcpus'] += 1
return overhead
def list_instances(self):
"""Return the names of all guests known to libvirt (any state)."""
names = []
for guest in self._host.list_guests(only_running=False):
# NOTE(review): the loop body (upstream appends each guest's name)
# appears to be missing from this copy.
return names
def list_instance_uuids(self):
"""Return the UUIDs of all guests known to libvirt (any state)."""
uuids = []
for guest in self._host.list_guests(only_running=False):
# NOTE(review): the loop body (upstream appends each guest's UUID)
# appears to be missing from this copy.
return uuids
def plug_vifs(self, instance, network_info):
    """Plug each VIF in ``network_info`` into its network.

    :param instance: instance whose virtual interfaces are being plugged
    :param network_info: iterable of VIFs to plug
    """
    for virtual_interface in network_info:
        self.vif_driver.plug(instance, virtual_interface)
def _unplug_vifs(self, instance, network_info, ignore_errors):
"""Unplug VIFs from networks."""
for vif in network_info:
# NOTE(review): the 'try:' pairing with the except below appears to
# be missing from this copy; it should wrap the unplug call.
self.vif_driver.unplug(instance, vif)
except exception.NovaException:
if not ignore_errors:
# NOTE(review): upstream re-raises here; the 'raise' appears missing.
def unplug_vifs(self, instance, network_info):
    """Unplug every VIF, propagating any unplug failure to the caller."""
    self._unplug_vifs(instance, network_info, ignore_errors=False)
def _teardown_container(self, instance):
"""Tear down an LXC container's rootfs mount for ``instance``."""
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
rootfs_dev = instance.system_metadata.get('rootfs_device_name')
LOG.debug('Attempting to teardown container at path %(dir)s with '
'root device: %(rootfs_dev)s',
# NOTE(review): call truncated — upstream passes instance=instance here.
{'dir': container_dir, 'rootfs_dev': rootfs_dev},
disk_api.teardown_container(container_dir, rootfs_dev)
def _destroy(self, instance, attempt=1):
"""Forcefully stop the guest and wait until it is gone.

Releases any serial-console ports, calls the libvirt destroy API
(retrying on EBUSY up to 6 attempts on older libvirt), then polls
until the domain reaches SHUTDOWN or disappears.

:param instance: instance whose domain is being destroyed
:param attempt: current retry attempt number (1-based)
"""
# NOTE(review): the 'try:' pairing with the InstanceNotFound handler
# below appears to be missing from this copy.
guest = self._host.get_guest(instance)
if CONF.serial_console.enabled:
# This method is called for several events: destroy,
# rebuild, hard-reboot, power-off - For all of these
# events we want to release the serial ports acquired
# for the guest before destroying it.
serials = self._get_serial_ports_from_guest(guest)
for hostname, port in serials:
serial_console.release_port(host=hostname, port=port)
except exception.InstanceNotFound:
guest = None
# If the instance is already terminated, we're still happy
# Otherwise, destroy it
old_domid = -1
if guest is not None:
# NOTE(review): upstream wraps the id lookup and guest.poweroff()
# in a try: whose header appears to be missing from this copy.
old_domid =
except libvirt.libvirtError as e:
is_okay = False
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_NO_DOMAIN:
# Domain already gone. This can safely be ignored.
is_okay = True
elif errcode == libvirt.VIR_ERR_OPERATION_INVALID:
# If the instance is already shut off, we get this:
# Code=55 Error=Requested operation is not valid:
# domain is not running
state = guest.get_power_state(self._host)
if state == power_state.SHUTDOWN:
is_okay = True
elif errcode == libvirt.VIR_ERR_INTERNAL_ERROR:
errmsg = e.get_error_message()
if (CONF.libvirt.virt_type == 'lxc' and
errmsg == 'internal error: '
'Some processes refused to die'):
# Some processes in the container didn't die
# fast enough for libvirt. The container will
# eventually die. For now, move on and let
# the wait_for_destroy logic take over.
is_okay = True
elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:
LOG.warning("Cannot destroy instance, operation time out",
reason = _("operation time out")
raise exception.InstancePowerOffFailure(reason=reason)
elif errcode == libvirt.VIR_ERR_SYSTEM_ERROR:
if e.get_int1() == errno.EBUSY:
# NOTE(danpb): When libvirt kills a process it sends it
# SIGTERM first and waits 10 seconds. If it hasn't gone
# it sends SIGKILL and waits another 5 seconds. If it
# still hasn't gone then you get this EBUSY error.
# Usually when a QEMU process fails to go away upon
# SIGKILL it is because it is stuck in an
# uninterruptible kernel sleep waiting on I/O from
# some non-responsive server.
# Given the CPU load of the gate tests though, it is
# conceivable that the 15 second timeout is too short,
# particularly if the VM running tempest has a high
# steal time from the cloud host. ie 15 wallclock
# seconds may have passed, but the VM might have only
# have a few seconds of scheduled run time.
# reaches v4.7.0, (a) rewrite the above note,
# and (b) remove the following code that retries
# _destroy() API call (which gives SIGKILL 30
# seconds to take effect) -- because from v4.7.0
# onwards, libvirt _automatically_ increases the
# timeout to 30 seconds. This was added in the
# following libvirt commits:
# - 9a4e4b942 (process: wait longer 5->30s on
# hard shutdown)
# - be2ca0444 (process: wait longer on kill
# per assigned Hostdev)
with excutils.save_and_reraise_exception() as ctxt:
# NOTE(review): call truncated — upstream checks the libvirt
# version constant that made the retry loop unnecessary.
if not self._host.has_min_version(
LOG.warning('Error from libvirt during '
'destroy. Code=%(errcode)s '
'Error=%(e)s; attempt '
'%(attempt)d of 6 ',
{'errcode': errcode, 'e': e,
'attempt': attempt},
# Try up to 6 times before giving up.
if attempt < 6:
ctxt.reraise = False
self._destroy(instance, attempt + 1)
# NOTE(review): upstream returns here after the retry.
if not is_okay:
with excutils.save_and_reraise_exception():
LOG.error('Error from libvirt during destroy. '
'Code=%(errcode)s Error=%(e)s',
{'errcode': errcode, 'e': e},
def _wait_for_destroy(expected_domid):
"""Called at an interval until the VM is gone."""
# NOTE(vish): If the instance disappears during the destroy
# we ignore it so the cleanup can still be
# attempted because we would prefer destroy to
# never fail.
# NOTE(review): the 'try:' pairing with the handler below appears to
# be missing from this copy.
dom_info = self.get_info(instance)
state = dom_info.state
new_domid = dom_info.internal_id
except exception.InstanceNotFound:
LOG.debug("During wait destroy, instance disappeared.",
state = power_state.SHUTDOWN
if state == power_state.SHUTDOWN:"Instance destroyed successfully.", instance=instance)
raise loopingcall.LoopingCallDone()
# NOTE(wangpan): If the instance was booted again after destroy,
# this may be an endless loop, so check the id of
# domain here, if it changed and the instance is
# still running, we should destroy it again.
# see for more details
if new_domid != expected_domid:"Instance may be started again.", instance=instance)
kwargs['is_running'] = True
raise loopingcall.LoopingCallDone()
kwargs = {'is_running': False}
# NOTE(review): call truncated — upstream passes old_domid and starts
# the timer with an interval before checking kwargs.
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy,
if kwargs['is_running']:"Going to destroy instance again.", instance=instance)
# NOTE(review): upstream recursively calls self._destroy(instance) here.
# NOTE(GuanQiang): teardown container to avoid resource leak
if CONF.libvirt.virt_type == 'lxc':
# NOTE(review): upstream calls self._teardown_container(instance) here.
# Public destroy entry point: powers off the domain then cleans up.
# NOTE(review): the signature continuation (destroy_disks=True) and the
# remaining cleanup() arguments are truncated in this copy, and upstream
# calls self._destroy(instance) before cleanup().
def destroy(self, context, instance, network_info, block_device_info=None,
self.cleanup(context, instance, network_info, block_device_info,
def _undefine_domain(self, instance):
"""Remove the persistent libvirt domain definition for ``instance``.

A domain that is already gone (VIR_ERR_NO_DOMAIN) is ignored; other
libvirt errors are logged and re-raised.
"""
# NOTE(review): the 'try:' headers pairing with the handlers below and
# the guest.delete_configuration(support_uefi) call appear to be
# missing from this copy.
guest = self._host.get_guest(instance)
support_uefi = self._has_uefi_support()
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception() as ctxt:
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_NO_DOMAIN:
LOG.debug("Called undefine, but domain already gone.",
ctxt.reraise = False
# NOTE(review): upstream has an 'else:' before this error log.
LOG.error('Error from libvirt during undefine. '
'Code=%(errcode)s Error=%(e)s',
{'errcode': errcode,
'e': encodeutils.exception_to_unicode(e)},
except exception.InstanceNotFound:
# Nothing to undefine if the instance is already gone.
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Cleanup the instance from the host.

Identify if the instance disks and instance path should be removed
from the host before calling down into the _cleanup method for the
actual removal of resources from the host.

:param context: security context
:param instance: instance object for the instance being cleaned up
:param network_info: instance network information
:param block_device_info: optional instance block device information
:param destroy_disks: if local ephemeral disks should be destroyed
:param migrate_data: optional migrate_data object
:param destroy_vifs: if plugged vifs should be unplugged
"""
cleanup_instance_dir = False
cleanup_instance_disks = False
# We assume destroy_disks means destroy instance directory and disks
if destroy_disks:
cleanup_instance_dir = True
cleanup_instance_disks = True
# NOTE(review): upstream has an 'else:' branch header here; missing.
# NOTE(mdbooth): I think the theory here was that if this is a
# migration with shared block storage then we need to delete the
# instance directory because that's not shared. I'm pretty sure
# this is wrong.
if migrate_data and 'is_shared_block_storage' in migrate_data:
cleanup_instance_dir = migrate_data.is_shared_block_storage
# NOTE(lyarwood): The following workaround allows operators to
# ensure that non-shared instance directories are removed after an
# evacuation or revert resize when using the shared RBD
# imagebackend. This workaround is not required when cleaning up
# migrations that provide migrate_data to this method as the
# existing is_shared_block_storage conditional will cause the
# instance directory to be removed.
if not cleanup_instance_dir:
if CONF.workarounds.ensure_libvirt_rbd_instance_dir_cleanup:
cleanup_instance_dir = CONF.libvirt.images_type == 'rbd'
# NOTE(review): call truncated — upstream also forwards
# block_device_info, destroy_vifs and the two cleanup_* flags.
return self._cleanup(
context, instance, network_info,
def _cleanup(self, context, instance, network_info, block_device_info=None,
             destroy_vifs=True, cleanup_instance_dir=False,
             # NOTE(review): signature is truncated in this copy of the
             # source (the cleanup_instance_disks parameter and closing
             # parenthesis are missing) — restore from upstream.
    """Cleanup the domain and any attached resources from the host.
    This method cleans up any pmem devices, unplugs VIFs, disconnects
    attached volumes and undefines the instance domain within libvirt.
    It also optionally removes the ephemeral disks and the instance
    directory from the host depending on the cleanup_instance_dir|disks
    kwargs provided.
    :param context: security context
    :param instance: instance object for the instance being cleaned up
    :param network_info: instance network information
    :param block_device_info: optional instance block device information
    :param destroy_vifs: if plugged vifs should be unplugged
    :param cleanup_instance_dir: If the instance dir should be removed
    :param cleanup_instance_disks: If the instance disks should be removed
    """
    if destroy_vifs:
        self._unplug_vifs(instance, network_info, True)
    # Continue attempting to remove firewall filters for the instance
    # until it's done or there is a failure to remove the filters. If
    # unfilter fails because the instance is not yet shutdown, try to
    # destroy the guest again and then retry the unfilter.
    while True:
        # NOTE(review): the try: line preceding this call is missing in
        # this copy of the source.
        self.unfilter_instance(instance, network_info)
    except libvirt.libvirtError as e:
        state = self.get_info(instance).state
        except exception.InstanceNotFound:
            state = power_state.SHUTDOWN
        if state != power_state.SHUTDOWN:
            LOG.warning("Instance may be still running, destroy "
                        "it again.", instance=instance)
        errcode = e.get_error_code()
        LOG.exception(_('Error from libvirt during unfilter. '
                        'Code=%(errcode)s Error=%(e)s'),
                      {'errcode': errcode, 'e': e},
        reason = _("Error unfiltering instance.")
        raise exception.InstanceTerminationFailure(reason=reason)
    except Exception:
        # FIXME(wangpan): if the instance is booted again here, such as the
        # soft reboot operation boot it here, it will become
        # "running deleted", should we check and destroy it
        # at the end of this method?
        # NOTE(vish): we disconnect from volumes regardless
    block_device_mapping = driver.block_device_info_get_mapping(
    for vol in block_device_mapping:
        connection_info = vol['connection_info']
        disk_dev = vol['mount_device']
        if disk_dev is not None:
            disk_dev = disk_dev.rpartition("/")[2]
        # NOTE(review): a try: line is missing before this disconnect in
        # this copy of the source.
        self._disconnect_volume(context, connection_info, instance)
    except Exception as exc:
        with excutils.save_and_reraise_exception() as ctxt:
            if cleanup_instance_disks:
                # Don't block on Volume errors if we're trying to
                # delete the instance as we may be partially created
                # or deleted
                ctxt.reraise = False
                # NOTE(review): the LOG call wrapping the following
                # message/format arguments is missing.
                "Ignoring Volume Error on vol %(vol_id)s "
                "during delete %(exc)s",
                {'vol_id': vol.get('volume_id'),
                 'exc': encodeutils.exception_to_unicode(exc)},
    if cleanup_instance_disks:
        # NOTE(haomai): destroy volumes if needed
        if CONF.libvirt.images_type == 'lvm':
            self._cleanup_lvm(instance, block_device_info)
        if CONF.libvirt.images_type == 'rbd':
            # NOTE(review): the rbd cleanup call is missing here.
    if cleanup_instance_dir:
        attempts = int(instance.system_metadata.get('clean_attempts',
        success = self.delete_instance_files(instance)
        # NOTE(mriedem): This is used in the _run_pending_deletes periodic
        # task in the compute manager. The tight coupling is not great...
        instance.system_metadata['clean_attempts'] = str(attempts + 1)
        if success:
            instance.cleaned = True
def _detach_encrypted_volumes(self, instance, block_device_info):
    """Detaches encrypted volumes attached to instance."""
    disks = self._get_instance_disk_info(instance, block_device_info)
    # Keep only the disk paths that dm-crypt reports as encrypted.
    encrypted_volumes = filter(dmcrypt.is_encrypted,
                               [disk['path'] for disk in disks])
    for path in encrypted_volumes:
        # NOTE(review): the loop body is missing from this copy of the
        # source — restore from upstream before use.
def _get_serial_ports_from_guest(self, guest, mode=None):
    """Yield (host, port) tuples for TCP serial/console devices on guest.

    :param guest: the Guest object whose XML description is inspected
    :param mode: Should be a value in (None, bind, connect); when given,
                 only sources with a matching ``mode`` attribute are
                 returned.
    """
    domain_tree = etree.fromstring(guest.get_xml_desc())
    # The 'serial' device is the base for x86 platforms. Other platforms
    # (e.g. kvm on system z = S390X) can only use 'console' devices.
    mode_filter = "[@mode='%s']" % mode if mode else ""
    sources = domain_tree.findall(
        "./devices/serial[@type='tcp']/source" + mode_filter)
    if not sources:
        sources = domain_tree.findall(
            "./devices/console[@type='tcp']/source" + mode_filter)
    for src in sources:
        yield (src.get("host"), int(src.get("service")))
def _get_scsi_controller_next_unit(self, guest):
    """Return the next free disk unit number on the guest's SCSI bus.

    Scans the domain XML for SCSI disks and returns one more than the
    highest ``unit`` attribute in use, or 0 when no SCSI disk exists.
    """
    domain_tree = etree.fromstring(guest.get_xml_desc())
    query = "./devices/disk[target/@bus='scsi']/address[@type='drive']"
    units = [int(node.get('unit', 0)) for node in domain_tree.xpath(query)]
    if not units:
        return 0
    return max(units) + 1
def _get_rbd_driver():
    # Build and return the RBD storage driver helper.
    # NOTE(review): the constructor arguments (and closing parenthesis)
    # are missing from this copy of the source — restore from upstream.
    return rbd_utils.RBDDriver(
def _cleanup_rbd(self, instance):
    # NOTE(nic): On revert_resize, the cleanup steps for the root
    # volume are handled with an "rbd snap rollback" command,
    # and none of this is needed (and is, in fact, harmful) so
    # filter out non-ephemerals from the list
    if instance.task_state == task_states.RESIZE_REVERTING:
        # NOTE(review): the second operand of this lambda, the else:
        # branch marker and the final cleanup call are missing from this
        # copy of the source — restore from upstream before use.
        filter_fn = lambda disk: (disk.startswith(instance.uuid) and
        filter_fn = lambda disk: disk.startswith(instance.uuid)
def _cleanup_lvm(self, instance, block_device_info):
    """Delete all LVM disks for given instance object."""
    if instance.get('ephemeral_key_uuid') is not None:
        # Encrypted ephemeral disks must be detached before the backing
        # logical volumes can be removed.
        self._detach_encrypted_volumes(instance, block_device_info)
    disks = self._lvm_disks(instance)
    if disks:
        # NOTE(review): the statement that removes the logical volumes is
        # missing from this copy of the source — restore from upstream.
def _lvm_disks(self, instance):
    """Return the full paths of all LVM disks for the given instance.

    Looks in the configured images volume group under /dev; an empty
    list is returned when no volume group is configured or its device
    path does not exist.
    """
    if not CONF.libvirt.images_volume_group:
        return []
    vg_path = os.path.join('/dev', CONF.libvirt.images_volume_group)
    if not os.path.exists(vg_path):
        return []
    # Instance-owned volumes are named '<uuid>_...'.
    prefix = '%s_' % instance.uuid
    return [os.path.join(vg_path, name)
            for name in lvm.list_volumes(vg_path)
            if name.startswith(prefix)]
def get_volume_connector(self, instance):
    """Return volume connector properties describing this host."""
    root_helper = utils.get_root_helper()
    return connector.get_connector_properties(
        root_helper, CONF.my_block_storage_ip,
        # NOTE(review): the remaining arguments of this call are missing
        # from this copy of the source — restore from upstream.
def _cleanup_resize(self, context, instance, network_info):
    """Remove the leftover '<instance_path>_resize' directory and any
    backend-specific resize artifacts after a resize/revert.
    """
    inst_base = libvirt_utils.get_instance_path(instance)
    target = inst_base + '_resize'
    # Deletion can fail over NFS, so retry the deletion as required.
    # Set maximum attempt as 5, most test can remove the directory
    # for the second time.
    attempts = 0
    while(os.path.exists(target) and attempts < 5):
        shutil.rmtree(target, ignore_errors=True)
        if os.path.exists(target):
            # Random back-off of 0.2-2.0 seconds between retries.
            time.sleep(random.randint(20, 200) / 100.0)
        attempts += 1
    # NOTE(mriedem): Some image backends will recreate the instance path
    # and during init, and all we need the root disk for
    # here is removing cloned snapshots which is backend-specific, so
    # check that first before initializing the image backend object. If
    # there is ever an image type that supports clone *and* re-creates
    # the instance directory and on init, this condition will
    # need to be re-visited to make sure that backend doesn't re-create
    # the disk. Refer to bugs: 1666831 1728603 1769131
    if self.image_backend.backend(CONF.libvirt.images_type).SUPPORTS_CLONE:
        root_disk = self.image_backend.by_name(instance, 'disk')
        if root_disk.exists():
            # NOTE(review): the snapshot-removal statement is missing from
            # this copy of the source.
    # NOTE(review): the condition below is corrupted ('if !=') — the
    # original operands are lost; restore from upstream before use.
    if !=
        self.unplug_vifs(instance, network_info)
        self.unfilter_instance(instance, network_info)
def _get_volume_driver(self, connection_info):
    """Look up the volume driver registered for this connection type.

    :param connection_info: dict carrying a 'driver_volume_type' key
    :raises: exception.VolumeDriverNotFound when no driver is registered
             for the connection's driver_volume_type
    """
    volume_type = connection_info.get('driver_volume_type')
    if volume_type in self.volume_drivers:
        return self.volume_drivers[volume_type]
    raise exception.VolumeDriverNotFound(driver_type=volume_type)
def _connect_volume(self, context, connection_info, instance,
                    encryption=None, allow_native_luks=True):
    # Connect the volume on the host via its driver, then attach any
    # required encryptor, rolling the connection back on failure.
    vol_driver = self._get_volume_driver(connection_info)
    vol_driver.connect_volume(connection_info, instance)
    # NOTE(review): the try: line and the self._attach_encryptor( call
    # opening are missing from this copy of the source; the orphaned
    # argument line below belonged to that call.
        context, connection_info, encryption, allow_native_luks)
    except Exception:
        # Encryption failed so rollback the volume connection.
        with excutils.save_and_reraise_exception(logger=LOG):
            LOG.exception("Failure attaching encryptor; rolling back "
                          "volume connection", instance=instance)
            vol_driver.disconnect_volume(connection_info, instance)
def _should_disconnect_target(self, context, connection_info, instance):
    """Return False when a multiattach volume is still attached to
    another server on this host, True otherwise.
    """
    connection_count = 0
    # NOTE(jdg): Multiattach is a special case (not to be confused
    # with shared_targets). With multiattach we may have a single volume
    # attached multiple times to *this* compute node (ie Server-1 and
    # Server-2). So, if we receive a call to delete the attachment for
    # Server-1 we need to take special care to make sure that the Volume
    # isn't also attached to another Server on this Node. Otherwise we
    # will indiscriminantly delete the connection for all Server and that's
    # no good. So check if it's attached multiple times on this node
    # if it is we skip the call to brick to delete the connection.
    if connection_info.get('multiattach', False):
        # NOTE(review): the arguments of the two calls below are missing
        # from this copy of the source — restore from upstream.
        volume = self._volume_api.get(
        attachments = volume.get('attachments', {})
        if len(attachments) > 1:
            # First we get a list of all Server UUID's associated with
            # this Host (Compute Node). We're going to use this to
            # determine if the Volume being detached is also in-use by
            # another Server on this Host, ie just check to see if more
            # than one attachment.server_id for this volume is in our
            # list of Server UUID's for this Host
            servers_this_host = objects.InstanceList.get_uuids_by_host(
            # NOTE(jdg): nova.volume.cinder translates the
            # volume['attachments'] response into a dict which includes
            # the Server UUID as the key, so we're using that
            # here to check against our server_this_host list
            for server_id, data in attachments.items():
                if server_id in servers_this_host:
                    connection_count += 1
    return (False if connection_count > 1 else True)
def _disconnect_volume(self, context, connection_info, instance,
                       # NOTE(review): the rest of the signature (an
                       # encryption keyword, presumably) is missing from
                       # this copy of the source — confirm upstream.
    self._detach_encryptor(context, connection_info, encryption=encryption)
    if self._should_disconnect_target(context, connection_info, instance):
        vol_driver = self._get_volume_driver(connection_info)
        vol_driver.disconnect_volume(connection_info, instance)
    # NOTE(review): the line below is garbled — a statement (likely a log
    # call) between 'else:' and the message text was lost, along with the
    # closing arguments.
    else:"Detected multiple connections on this host for volume: "
        "%s, skipping target disconnect.",
def _extend_volume(self, connection_info, instance):
    """Extend the connected volume via its driver; return the new size."""
    driver_for_volume = self._get_volume_driver(connection_info)
    return driver_for_volume.extend_volume(connection_info, instance)
def _use_native_luks(self, encryption=None):
    """Report whether native QEMU LUKS decryption should be used.

    True only when the encryption metadata names LUKS as the provider
    (legacy provider class paths are normalised first) and native LUKS
    support is available on this host.
    """
    provider = encryption.get('provider', None) if encryption else None
    # Map legacy encryptor class paths onto their short format names.
    provider = encryptors.LEGACY_PROVIDER_CLASS_TO_FORMAT_MAP.get(
        provider, provider)
    return provider == encryptors.LUKS and self._is_native_luks_available()
def _get_volume_config(self, connection_info, disk_info):
    """Build the libvirt guest disk config for an attached volume."""
    return self._get_volume_driver(connection_info).get_config(
        connection_info, disk_info)
def _get_volume_encryptor(self, connection_info, encryption):
    # Build an os-brick volume encryptor for this connection.
    root_helper = utils.get_root_helper()
    return encryptors.get_volume_encryptor(root_helper=root_helper,
        # NOTE(review): the remaining keyword arguments of this call are
        # missing from this copy of the source — restore from upstream.
def _get_volume_encryption(self, context, connection_info):
    """Fetch the encryption metadata dict for a volume connection.

    The metadata is looked up via the volume API; an empty dict is
    returned when the connection does not reference a volume id.
    """
    volume_id = driver_block_device.get_volume_id(connection_info)
    if not volume_id:
        return {}
    return encryptors.get_encryption_metadata(
        context, self._volume_api, volume_id, connection_info)
def _attach_encryptor(self, context, connection_info, encryption,
                      # NOTE(review): the rest of the signature (an
                      # allow_native_luks keyword, presumably) is missing
                      # from this copy of the source — confirm upstream.
    """Attach the frontend encryptor if one is required by the volume.
    The request context is only used when an encryption metadata dict is
    not provided. The encryption metadata dict being populated is then used
    to determine if an attempt to attach the encryptor should be made.
    If native LUKS decryption is enabled then create a Libvirt volume
    secret containing the LUKS passphrase for the volume.
    """
    if encryption is None:
        encryption = self._get_volume_encryption(context, connection_info)
    # NOTE(review): the remainder of this condition is missing from this
    # copy of the source.
    if (encryption and allow_native_luks and
        # NOTE(lyarwood): Fetch the associated key for the volume and
        # decode the passphrase from the key.
        # FIXME(lyarwood): c-vol currently creates symmetric keys for use
        # with volumes, leading to the binary to hex to string conversion
        # below.
        keymgr = key_manager.API(CONF)
        key = keymgr.get(context, encryption['encryption_key_id'])
        key_encoded = key.get_encoded()
        passphrase = binascii.hexlify(key_encoded).decode('utf-8')
        # NOTE(lyarwood): Retain the behaviour of the original os-brick
        # encryptors and format any volume that does not identify as
        # encrypted with LUKS.
        # FIXME(lyarwood): Remove this once c-vol correctly formats
        # encrypted volumes during their initial creation:
        device_path = connection_info.get('data').get('device_path')
        if device_path:
            root_helper = utils.get_root_helper()
            if not luks_encryptor.is_luks(root_helper, device_path):
                # NOTE(review): the closing arguments of this call are
                # missing from this copy of the source.
                encryptor = self._get_volume_encryptor(connection_info,
                encryptor._format_volume(passphrase, **encryption)
        # NOTE(lyarwood): Store the passphrase as a libvirt secret locally
        # on the compute node. This secret is used later when generating
        # the volume config.
        volume_id = driver_block_device.get_volume_id(connection_info)
        self._host.create_secret('volume', volume_id, password=passphrase)
    elif encryption:
        # NOTE(review): the closing arguments of this call are missing
        # from this copy of the source.
        encryptor = self._get_volume_encryptor(connection_info,
        encryptor.attach_volume(context, **encryption)
def _detach_encryptor(self, context, connection_info, encryption):
    """Detach the frontend encryptor if one is required by the volume.
    The request context is only used when an encryption metadata dict is
    not provided. The encryption metadata dict being populated is then used
    to determine if an attempt to detach the encryptor should be made.
    If native LUKS decryption is enabled then delete previously created
    Libvirt volume secret from the host.
    """
    volume_id = driver_block_device.get_volume_id(connection_info)
    if volume_id and self._host.find_secret('volume', volume_id):
        return self._host.delete_secret('volume', volume_id)
    if encryption is None:
        encryption = self._get_volume_encryption(context, connection_info)
    # NOTE(lyarwood): Handle bug #1821696 where volume secrets have been
    # removed manually by returning if native LUKS decryption is available
    # and device_path is not present in the connection_info. This avoids
    # VolumeEncryptionNotSupported being thrown when we incorrectly build
    # the encryptor below due to the secrets not being present above.
    if (encryption and self._use_native_luks(encryption) and
            not connection_info['data'].get('device_path')):
        # NOTE(review): the body of this branch (a return, presumably) is
        # missing from this copy of the source — confirm upstream.
    if encryption:
        # NOTE(review): the closing arguments and the detach call are
        # missing from this copy of the source.
        encryptor = self._get_volume_encryptor(connection_info,
def _check_discard_for_attach_volume(self, conf, instance):
    """Perform some checks for volumes configured for discard support.
    If discard is configured for the volume, and the guest is using a
    configuration known to not work, we will log a message explaining
    the reason why.
    """
    if conf.driver_discard == 'unmap' and conf.target_bus == 'virtio':
        # NOTE(review): the trailing arguments of this debug call are
        # missing from this copy of the source.
        LOG.debug('Attempting to attach volume %(id)s with discard '
                  'support enabled to an instance using an '
                  'unsupported configuration. target_bus = '
                  '%(bus)s. Trim commands will not be issued to '
                  'the storage device.',
                  {'bus': conf.target_bus,
                   'id': conf.serial},
def attach_volume(self, context, connection_info, instance, mountpoint,
                  disk_bus=None, device_type=None, encryption=None):
    """Attach a volume to the instance at the given mountpoint."""
    guest = self._host.get_guest(instance)
    disk_dev = mountpoint.rpartition("/")[2]
    bdm = {
        'device_name': disk_dev,
        'disk_bus': disk_bus,
        'device_type': device_type}
    # Note(cfb): If the volume has a custom block size, check that
    # that we are using QEMU/KVM and libvirt >= 0.10.2. The
    # presence of a block size is considered mandatory by
    # cinder so we fail if we can't honor the request.
    data = {}
    if ('data' in connection_info):
        data = connection_info['data']
    if ('logical_block_size' in data or 'physical_block_size' in data):
        if ((CONF.libvirt.virt_type != "kvm" and
             CONF.libvirt.virt_type != "qemu")):
            msg = _("Volume sets block size, but the current "
                    "libvirt hypervisor '%s' does not support custom "
                    "block size") % CONF.libvirt.virt_type
            raise exception.InvalidHypervisorType(msg)
    # NOTE(review): the closing arguments of this call, and the try: lines
    # guarding the attach below, are missing from this copy of the source.
    self._connect_volume(context, connection_info, instance,
    disk_info = blockinfo.get_info_from_bdm(
        instance, CONF.libvirt.virt_type, instance.image_meta, bdm)
    if disk_info['bus'] == 'scsi':
        disk_info['unit'] = self._get_scsi_controller_next_unit(guest)
    conf = self._get_volume_config(connection_info, disk_info)
    self._check_discard_for_attach_volume(conf, instance)
    state = guest.get_power_state(self._host)
    live = state in (power_state.RUNNING, power_state.PAUSED)
    guest.attach_device(conf, persistent=True, live=live)
    # NOTE(artom) If we're attaching with a device role tag, we need to
    # rebuild device_metadata. If we're attaching without a role
    # tag, we're rebuilding it here needlessly anyways. This isn't a
    # massive deal, and it helps reduce code complexity by not having
    # to indicate to the virt driver that the attach is tagged. The
    # really important optimization of not calling the database unless
    # device_metadata has actually changed is done for us by
    instance.device_metadata = self._build_device_metadata(
        context, instance)
    # TODO(lyarwood) Remove the following breadcrumb once all supported
    # distributions provide Libvirt 3.3.0 or earlier with
    #;a=commit;h=7189099 applied.
    except libvirt.libvirtError as ex:
        with excutils.save_and_reraise_exception():
            if 'Incorrect number of padding bytes' in six.text_type(ex):
                LOG.warning(_('Failed to attach encrypted volume due to a '
                              'known Libvirt issue, see the following bug '
                              'for details: '
            LOG.exception(_('Failed to attach volume at mountpoint: '
                            '%s'), mountpoint, instance=instance)
            # NOTE(review): the closing arguments of this call are missing
            # from this copy of the source.
            self._disconnect_volume(context, connection_info, instance,
    except Exception:
        LOG.exception(_('Failed to attach volume at mountpoint: %s'),
                      mountpoint, instance=instance)
        with excutils.save_and_reraise_exception():
            # NOTE(review): the closing arguments of this call are missing
            # from this copy of the source.
            self._disconnect_volume(context, connection_info, instance,
def _swap_volume(self, guest, disk_path, conf, resize_to):
    """Swap existing disk with a new block device."""
    dev = guest.get_block_device(disk_path)
    # Save a copy of the domain's persistent XML file. We'll use this
    # to redefine the domain if anything fails during the volume swap.
    xml = guest.get_xml_desc(dump_inactive=True, dump_sensitive=True)
    # Abort is an idempotent operation, so make sure any block
    # jobs which may have failed are ended.
    # NOTE(review): the try: line and the abort_job call it guarded are
    # missing from this copy of the source.
    except Exception:
    # NOTE (rmk): blockRebase cannot be executed on persistent
    # domains, so we need to temporarily undefine it.
    # If any part of this block fails, the domain is
    # re-defined regardless.
    if guest.has_persistent_configuration():
        support_uefi = self._has_uefi_support()
    # Start copy with VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT flag to
    # allow writing to existing external volume file. Use
    # VIR_DOMAIN_BLOCK_REBASE_COPY_DEV if it's a block device to
    # make sure XML is generated correctly (bug 1691195)
    copy_dev = conf.source_type == 'block'
    # NOTE(review): the trailing arguments of rebase, the loop body and
    # the surrounding try: are missing from this copy of the source.
    dev.rebase(conf.source_path, copy=True, reuse_ext=True,
    while not dev.is_job_complete():
    except Exception as exc:
        LOG.exception("Failure rebasing volume %(new_path)s on "
                      "%(old_path)s.", {'new_path': conf.source_path,
                                        'old_path': disk_path})
        raise exception.VolumeRebaseFailed(reason=six.text_type(exc))
    if resize_to:
        dev.resize(resize_to * units.Gi / units.Ki)
    # Make sure we will redefine the domain using the updated
    # configuration after the volume was swapped. The dump_inactive
    # keyword arg controls whether we pull the inactive (persistent)
    # or active (live) config from the domain. We want to pull the
    # live config after the volume was updated to use when we redefine
    # the domain.
    xml = guest.get_xml_desc(dump_inactive=False, dump_sensitive=True)
def swap_volume(self, context, old_connection_info,
                new_connection_info, instance, mountpoint, resize_to):
    """Replace an attached volume with a new one at the same mountpoint."""
    # NOTE(lyarwood):
    old_encrypt = self._get_volume_encryption(context, old_connection_info)
    new_encrypt = self._get_volume_encryption(context, new_connection_info)
    # Native LUKS decryption cannot survive a swap, so refuse it.
    if ((old_encrypt and self._use_native_luks(old_encrypt)) or
            (new_encrypt and self._use_native_luks(new_encrypt))):
        raise NotImplementedError(_("Swap volume is not supported for "
            "encrypted volumes when native LUKS decryption is enabled."))
    guest = self._host.get_guest(instance)
    disk_dev = mountpoint.rpartition("/")[2]
    if not guest.get_disk(disk_dev):
        raise exception.DiskNotFound(location=disk_dev)
    disk_info = {
        'dev': disk_dev,
        'bus': blockinfo.get_disk_bus_for_disk_dev(
            CONF.libvirt.virt_type, disk_dev),
        'type': 'disk',
    # NOTE(review): the closing brace of this dict is missing from this
    # copy of the source.
    # NOTE (lyarwood): new_connection_info will be modified by the
    # following _connect_volume call down into the volume drivers. The
    # majority of the volume drivers will add a device_path that is in turn
    # used by _get_volume_config to set the source_path of the
    # LibvirtConfigGuestDisk object it returns. We do not explicitly save
    # this to the BDM here as the upper compute swap_volume method will
    # eventually do this for us.
    self._connect_volume(context, new_connection_info, instance)
    conf = self._get_volume_config(new_connection_info, disk_info)
    if not conf.source_path:
        self._disconnect_volume(context, new_connection_info, instance)
        raise NotImplementedError(_("Swap only supports host devices"))
    # NOTE(review): the try: line guarding the swap is missing from this
    # copy of the source.
    self._swap_volume(guest, disk_dev, conf, resize_to)
    except exception.VolumeRebaseFailed:
        with excutils.save_and_reraise_exception():
            self._disconnect_volume(context, new_connection_info, instance)
    self._disconnect_volume(context, old_connection_info, instance)
def _get_existing_domain_xml(self, instance, network_info,
                             # NOTE(review): the rest of the signature is
                             # missing from this copy of the source.
    # Fetch the live domain XML, or regenerate it when the domain is gone.
    # NOTE(review): the try: line is missing before this lookup.
    guest = self._host.get_guest(instance)
    xml = guest.get_xml_desc()
    except exception.InstanceNotFound:
        # NOTE(review): the trailing arguments of the two calls below are
        # missing from this copy of the source.
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
        xml = self._get_guest_xml(nova_context.get_admin_context(),
                                  instance, network_info, disk_info,
    return xml
def detach_volume(self, context, connection_info, instance, mountpoint,
                  # NOTE(review): the rest of the signature is missing
                  # from this copy of the source.
    disk_dev = mountpoint.rpartition("/")[2]
    # NOTE(review): the try: line guarding this lookup is missing.
    guest = self._host.get_guest(instance)
    state = guest.get_power_state(self._host)
    live = state in (power_state.RUNNING, power_state.PAUSED)
    # NOTE(lyarwood): The volume must be detached from the VM before
    # detaching any attached encryptors or disconnecting the underlying
    # volume in _disconnect_volume. Otherwise, the encryptor or volume
    # driver may report that the volume is still in use.
    # NOTE(review): the trailing arguments of this call are missing.
    wait_for_detach = guest.detach_device_with_retry(guest.get_disk,
    except exception.InstanceNotFound:
        # NOTE(zhaoqin): If the instance does not exist, _lookup_by_name()
        # will throw InstanceNotFound exception. Need to
        # disconnect volume under this circumstance.
        LOG.warning("During detach_volume, instance disappeared.",
    except exception.DeviceNotFound:
        # We should still try to disconnect logical device from
        # host, an error might have happened during a previous
        # call.
        # NOTE(review): the line below lost its call — the message was
        # fused onto the comment ('# call."Device %s not found...').
        disk_dev, instance=instance)
    except libvirt.libvirtError as ex:
        # NOTE(vish): This is called to cleanup volumes after live
        # migration, so we should still disconnect even if
        # the instance doesn't exist here anymore.
        error_code = ex.get_error_code()
        if error_code == libvirt.VIR_ERR_NO_DOMAIN:
            # NOTE(vish):
            LOG.warning("During detach_volume, instance disappeared.",
    # NOTE(review): the closing arguments of this call are missing.
    self._disconnect_volume(context, connection_info, instance,
def extend_volume(self, connection_info, instance):
    """Extend an attached volume and resize it inside the running guest."""
    # NOTE(review): the try: line guarding this call is missing from this
    # copy of the source.
    new_size = self._extend_volume(connection_info, instance)
    except NotImplementedError:
        raise exception.ExtendVolumeNotSupported()
    # Resize the device in QEMU so its size is updated and
    # detected by the instance without rebooting.
    guest = self._host.get_guest(instance)
    state = guest.get_power_state(self._host)
    active_state = state in (power_state.RUNNING, power_state.PAUSED)
    if active_state:
        disk_path = connection_info['data']['device_path']
        LOG.debug('resizing block device %(dev)s to %(size)u kb',
                  {'dev': disk_path, 'size': new_size})
        dev = guest.get_block_device(disk_path)
        dev.resize(new_size // units.Ki)
    # NOTE(review): the else: branch marker for this debug message is
    # missing from this copy of the source.
    LOG.debug('Skipping block device resize, guest is not running',
    except exception.InstanceNotFound:
        with excutils.save_and_reraise_exception():
            LOG.warning('During extend_volume, instance disappeared.',
    except libvirt.libvirtError:
        with excutils.save_and_reraise_exception():
            LOG.exception('resizing block device failed.',
def attach_interface(self, context, instance, image_meta, vif):
    """Plug a VIF and attach the network interface to the guest."""
    guest = self._host.get_guest(instance)
    self.vif_driver.plug(instance, vif)
    self.firewall_driver.setup_basic_filtering(instance, [vif])
    # NOTE(review): the trailing arguments of this call and the try:
    # line guarding the attach are missing from this copy of the source.
    cfg = self.vif_driver.get_config(instance, vif, image_meta,
    state = guest.get_power_state(self._host)
    live = state in (power_state.RUNNING, power_state.PAUSED)
    guest.attach_device(cfg, persistent=True, live=live)
    except libvirt.libvirtError:
        LOG.error('attaching network adapter failed.',
                  instance=instance, exc_info=True)
        self.vif_driver.unplug(instance, vif)
        # NOTE(review): the arguments of this raise are missing.
        raise exception.InterfaceAttachFailed(
    # NOTE(artom) If we're attaching with a device role tag, we need to
    # rebuild device_metadata. If we're attaching without a role
    # tag, we're rebuilding it here needlessly anyways. This isn't a
    # massive deal, and it helps reduce code complexity by not having
    # to indicate to the virt driver that the attach is tagged. The
    # really important optimization of not calling the database unless
    # device_metadata has actually changed is done for us by
    instance.device_metadata = self._build_device_metadata(
        context, instance)
    except Exception:
        # NOTE(artom) If we fail here it means the interface attached
        # successfully but building and/or saving the device metadata
        # failed. Just unplugging the vif is therefore not enough cleanup,
        # we need to detach the interface.
        with excutils.save_and_reraise_exception(reraise=False):
            LOG.error('Interface attached successfully but building '
                      'and/or saving device metadata failed.',
                      instance=instance, exc_info=True)
            self.detach_interface(context, instance, vif)
        # NOTE(review): the arguments of this raise are missing.
        raise exception.InterfaceAttachFailed(
def detach_interface(self, context, instance, vif):
    """Detach a network interface from the guest and unplug its VIF."""
    guest = self._host.get_guest(instance)
    cfg = self.vif_driver.get_config(instance, vif,
                                     CONF.libvirt.virt_type, self._host)
    interface = guest.get_interface_by_cfg(cfg)
    # NOTE(review): a try: line is missing before the unplug/detach
    # sequence in this copy of the source.
    self.vif_driver.unplug(instance, vif)
    # NOTE(mriedem): When deleting an instance and using Neutron,
    # we can be racing against Neutron deleting the port and
    # sending the vif-deleted event which then triggers a call to
    # detach the interface, so if the interface is not found then
    # we can just log it as a warning.
    if not interface:
        mac = vif.get('address')
        # The interface is gone so just log it as a warning.
        LOG.warning('Detaching interface %(mac)s failed because '
                    'the device is no longer found on the guest.',
                    {'mac': mac}, instance=instance)
    state = guest.get_power_state(self._host)
    live = state in (power_state.RUNNING, power_state.PAUSED)
    # Now we are going to loop until the interface is detached or we
    # timeout.
    # NOTE(review): the trailing arguments of this call are missing from
    # this copy of the source.
    wait_for_detach = guest.detach_device_with_retry(
        guest.get_interface_by_cfg, cfg, live=live,
    except exception.DeviceDetachFailed:
        # We failed to detach the device even with the retry loop, so let's
        # dump some debug information to the logs before raising back up.
        with excutils.save_and_reraise_exception():
            devname = self.vif_driver.get_vif_devname(vif)
            interface = guest.get_interface_by_cfg(cfg)
            if interface:
                # NOTE(review): the LOG call wrapping the following
                # message/format arguments is missing.
                'Failed to detach interface %(devname)s after '
                'repeated attempts. Final interface xml:\n'
                '%(interface_xml)s\nFinal guest xml:\n%(guest_xml)s',
                {'devname': devname,
                 'interface_xml': interface.to_xml(),
                 'guest_xml': guest.get_xml_desc()},
    except exception.DeviceNotFound:
        # The interface is gone so just log it as a warning.
        LOG.warning('Detaching interface %(mac)s failed because '
                    'the device is no longer found on the guest.',
                    {'mac': vif.get('address')}, instance=instance)
    except libvirt.libvirtError as ex:
        error_code = ex.get_error_code()
        if error_code == libvirt.VIR_ERR_NO_DOMAIN:
            LOG.warning("During detach_interface, instance disappeared.",
        # NOTE(mriedem): When deleting an instance and using Neutron,
        # we can be racing against Neutron deleting the port and
        # sending the vif-deleted event which then triggers a call to
        # detach the interface, so we might have failed because the
        # network device no longer exists. Libvirt will fail with
        # "operation failed: no matching network device was found"
        # which unfortunately does not have a unique error code so we
        # need to look up the interface by config and if it's not found
        # then we can just log it as a warning rather than tracing an
        # error.
        mac = vif.get('address')
        interface = guest.get_interface_by_cfg(cfg)
        if interface:
            LOG.error('detaching network adapter failed.',
                      instance=instance, exc_info=True)
            # NOTE(review): the arguments of this raise are missing.
            raise exception.InterfaceDetachFailed(
        # The interface is gone so just log it as a warning.
        LOG.warning('Detaching interface %(mac)s failed because '
                    'the device is no longer found on the guest.',
                    {'mac': mac}, instance=instance)
def _create_snapshot_metadata(self, image_meta, instance,
                              img_fmt, snp_name):
    """Build the Glance metadata dict for an instance snapshot image."""
    metadata = {'status': 'active',
                'name': snp_name,
                'properties': {
                    'kernel_id': instance.kernel_id,
                    'image_location': 'snapshot',
                    'image_state': 'available',
                    'owner_id': instance.project_id,
                    'ramdisk_id': instance.ramdisk_id,
    # NOTE(review): the closing braces of this dict literal are missing
    # from this copy of the source — restore from upstream before use.
    if instance.os_type:
        metadata['properties']['os_type'] = instance.os_type
    # NOTE(vish): glance forces ami disk format to be ami
    if image_meta.disk_format == 'ami':
        metadata['disk_format'] = 'ami'
    # NOTE(review): the else: branch marker for this assignment is
    # missing from this copy of the source.
    metadata['disk_format'] = img_fmt
    if image_meta.obj_attr_is_set("container_format"):
        metadata['container_format'] = image_meta.container_format
    # NOTE(review): the else: branch marker for this assignment is
    # missing from this copy of the source.
    metadata['container_format'] = "bare"
    return metadata
# NOTE(review): this method appears to have lost many lines during
# extraction: indentation is stripped, several `try:` statements are
# missing before their `except` clauses, call argument lists are
# truncated mid-expression, and `LOG.info(...)` calls were fused onto
# preceding lines as `"..."` residue. It is not syntactically
# valid as-is and should be restored from the upstream Nova libvirt
# driver rather than edited piecemeal. Comments below annotate the
# visible flow; hedged where the dropped text makes intent uncertain.
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+
guest = self._host.get_guest(instance)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
# We should be able to remove virt_dom at the end.
virt_dom = guest._domain
# NOTE(review): the `try:` that pairs with this except was lost;
# presumably it wrapped the get_guest() call above — confirm upstream.
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
snapshot = self._image_api.get(context, image_id)
# source_format is an on-disk format
# source_type is a backend type
disk_path, source_format = libvirt_utils.find_disk(guest)
source_type = libvirt_utils.get_disk_type_from_path(disk_path)
# We won't have source_type for raw or qcow2 disks, because we can't
# determine that from the path. We should have it from the libvirt
# xml, though.
if source_type is None:
source_type = source_format
# For lxc instances we won't have it either from libvirt xml
# (because we just gave libvirt the mounted filesystem), or the path,
# so source_type is still going to be None. In this case,
# root_disk is going to default to CONF.libvirt.images_type
# below, which is still safe.
image_format = CONF.libvirt.snapshot_image_format or source_type
# NOTE(bfilippov): save lvm and rbd as raw
if image_format == 'lvm' or image_format == 'rbd':
image_format = 'raw'
# NOTE(review): the remaining arguments of this call (instance,
# image_format, snapshot['name'] upstream) were lost in extraction.
metadata = self._create_snapshot_metadata(instance.image_meta,
snapshot_name = uuidutils.generate_uuid(dashed=False)
state = guest.get_power_state(self._host)
# NOTE(dgenin): Instances with LVM encrypted ephemeral storage require
# cold snapshots. Currently, checking for encryption is
# redundant because LVM supports only cold snapshots.
# It is necessary in case this situation changes in the
# future.
if (self._host.has_min_version(hv_type=host.HV_DRIVER_QEMU)
and source_type not in ('lvm')
and not CONF.ephemeral_storage_encryption.enabled
and not CONF.workarounds.disable_libvirt_livesnapshot
# NOTE(rmk): We cannot perform live snapshots when a
# managedSave file is present, so we will use the cold/legacy
# method for instances which are shutdown or paused.
# NOTE(mriedem): Live snapshot doesn't work with paused
# instances on older versions of libvirt/qemu. We can likely
# remove the restriction on PAUSED once we require
# libvirt>=3.6.0 and qemu>=2.10 since that works with the
# Pike Ubuntu Cloud Archive testing in Queens.
and state not in (power_state.SHUTDOWN, power_state.PAUSED)):
live_snapshot = True
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended. This operation also
# confirms the running instance, as opposed to the system as a
# whole, has a new enough version of the hypervisor (bug 1193146).
# NOTE(review): the `try:` and the block-job abort call it wrapped
# were lost before this except clause — confirm against upstream.
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
live_snapshot = False
# NOTE(review): this duplicate assignment likely belongs to a lost
# `else:` branch (the non-QEMU / cold path).
live_snapshot = False
self._prepare_domain_for_snapshot(context, live_snapshot, state,
root_disk = self.image_backend.by_libvirt_path(
instance, disk_path, image_type=source_type)
# NOTE(review): the `"Beginning ..."` fragments below are the
# remains of LOG.info(...) calls fused onto these lines.
if live_snapshot:"Beginning live snapshot process", instance=instance)
else:"Beginning cold snapshot process", instance=instance)
metadata['location'] = root_disk.direct_snapshot(
context, snapshot_name, image_format, image_id,
self._snapshot_domain(context, live_snapshot, virt_dom, state,
self._image_api.update(context, image_id, metadata,
# Direct-snapshot failures fall back to the standard (qemu-img
# based) snapshot path below.
except (NotImplementedError, exception.ImageUnacceptable,
exception.Forbidden) as e:
if type(e) != NotImplementedError:
LOG.warning('Performing standard snapshot because direct '
'snapshot failed: %(error)s',
{'error': encodeutils.exception_to_unicode(e)})
failed_snap = metadata.pop('location', None)
if failed_snap:
failed_snap = {'url': str(failed_snap)}
# TODO(nic): possibly abstract this out to the root_disk
if source_type == 'rbd' and live_snapshot:
# Standard snapshot uses qemu-img convert from RBD which is
# not safe to run with live_snapshot.
live_snapshot = False
# Suspend the guest, so this is no longer a live snapshot
self._prepare_domain_for_snapshot(context, live_snapshot,
state, instance)
snapshot_directory = CONF.libvirt.snapshots_directory
with utils.tempdir(dir=snapshot_directory) as tmpdir:
out_path = os.path.join(tmpdir, snapshot_name)
if live_snapshot:
# NOTE(xqueralt): libvirt needs o+x in the tempdir
os.chmod(tmpdir, 0o701)
self._live_snapshot(context, instance, guest,
disk_path, out_path, source_format,
image_format, instance.image_meta)
root_disk.snapshot_extract(out_path, image_format)"Snapshot extracted, beginning image upload",
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:'Instance %(instance_name)s disappeared '
'while taking snapshot of it: [Error Code '
'%(error_code)s] %(ex)s',
'error_code': error_code,
'ex': ex},
raise exception.InstanceNotFound(
self._snapshot_domain(context, live_snapshot, virt_dom,
state, instance)
# Upload that image to the image service
with libvirt_utils.file_open(out_path, 'rb') as image_file:
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to snapshot image"))
failed_snap = metadata.pop('location', None)
if failed_snap:
failed_snap = {'url': str(failed_snap)}
failed_snap, also_destroy_volume=True,
ignore_errors=True)"Snapshot image upload complete", instance=instance)
def _prepare_domain_for_snapshot(self, context, live_snapshot, state,
                                 instance):
    """Suspend the domain before taking a cold snapshot.

    Live snapshots need the guest running, so nothing is done in that
    case; for a cold snapshot a running or paused guest is suspended
    (managedSave) first.

    BUGFIX(review): the signature's continuation line (`instance):`)
    was lost in the scraped source, leaving a syntax error; restored.

    :param context: security context
    :param live_snapshot: True when a live snapshot will be taken
    :param state: current power_state of the guest
    :param instance: the instance being snapshotted
    """
    # NOTE(dkang): managedSave does not work for LXC
    if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
        if state == power_state.RUNNING or state == power_state.PAUSED:
            self.suspend(context, instance)
def _snapshot_domain(self, context, live_snapshot, virt_dom, state,
                     instance):
    """Restore the domain to its pre-snapshot power state.

    Counterpart of _prepare_domain_for_snapshot: after a cold snapshot
    the guest was suspended, so recreate the domain in its previous
    running/paused state and re-attach passthrough devices.

    BUGFIX(review): the scraped source lost the signature continuation
    (`instance):`) and the method names of the two re-attach calls,
    leaving orphaned argument lines; restored from upstream Nova.
    """
    guest = None
    # NOTE(dkang): because previous managedSave is not called
    # for LXC, _create_domain must not be called.
    if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
        if state == power_state.RUNNING:
            guest = self._create_domain(domain=virt_dom)
        elif state == power_state.PAUSED:
            guest = self._create_domain(domain=virt_dom, pause=True)

        if guest is not None:
            # PCI and direct-passthrough devices are detached on
            # suspend, so they must be re-attached after resume.
            self._attach_pci_devices(
                guest, pci_manager.get_instance_pci_devs(instance))
            self._attach_direct_passthrough_ports(
                context, instance, guest)
def _can_set_admin_password(self, image_meta):
    """Check whether the admin password can be set for this guest.

    BUGFIX(review): the scraped source lost the has_min_version()
    argument, the `image_meta.properties.get(` receiver and the final
    `else:` clause; restored from upstream Nova.

    :param image_meta: nova.objects.ImageMeta of the instance image
    :raises exception.SetAdminPasswdNotSupported: if the virt type (or
        its version, for parallels) cannot set a guest password
    :raises exception.QemuGuestAgentNotEnabled: for kvm/qemu guests
        whose image does not enable the QEMU guest agent
    """
    if CONF.libvirt.virt_type == 'parallels':
        if not self._host.has_min_version(
                MIN_LIBVIRT_PARALLELS_SET_ADMIN_PASSWD):
            raise exception.SetAdminPasswdNotSupported()
    elif CONF.libvirt.virt_type in ('kvm', 'qemu'):
        # Setting the password relies on the QEMU guest agent running
        # inside the guest, which the image must opt into.
        if not image_meta.properties.get('hw_qemu_guest_agent', False):
            raise exception.QemuGuestAgentNotEnabled()
    else:
        raise exception.SetAdminPasswdNotSupported()
# TODO(melwitt): Combine this with the similar xenapi code at some point.
def _save_instance_password_if_sshkey_present(self, instance, new_pass):
    """Stash the new admin password in the instance system metadata.

    The password is encrypted with the instance's RSA SSH public key
    (when one is present) so it can later be served by the metadata
    service and decrypted only by the key holder.

    BUGFIX(review): the scraped source lost the
    `instance.system_metadata.update(` receiver line; restored.

    :param instance: instance whose key_data/system_metadata are used
    :param new_pass: plaintext password to encrypt and store
    """
    sshkey = instance.key_data if 'key_data' in instance else None
    # Only RSA keys are usable for encryption here.
    if sshkey and sshkey.startswith("ssh-rsa"):
        enc = crypto.ssh_encrypt_text(sshkey, new_pass)
        # NOTE(melwitt): The convert_password method doesn't actually do
        # anything with the context argument, so we can pass None.
        instance.system_metadata.update(
            password.convert_password(None, base64.encode_as_text(enc)))
def set_admin_password(self, instance, new_pass):
    """Set the admin password inside the guest via the guest agent.

    BUGFIX(review): the scraped source lost the
    `instance.image_meta.properties.get(` receiver, the `else:` before
    `user = "root"`, and the `try:` around the guest call; restored
    from upstream Nova.

    :param instance: instance whose password is being set
    :param new_pass: the new plaintext password
    :raises NotImplementedError: if the QEMU guest agent is
        unresponsive (callers treat this as "not supported right now")
    :raises exception.InternalError: on any other libvirt error
    """
    guest = self._host.get_guest(instance)

    # Pick the account to set: image property first, then an
    # OS-appropriate default.
    user = instance.image_meta.properties.get("os_admin_user")
    if not user:
        if instance.os_type == "windows":
            user = "Administrator"
        else:
            user = "root"
    try:
        guest.set_user_password(user, new_pass)
    except libvirt.libvirtError as ex:
        error_code = ex.get_error_code()
        if error_code == libvirt.VIR_ERR_AGENT_UNRESPONSIVE:
            LOG.debug('Failed to set password: QEMU agent unresponsive',
                      instance=instance)
            raise NotImplementedError()

        err_msg = encodeutils.exception_to_unicode(ex)
        msg = (_('Error from libvirt while set password for username '
                 '"%(user)s": [Error Code %(error_code)s] %(ex)s')
               % {'user': user, 'error_code': error_code, 'ex': err_msg})
        raise exception.InternalError(msg)
    else:
        # Save the password in sysmeta so it may be retrieved from the
        # metadata service.
        self._save_instance_password_if_sshkey_present(instance, new_pass)
def _can_quiesce(self, instance, image_meta):
    """Check whether the guest filesystems can be quiesced.

    BUGFIX(review): the scraped source lost the exception's
    `instance_id=instance.uuid)` argument line and the
    `image_meta.properties.get(` receiver; restored.

    :param instance: the instance to quiesce
    :param image_meta: nova.objects.ImageMeta of the instance image
    :raises exception.InstanceQuiesceNotSupported: if the virt type is
        not kvm/qemu
    :raises exception.QemuGuestAgentNotEnabled: if the image does not
        enable the QEMU guest agent (required to run fsfreeze)
    """
    if CONF.libvirt.virt_type not in ('kvm', 'qemu'):
        raise exception.InstanceQuiesceNotSupported(
            instance_id=instance.uuid)

    if not image_meta.properties.get('hw_qemu_guest_agent', False):
        raise exception.QemuGuestAgentNotEnabled()
def _requires_quiesce(self, image_meta):
return'os_require_quiesce', False)
def _set_quiesced(self, context, instance, image_meta, quiesced):
self._can_quiesce(instance, image_meta)
guest = self._host.get_guest(instance)
if quiesced:
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
err_msg = encodeutils.exception_to_unicode(ex)
msg = (_('Error from libvirt while quiescing %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s')
% {'instance_name':,
'error_code': error_code, 'ex': err_msg})
raise exception.InternalError(msg)
def quiesce(self, context, instance, image_meta):
    """Freeze the guest filesystems to prepare for snapshot.

    The qemu-guest-agent must be setup to execute fsfreeze.

    BUGFIX(review): the scraped source lost the docstring's closing
    quotes, leaving an unterminated string literal; restored.
    """
    self._set_quiesced(context, instance, image_meta, True)
def unquiesce(self, context, instance, image_meta):
    """Resume (thaw) the guest filesystems once a snapshot completes.

    Counterpart of quiesce(); delegates to _set_quiesced with a False
    flag so the guest agent thaws the filesystems.
    """
    self._set_quiesced(context, instance, image_meta, quiesced=False)
def _live_snapshot(self, context, instance, guest, disk_path, out_path,
source_format, image_format, image_meta):
"""Snapshot an instance without downtime."""
dev = guest.get_block_device(disk_path)
# Save a copy of the domain's persistent XML file
xml = guest.get_xml_desc(dump_inactive=True, dump_sensitive=True)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
except Exception:
# NOTE (rmk): We are using shallow rebases as a workaround to a bug
# in QEMU 1.3. In order to do this, we need to create
# a destination image with the original backing file
# and matching size of the instance root disk.
src_disk_size = libvirt_utils.get_disk_size(disk_path,
src_back_path = libvirt_utils.get_disk_backing_file(disk_path,
disk_delta = out_path + '.delta'
libvirt_utils.create_cow_image(src_back_path, disk_delta,
quiesced = False
self._set_quiesced(context, instance, image_meta, True)
quiesced = True
except exception.NovaException as err:
if self._requires_quiesce(image_meta):
raise'Skipping quiescing instance: %(reason)s.',
{'reason': err}, instance=instance)
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if guest.has_persistent_configuration():
support_uefi = self._has_uefi_support()
# NOTE (rmk): Establish a temporary mirror of our root disk and
# issue an abort once we have a complete copy.
dev.rebase(disk_delta, copy=True, reuse_ext=True, shallow=True)