# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to a hypervisor through libvirt.
Supports KVM, LXC, QEMU, and Parallels.
"""
import binascii
import collections
from collections import deque
import contextlib
import copy
import errno
import functools
import glob
import grp
import itertools
import operator
import os
import pwd
import random
import shutil
import sys
import tempfile
import threading
import time
import typing as ty
import uuid
from castellan import key_manager
from copy import deepcopy
import eventlet
from eventlet import greenthread
from eventlet import tpool
from lxml import etree
from os_brick import encryptors
from os_brick.encryptors import luks as luks_encryptor
from os_brick import exception as brick_exception
from os_brick.initiator import connector
import os_resource_classes as orc
import os_traits as ot
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_serialization import base64
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import importutils
from oslo_utils import netutils as oslo_netutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
from nova.api.metadata import base as instance_metadata
from nova.api.metadata import password
from nova import block_device
from nova.compute import power_state
from nova.compute import provider_tree
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova.console import serial as serial_console
from nova.console import type as ctype
from nova import context as nova_context
from nova import crypto
from nova.db import constants as db_const
from nova import exception
from nova.i18n import _
from nova.image import glance
from nova.network import model as network_model
from nova import objects
from nova.objects import diagnostics as diagnostics_obj
from nova.objects import fields
from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
import nova.privsep.libvirt
import nova.privsep.path
import nova.privsep.utils
from nova.storage import rbd_utils
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt.disk import api as disk_api
from nova.virt.disk.vfs import guestfs
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import hardware
from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import designer
from nova.virt.libvirt import event as libvirtevent
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import instancejobtracker
from nova.virt.libvirt import migration as libvirt_migrate
from nova.virt.libvirt.storage import dmcrypt
from nova.virt.libvirt.storage import lvm
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt import vif as libvirt_vif
from nova.virt.libvirt.volume import fs
from nova.virt.libvirt.volume import mount
from nova.virt.libvirt.volume import remotefs
from nova.virt.libvirt.volume import volume
from nova.virt import netutils
from nova.volume import cinder
libvirt: ty.Any = None
uefi_logged = False
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
MAX_CONSOLE_BYTES = 100 * units.Ki
VALID_DISK_CACHEMODES = [
"default", "none", "writethrough", "writeback", "directsync", "unsafe",
]
# The libvirt driver will prefix any disable reason codes with this string.
DISABLE_PREFIX = 'AUTO: '
# Disable reason for the service which was enabled or disabled without reason
DISABLE_REASON_UNDEFINED = None
# Guest config console string
CONSOLE = "console=tty0 console=ttyS0 console=hvc0"
GuestNumaConfig = collections.namedtuple(
'GuestNumaConfig', ['cpuset', 'cputune', 'numaconfig', 'numatune'])
class InjectionInfo(collections.namedtuple(
'InjectionInfo', ['network_info', 'files', 'admin_pass'])):
__slots__ = ()
def __repr__(self):
return ('InjectionInfo(network_info=%r, files=%r, '
'admin_pass=<SANITIZED>)') % (self.network_info, self.files)
# NOTE(lyarwood): Dict of volume drivers supported by the libvirt driver, keyed
# by the connection_info['driver_volume_type'] returned by Cinder for each
# volume type it supports
# TODO(lyarwood): Add host configurables to allow this list to be changed.
# Allowing native iSCSI to be reintroduced etc.
VOLUME_DRIVERS = {
'iscsi': 'nova.virt.libvirt.volume.iscsi.LibvirtISCSIVolumeDriver',
'iser': 'nova.virt.libvirt.volume.iser.LibvirtISERVolumeDriver',
'local': 'nova.virt.libvirt.volume.volume.LibvirtVolumeDriver',
'fake': 'nova.virt.libvirt.volume.volume.LibvirtFakeVolumeDriver',
'rbd': 'nova.virt.libvirt.volume.net.LibvirtNetVolumeDriver',
'nfs': 'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver',
'smbfs': 'nova.virt.libvirt.volume.smbfs.LibvirtSMBFSVolumeDriver',
'fibre_channel': 'nova.virt.libvirt.volume.fibrechannel.LibvirtFibreChannelVolumeDriver', # noqa:E501
'gpfs': 'nova.virt.libvirt.volume.gpfs.LibvirtGPFSVolumeDriver',
'quobyte': 'nova.virt.libvirt.volume.quobyte.LibvirtQuobyteVolumeDriver',
'scaleio': 'nova.virt.libvirt.volume.scaleio.LibvirtScaleIOVolumeDriver',
'vzstorage': 'nova.virt.libvirt.volume.vzstorage.LibvirtVZStorageVolumeDriver', # noqa:E501
'storpool': 'nova.virt.libvirt.volume.storpool.LibvirtStorPoolVolumeDriver', # noqa:E501
'nvmeof': 'nova.virt.libvirt.volume.nvme.LibvirtNVMEVolumeDriver',
}
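# Illustrative sketch (not part of the driver): given a Cinder
# connection_info such as {'driver_volume_type': 'rbd', ...}, the matching
# class is looked up in VOLUME_DRIVERS and loaded lazily, roughly:
#
#     driver_cls = importutils.import_class(VOLUME_DRIVERS['rbd'])
#     volume_driver = driver_cls(host)  # host: nova.virt.libvirt.host.Host
#
# The actual lazy loading is done by LibvirtDriver._get_volume_driver().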
def patch_tpool_proxy():
"""eventlet.tpool.Proxy doesn't work with old-style class in __str__()
or __repr__() calls. See bug #962840 for details.
We perform a monkey patch to replace those two instance methods.
"""
def str_method(self):
return str(self._obj)
def repr_method(self):
return repr(self._obj)
tpool.Proxy.__str__ = str_method
tpool.Proxy.__repr__ = repr_method
patch_tpool_proxy()
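# Example of the patched behaviour (illustrative only): after the patch,
# str()/repr() of a tpool.Proxy delegate to the wrapped object, e.g.
#
#     conn = tpool.Proxy(libvirt.open('qemu:///system'))
#     str(conn)   # string form of the underlying virConnect, not the Proxy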
# For information about when MIN_{LIBVIRT,QEMU}_VERSION and
# NEXT_MIN_{LIBVIRT,QEMU}_VERSION can be changed, consult the following:
#
# doc/source/reference/libvirt-distro-support-matrix.rst
#
# DO NOT FORGET to update this document when touching any versions below!
MIN_LIBVIRT_VERSION = (6, 0, 0)
MIN_QEMU_VERSION = (4, 2, 0)
NEXT_MIN_LIBVIRT_VERSION = (7, 0, 0)
NEXT_MIN_QEMU_VERSION = (5, 2, 0)
# Virtuozzo driver support
MIN_VIRTUOZZO_VERSION = (7, 0, 0)
# Names of the types that do not get compressed during migration
NO_COMPRESSION_TYPES = ('qcow2',)
# Maximum number of serial consoles supported by QEMU
QEMU_MAX_SERIAL_PORTS = 4
# QEMU supports 4 serial consoles; we remove 1 because of the PTY one defined
ALLOWED_QEMU_SERIAL_PORTS = QEMU_MAX_SERIAL_PORTS - 1
VGPU_RESOURCE_SEMAPHORE = 'vgpu_resources'
LIBVIRT_PERF_EVENT_PREFIX = 'VIR_PERF_PARAM_'
# VDPA interface support
MIN_LIBVIRT_VDPA = (6, 9, 0)
MIN_QEMU_VDPA = (5, 1, 0)
class AsyncDeviceEventsHandler:
"""A synchornization point between libvirt events an clients waiting for
such events.
It provides an interface for the clients to wait for one or more libvirt
event types. It implements event delivery by expecting the libvirt driver
to forward libvirt specific events to notify_waiters()
It handles multiple clients for the same instance, device and event
type and delivers the event to each clients.
"""
class Waiter:
def __init__(
self,
instance_uuid: str,
device_name: str,
event_types: ty.Set[ty.Type[libvirtevent.DeviceEvent]]
):
self.instance_uuid = instance_uuid
self.device_name = device_name
self.event_types = event_types
self.threading_event = threading.Event()
self.result: ty.Optional[libvirtevent.DeviceEvent] = None
def matches(self, event: libvirtevent.DeviceEvent) -> bool:
"""Returns true if the event is one of the expected event types
for the given instance and device.
"""
return (
self.instance_uuid == event.uuid and
self.device_name == event.dev and
isinstance(event, tuple(self.event_types)))
def __repr__(self) -> str:
return (
"AsyncDeviceEventsHandler.Waiter("
f"instance_uuid={self.instance_uuid}, "
f"device_name={self.device_name}, "
f"event_types={self.event_types})")
def __init__(self):
self._lock = threading.Lock()
# Ongoing device operations in libvirt where we wait for the events
# about success or failure.
self._waiters: ty.Set[AsyncDeviceEventsHandler.Waiter] = set()
def create_waiter(
self,
instance_uuid: str,
device_name: str,
event_types: ty.Set[ty.Type[libvirtevent.DeviceEvent]]
) -> 'AsyncDeviceEventsHandler.Waiter':
"""Returns an opaque token the caller can use in wait() to
wait for the libvirt event
:param instance_uuid: The UUID of the instance.
:param device_name: The device name alias used by libvirt for this
device.
:param event_types: A set of classes derived from DeviceEvent
specifying which event types the caller waits for. Specifying more
than one event type means waiting for either of the events to be
received.
:returns: an opaque token to be used with wait().
"""
waiter = AsyncDeviceEventsHandler.Waiter(
instance_uuid, device_name, event_types)
with self._lock:
self._waiters.add(waiter)
return waiter
def delete_waiter(self, token: 'AsyncDeviceEventsHandler.Waiter'):
"""Deletes the waiter
:param token: the opaque token returned by create_waiter() to be
deleted
"""
with self._lock:
self._waiters.remove(token)
def wait(
self, token: 'AsyncDeviceEventsHandler.Waiter', timeout: float,
) -> ty.Optional[libvirtevent.DeviceEvent]:
"""Blocks waiting for the libvirt event represented by the opaque token
:param token: A token created by calling create_waiter()
:param timeout: Maximum number of seconds this call blocks waiting for
the event to be received
:returns: The received libvirt event, or None in case of timeout
"""
token.threading_event.wait(timeout)
with self._lock:
# cleanup_waiters() may already have removed this token, so use
# discard() rather than remove() to avoid a KeyError here.
self._waiters.discard(token)
return token.result
def notify_waiters(self, event: libvirtevent.DeviceEvent) -> bool:
"""Unblocks the client waiting for this event.
:param event: the libvirt event that is received
:returns: True if there was a client waiting and False otherwise.
"""
dispatched = False
with self._lock:
for waiter in self._waiters:
if waiter.matches(event):
waiter.result = event
waiter.threading_event.set()
dispatched = True
return dispatched
def cleanup_waiters(self, instance_uuid: str) -> None:
"""Deletes all waiters and unblock all clients related to the specific
instance.
param instance_uuid: The instance UUID for which the cleanup is
requested
"""
with self._lock:
instance_waiters = set()
for waiter in self._waiters:
if waiter.instance_uuid == instance_uuid:
# unblock any waiting thread
waiter.threading_event.set()
instance_waiters.add(waiter)
self._waiters -= instance_waiters
if instance_waiters:
LOG.debug(
'Cleaned up device related libvirt event waiters: %s',
instance_waiters)
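# Illustrative usage sketch for AsyncDeviceEventsHandler (the real call
# sites live in the device attach/detach paths of LibvirtDriver below; the
# concrete event class used here is only an example):
#
#     handler = AsyncDeviceEventsHandler()
#     waiter = handler.create_waiter(
#         instance.uuid, 'virtio-disk1', {libvirtevent.DeviceRemovedEvent})
#     # ... ask libvirt to detach the device ...
#     event = handler.wait(waiter, timeout=30)  # None on timeout
#
# The libvirt event callbacks are expected to forward device events to
# notify_waiters(), and cleanup_waiters() unblocks everything for an
# instance when it is torn down.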
class LibvirtDriver(driver.ComputeDriver):
def __init__(self, virtapi, read_only=False):
# NOTE(aspiers) Some of these are dynamic, so putting
# capabilities on the instance rather than on the class.
# This prevents the risk of one test setting a capability
# which bleeds over into other tests.
# LVM and RBD require raw images. If we are not configured to
# force convert images into raw format, then we _require_ raw
# images only.
raw_only = ('rbd', 'lvm')
requires_raw_image = (CONF.libvirt.images_type in raw_only and
not CONF.force_raw_images)
requires_ploop_image = CONF.libvirt.virt_type == 'parallels'
self.capabilities = {
"has_imagecache": True,
"supports_evacuate": True,
"supports_migrate_to_same_host": False,
"supports_attach_interface": True,
"supports_device_tagging": True,
"supports_tagged_attach_interface": True,
"supports_tagged_attach_volume": True,
"supports_extend_volume": True,
"supports_multiattach": True,
"supports_trusted_certs": True,
# Supported image types
"supports_image_type_aki": True,
"supports_image_type_ari": True,
"supports_image_type_ami": True,
"supports_image_type_raw": True,
"supports_image_type_iso": True,
# NOTE(danms): Certain backends do not work with complex image
# formats. If we are configured for those backends, then we
# should not expose the corresponding support traits.
"supports_image_type_qcow2": not requires_raw_image,
"supports_image_type_ploop": requires_ploop_image,
"supports_pcpus": True,
"supports_accelerators": True,
"supports_bfv_rescue": True,
"supports_vtpm": CONF.libvirt.swtpm_enabled,
}
super(LibvirtDriver, self).__init__(virtapi)
if not sys.platform.startswith('linux'):
raise exception.InternalError(
_('The libvirt driver only works on Linux'))
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
libvirt_migrate.libvirt = libvirt
self._host = host.Host(self._uri(), read_only,
lifecycle_event_handler=self.emit_event,
conn_event_handler=self._handle_conn_event)
self._supported_perf_events = []
self.vif_driver = libvirt_vif.LibvirtGenericVIFDriver(self._host)
# NOTE(lyarwood): Volume drivers are loaded on-demand
self.volume_drivers: ty.Dict[str, volume.LibvirtBaseVolumeDriver] = {}
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
self.image_backend = imagebackend.Backend(CONF.use_cow_images)
self.disk_cachemodes = {}
for mode_str in CONF.libvirt.disk_cachemodes:
disk_type, sep, cache_mode = mode_str.partition('=')
if cache_mode not in VALID_DISK_CACHEMODES:
LOG.warning('Invalid cachemode %(cache_mode)s specified '
'for disk type %(disk_type)s.',
{'cache_mode': cache_mode, 'disk_type': disk_type})
continue
self.disk_cachemodes[disk_type] = cache_mode
self._volume_api = cinder.API()
self._image_api = glance.API()
# The default choice for the sysinfo_serial config option is "unique"
# which does not have a special function since the value is just the
# instance.uuid.
sysinfo_serial_funcs = {
'none': lambda: None,
'hardware': self._get_host_sysinfo_serial_hardware,
'os': self._get_host_sysinfo_serial_os,
'auto': self._get_host_sysinfo_serial_auto,
}
self._sysinfo_serial_func = sysinfo_serial_funcs.get(
CONF.libvirt.sysinfo_serial, lambda: None)
self.job_tracker = instancejobtracker.InstanceJobTracker()
self._remotefs = remotefs.RemoteFilesystem()
self._live_migration_flags = self._block_migration_flags = 0
self.active_migrations = {}
# Compute reserved hugepages from conf file at the very
# beginning to ensure any syntax error will be reported and
# avoid any re-calculation when computing resources.
self._reserved_hugepages = hardware.numa_get_reserved_huge_pages()
# Copy of the compute service ProviderTree object that is updated
# every time update_provider_tree() is called.
# NOTE(sbauza): We only want a read-only cache, this attribute is not
# intended to be updatable directly
self.provider_tree: provider_tree.ProviderTree = None
# driver traits will not change during the runtime of the agent
# so calculate them once and save them
self._static_traits = None
# The CPU models in the configuration are case-insensitive, but the CPU
# model in libvirt is case-sensitive, therefore create a mapping to
# map the lower-case CPU model name to the normal CPU model name.
self.cpu_models_mapping = {}
self.cpu_model_flag_mapping = {}
self._vpmems_by_name, self._vpmems_by_rc = self._discover_vpmems(
vpmem_conf=CONF.libvirt.pmem_namespaces)
# We default to not supporting vGPUs unless the configuration is set.
self.pgpu_type_mapping = collections.defaultdict(str)
self.supported_vgpu_types = self._get_supported_vgpu_types()
def _discover_vpmems(self, vpmem_conf=None):
"""Discover vpmems on host and configuration.
:param vpmem_conf: pmem namespaces configuration from CONF
:returns: a dict of vpmem keyed by name, and
a dict of vpmem list keyed by resource class
:raises: exception.InvalidConfiguration if Libvirt or QEMU version
does not meet requirement.
"""
if not vpmem_conf:
return {}, {}
# vpmem keyed by name {name: objects.LibvirtVPMEMDevice,...}
vpmems_by_name: ty.Dict[str, 'objects.LibvirtVPMEMDevice'] = {}
# vpmem list keyed by resource class
# {'RC_0': [objects.LibvirtVPMEMDevice, ...], 'RC_1': [...]}
vpmems_by_rc: ty.Dict[str, ty.List['objects.LibvirtVPMEMDevice']] = (
collections.defaultdict(list)
)
vpmems_host = self._get_vpmems_on_host()
for ns_conf in vpmem_conf:
try:
ns_label, ns_names = ns_conf.split(":", 1)
except ValueError:
reason = _("The configuration doesn't follow the format")
raise exception.PMEMNamespaceConfigInvalid(
reason=reason)
ns_names = ns_names.split("|")
for ns_name in ns_names:
if ns_name not in vpmems_host:
reason = _("The PMEM namespace %s isn't on host") % ns_name
raise exception.PMEMNamespaceConfigInvalid(
reason=reason)
if ns_name in vpmems_by_name:
reason = (_("Duplicated PMEM namespace %s configured") %
ns_name)
raise exception.PMEMNamespaceConfigInvalid(
reason=reason)
pmem_ns_updated = vpmems_host[ns_name]
pmem_ns_updated.label = ns_label
vpmems_by_name[ns_name] = pmem_ns_updated
rc = orc.normalize_name(
"PMEM_NAMESPACE_%s" % ns_label)
vpmems_by_rc[rc].append(pmem_ns_updated)
return vpmems_by_name, vpmems_by_rc
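# Illustrative configuration consumed by _discover_vpmems() (namespace
# names must match namespaces that already exist on the host; the values
# below are made up):
#
#     [libvirt]
#     pmem_namespaces = 128G:ns0|ns1,SMALL:ns2
#
# This yields vpmems_by_name keyed by 'ns0', 'ns1' and 'ns2', and
# vpmems_by_rc keyed by the normalized resource classes, roughly
# CUSTOM_PMEM_NAMESPACE_128G and CUSTOM_PMEM_NAMESPACE_SMALL.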
def _get_vpmems_on_host(self):
"""Get PMEM namespaces on host using ndctl utility."""
try:
output = nova.privsep.libvirt.get_pmem_namespaces()
except Exception as e:
reason = _("Get PMEM namespaces by ndctl utility, "
"please ensure ndctl is installed: %s") % e
raise exception.GetPMEMNamespacesFailed(reason=reason)
if not output:
return {}
namespaces = jsonutils.loads(output)
vpmems_host = {} # keyed by namespace name
for ns in namespaces:
# store namespace info parsed from ndctl utility return
if not ns.get('name'):
# The name is used to identify namespaces; it's an optional
# attribute when creating a namespace. If a namespace doesn't
# have a name it cannot be used by Nova, so we skip it.
continue
vpmems_host[ns['name']] = objects.LibvirtVPMEMDevice(
name=ns['name'],
devpath='/dev/' + ns['daxregion']['devices'][0]['chardev'],
size=ns['size'],
align=ns['daxregion']['align'])
return vpmems_host
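# The ndctl output consumed above is a JSON list of namespaces; only the
# fields referenced here matter (abbreviated, illustrative example):
#
#     [{"name": "ns0", "size": 137438953472,
#       "daxregion": {"align": 2097152,
#                     "devices": [{"chardev": "dax0.0"}]}}]
#
# which maps to LibvirtVPMEMDevice(name='ns0', devpath='/dev/dax0.0',
# size=137438953472, align=2097152).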
@property
def disk_cachemode(self):
# It can be confusing to understand the QEMU cache mode
# behaviour, because each cache=$MODE is a convenient shorthand
# to toggle _three_ cache.* booleans. Consult the below table
# (quoting from the QEMU man page):
#
# | cache.writeback | cache.direct | cache.no-flush
# --------------------------------------------------------------
# writeback | on | off | off
# none | on | on | off
# writethrough | off | off | off
# directsync | off | on | off
# unsafe | on | off | on
#
# Where:
#
# - 'cache.writeback=off' means: QEMU adds an automatic fsync()
# after each write request.
#
# - 'cache.direct=on' means: Use Linux's O_DIRECT, i.e. bypass
# the kernel page cache. Caches in any other layer (disk
# cache, QEMU metadata caches, etc.) can still be present.
#
# - 'cache.no-flush=on' means: Ignore flush requests, i.e.
# never call fsync(), even if the guest explicitly requested
# it.
#
# Use cache mode "none" (cache.writeback=on, cache.direct=on,
# cache.no-flush=off) for consistent performance and
# migration correctness. Some filesystems don't support
# O_DIRECT, though. For those we fall back to the next
# reasonable option that is "writeback" (cache.writeback=on,
# cache.direct=off, cache.no-flush=off).
if self._disk_cachemode is None:
self._disk_cachemode = "none"
if not nova.privsep.utils.supports_direct_io(CONF.instances_path):
self._disk_cachemode = "writeback"
return self._disk_cachemode
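# (Illustrative: on an instances_path backed by a filesystem with O_DIRECT
# support, e.g. ext4 or xfs, this resolves to "none"; on one without it,
# such as tmpfs, it falls back to "writeback".)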
def _set_cache_mode(self, conf):
"""Set cache mode on LibvirtConfigGuestDisk object."""
try:
source_type = conf.source_type
driver_cache = conf.driver_cache
except AttributeError:
return
# Shareable disks like for a multi-attach volume need to have the
# driver cache disabled.
if getattr(conf, 'shareable', False):
conf.driver_cache = 'none'
else:
cache_mode = self.disk_cachemodes.get(source_type,
driver_cache)
conf.driver_cache = cache_mode
# NOTE(acewit): If [libvirt]disk_cachemodes is set in nova.conf to
# `block=writeback`, `block=writethrough` or `block=unsafe`, the
# corresponding Linux I/O semantic is not O_DIRECT, which would
# result in an attachment failure because of the libvirt bug
# (https://bugzilla.redhat.com/show_bug.cgi?id=1086704)
if ((getattr(conf, 'driver_io', None) == "native") and
conf.driver_cache not in [None, 'none', 'directsync']):
conf.driver_io = "threads"
LOG.warning("The guest disk driver io mode has fallen back "
"from 'native' to 'threads' because the "
"disk cache mode is set as %(cachemode)s, which does "
"not use O_DIRECT. See the following bug report "
"for more details: https://launchpad.net/bugs/1841363",
{'cachemode': conf.driver_cache})
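# Illustrative [libvirt]/disk_cachemodes value consumed by the lookup above
# (parsed in __init__ as comma-separated '<source_type>=<cache_mode>'
# pairs; the modes shown are examples):
#
#     [libvirt]
#     disk_cachemodes = file=writeback,block=none,network=writeback
#
# A disk whose source_type has no entry keeps whatever driver_cache was
# already set on its config object.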
def _do_quality_warnings(self):
"""Warn about potential configuration issues.
This will log a warning message for things such as untested driver or
host arch configurations in order to indicate potential issues to
administrators.
"""
if CONF.libvirt.virt_type not in ('qemu', 'kvm'):
LOG.warning(
"Support for the '%(type)s' libvirt backend has been "
"deprecated and will be removed in a future release.",
{'type': CONF.libvirt.virt_type},
)
caps = self._host.get_capabilities()
hostarch = caps.host.cpu.arch
if hostarch not in (
fields.Architecture.I686, fields.Architecture.X86_64,
):
LOG.warning(
'The libvirt driver is not tested on %(arch)s by the '
'OpenStack project and thus its quality can not be ensured. '
'For more information, see: https://docs.openstack.org/'
'nova/latest/user/support-matrix.html',
{'arch': hostarch},
)
def _handle_conn_event(self, enabled, reason):
LOG.info("Connection event '%(enabled)d' reason '%(reason)s'",
{'enabled': enabled, 'reason': reason})
self._set_host_enabled(enabled, reason)
def init_host(self, host):
self._host.initialize()
self._update_host_specific_capabilities()
self._check_cpu_set_configuration()
self._do_quality_warnings()
self._parse_migration_flags()
self._supported_perf_events = self._get_supported_perf_events()
self._check_file_backed_memory_support()
self._check_my_ip()
if (CONF.libvirt.virt_type == 'lxc' and
not (CONF.libvirt.uid_maps and CONF.libvirt.gid_maps)):
LOG.warning("Running libvirt-lxc without user namespaces is "
"dangerous. Containers spawned by Nova will be run "
"as the host's root user. It is highly suggested "
"that user namespaces be used in a public or "
"multi-tenant environment.")
# Stop libguestfs from using KVM unless we're also configured
# to use it. This solves the problem where people need to
# stop Nova from using KVM because nested virt is broken
if CONF.libvirt.virt_type != "kvm":
guestfs.force_tcg()
if not self._host.has_min_version(MIN_LIBVIRT_VERSION):
raise exception.InternalError(
_('Nova requires libvirt version %s or greater.') %
libvirt_utils.version_to_string(MIN_LIBVIRT_VERSION))
if CONF.libvirt.virt_type in ("qemu", "kvm"):
if not self._host.has_min_version(hv_ver=MIN_QEMU_VERSION):
raise exception.InternalError(
_('Nova requires QEMU version %s or greater.') %
libvirt_utils.version_to_string(MIN_QEMU_VERSION))
if CONF.libvirt.virt_type == 'parallels':
if not self._host.has_min_version(hv_ver=MIN_VIRTUOZZO_VERSION):
raise exception.InternalError(
_('Nova requires Virtuozzo version %s or greater.') %
libvirt_utils.version_to_string(MIN_VIRTUOZZO_VERSION))
# Give the cloud admin a heads up if we are intending to
# change the MIN_LIBVIRT_VERSION in the next release.
if not self._host.has_min_version(NEXT_MIN_LIBVIRT_VERSION):
LOG.warning('Running Nova with a libvirt version less than '
'%(version)s is deprecated. The required minimum '
'version of libvirt will be raised to %(version)s '
'in the next release.',
{'version': libvirt_utils.version_to_string(
NEXT_MIN_LIBVIRT_VERSION)})
if (CONF.libvirt.virt_type in ("qemu", "kvm") and
not self._host.has_min_version(hv_ver=NEXT_MIN_QEMU_VERSION)):
LOG.warning('Running Nova with a QEMU version less than '
'%(version)s is deprecated. The required minimum '
'version of QEMU will be raised to %(version)s '
'in the next release.',
{'version': libvirt_utils.version_to_string(
NEXT_MIN_QEMU_VERSION)})
# Allowing both "tunnelling via libvirtd" (which will be
# deprecated once the MIN_{LIBVIRT,QEMU}_VERSION is sufficiently
# new) and "native TLS" options at the same time is
# nonsensical.
if (CONF.libvirt.live_migration_tunnelled and
CONF.libvirt.live_migration_with_native_tls):
msg = _("Setting both 'live_migration_tunnelled' and "
"'live_migration_with_native_tls' at the same "
"time is invalid. If you have the relevant "
"libvirt and QEMU versions, and TLS configured "
"in your environment, pick "
"'live_migration_with_native_tls'.")
raise exception.Invalid(msg)
# Some imagebackends are only able to import raw disk images,
# and will fail if given any other format. See the bug
# https://bugs.launchpad.net/nova/+bug/1816686 for more details.
if CONF.libvirt.images_type in ('rbd',):
if not CONF.force_raw_images:
msg = _("'[DEFAULT]/force_raw_images = False' is not "
"allowed with '[libvirt]/images_type = rbd'. "
"Please check the two configs and if you really "
"do want to use rbd as images_type, set "
"force_raw_images to True.")
raise exception.InvalidConfiguration(msg)
# TODO(sbauza): Remove this code once mediated devices are persisted
# across reboots.
self._recreate_assigned_mediated_devices()
self._check_cpu_compatibility()
self._check_vtpm_support()
self._register_instance_machine_type()
def _update_host_specific_capabilities(self) -> None:
"""Update driver capabilities based on capabilities of the host."""
# TODO(stephenfin): We should also be reporting e.g. SEV functionality
# or UEFI bootloader support in this manner
self.capabilities.update({
'supports_secure_boot': self._host.supports_secure_boot,
})
def _register_instance_machine_type(self):
"""Register the machine type of instances on this host
For each instance found on this host by InstanceList.get_by_host ensure
a machine type is registered within the system metadata of the instance
"""
context = nova_context.get_admin_context()
hostname = self._host.get_hostname()
for instance in objects.InstanceList.get_by_host(context, hostname):
# NOTE(lyarwood): Skip if hw_machine_type is set already in the
# image_meta of the instance. Note that this value comes from the
# system metadata of the instance where it is stored under the
# image_hw_machine_type key.
if instance.image_meta.properties.get('hw_machine_type'):
continue
# Fetch and record the machine type from the config
hw_machine_type = libvirt_utils.get_machine_type(
instance.image_meta)
# NOTE(lyarwood): As above this updates
# image_meta.properties.hw_machine_type within the instance and
# will be returned the next time libvirt_utils.get_machine_type is
# called for the instance image meta.
instance.system_metadata['image_hw_machine_type'] = hw_machine_type
instance.save()
LOG.debug("Instance machine_type updated to %s", hw_machine_type,
instance=instance)
def _prepare_cpu_flag(self, flag):
# NOTE(kchamart) This helper method will be used while computing
# guest CPU compatibility. It will take into account a
# comma-separated list of CPU flags from
# `[libvirt]cpu_model_extra_flags`. If the CPU flag starts
# with '+', it is enabled for the guest; if it starts with '-',
# it is disabled. If neither '+' nor '-' is specified, the CPU
# flag is enabled.
if flag.startswith('-'):
flag = flag.lstrip('-')
policy_value = 'disable'
else:
flag = flag.lstrip('+')
policy_value = 'require'
cpu_feature = vconfig.LibvirtConfigGuestCPUFeature(
flag, policy=policy_value)
return cpu_feature
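# Illustrative mapping performed by _prepare_cpu_flag() (flag names are
# examples only):
#
#     [libvirt]
#     cpu_model_extra_flags = +pcid,-mpx,vmx
#
#     '+pcid' -> feature 'pcid' with policy 'require'
#     '-mpx'  -> feature 'mpx' with policy 'disable'
#     'vmx'   -> feature 'vmx' with policy 'require'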
def _check_cpu_compatibility(self):
mode = CONF.libvirt.cpu_mode
models = CONF.libvirt.cpu_models
if (CONF.libvirt.virt_type not in ("kvm", "qemu") and
mode not in (None, 'none')):
msg = _("Config requested an explicit CPU model, but "
"the current libvirt hypervisor '%s' does not "
"support selecting CPU models") % CONF.libvirt.virt_type
raise exception.Invalid(msg)
if mode != "custom":
if not models:
return
msg = _("The cpu_models option is not required when "
"cpu_mode!=custom")
raise exception.Invalid(msg)
if not models:
msg = _("The cpu_models option is required when cpu_mode=custom")
raise exception.Invalid(msg)
cpu = vconfig.LibvirtConfigGuestCPU()
for model in models:
cpu.model = self._get_cpu_model_mapping(model)
try:
self._compare_cpu(cpu, self._get_cpu_info(), None)
except exception.InvalidCPUInfo as e:
msg = (_("Configured CPU model: %(model)s is not "
"compatible with host CPU. Please correct your "
"config and try again. %(e)s") % {
'model': model, 'e': e})
raise exception.InvalidCPUInfo(msg)
# Use guest CPU model to check the compatibility between guest CPU and
# configured extra_flags
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.model = self._host.get_capabilities().host.cpu.model
for flag in set(x.lower() for x in CONF.libvirt.cpu_model_extra_flags):
cpu_feature = self._prepare_cpu_flag(flag)
cpu.add_feature(cpu_feature)
try:
self._compare_cpu(cpu, self._get_cpu_info(), None)
except exception.InvalidCPUInfo as e:
msg = (_("Configured extra flag: %(flag)s it not correct, or "
"the host CPU does not support this flag. Please "
"correct the config and try again. %(e)s") % {
'flag': flag, 'e': e})
raise exception.InvalidCPUInfo(msg)
def _check_vtpm_support(self) -> None:
# TODO(efried): A key manager must be configured to create/retrieve
# secrets. Is there a way to check that one is set up correctly?
# CONF.key_manager.backend is optional :(
if not CONF.libvirt.swtpm_enabled:
return
if CONF.libvirt.virt_type not in ('qemu', 'kvm'):
msg = _(
"vTPM support requires '[libvirt] virt_type' of 'qemu' or "
"'kvm'; found '%s'.")
raise exception.InvalidConfiguration(msg % CONF.libvirt.virt_type)
# These executables need to be installed for libvirt to make use of
# emulated TPM.
# NOTE(stephenfin): This checks using the PATH of the user running
# nova-compute rather than the libvirtd service, meaning it's an
# imperfect check but the best we can do
if not any(shutil.which(cmd) for cmd in ('swtpm_setup', 'swtpm')):
msg = _(
"vTPM support is configured but the 'swtpm' and "
"'swtpm_setup' binaries could not be found on PATH.")
raise exception.InvalidConfiguration(msg)
# The user and group must be valid on this host for cold migration and
# resize to function.
try:
pwd.getpwnam(CONF.libvirt.swtpm_user)
except KeyError:
msg = _(
"The user configured in '[libvirt] swtpm_user' does not exist "
"on this host; expected '%s'.")
raise exception.InvalidConfiguration(msg % CONF.libvirt.swtpm_user)
try:
grp.getgrnam(CONF.libvirt.swtpm_group)
except KeyError:
msg = _(
"The group configured in '[libvirt] swtpm_group' does not "
"exist on this host; expected '%s'.")
raise exception.InvalidConfiguration(
msg % CONF.libvirt.swtpm_group)
LOG.debug('Enabling emulated TPM support')
@staticmethod
def _is_existing_mdev(uuid):
# FIXME(sbauza): Some kernels can have a uevent race meaning that the
# libvirt daemon won't know when a mediated device is created unless
# you restart that daemon. Until all kernels we support are free of
# that possible race, check sysfs directly instead of asking the
# libvirt API.
# See https://bugzilla.redhat.com/show_bug.cgi?id=1376907 for ref.
return os.path.exists('/sys/bus/mdev/devices/{0}'.format(uuid))
def _recreate_assigned_mediated_devices(self):
"""Recreate assigned mdevs that could have disappeared if we reboot
the host.
"""
# NOTE(sbauza): This method just calls sysfs to recreate mediated
# devices by looking up existing guest XMLs and doesn't use
# the Placement API so it works with or without a vGPU reshape.
mdevs = self._get_all_assigned_mediated_devices()
for (mdev_uuid, instance_uuid) in mdevs.items():
if not self._is_existing_mdev(mdev_uuid):
dev_name = libvirt_utils.mdev_uuid2name(mdev_uuid)
dev_info = self._get_mediated_device_information(dev_name)
parent = dev_info['parent']
parent_type = self._get_vgpu_type_per_pgpu(parent)
if dev_info['type'] != parent_type:
# NOTE(sbauza): The mdev was created by using a different
# vGPU type. We can't recreate the mdev until the operator
# modifies the configuration.
parent = "{}:{}:{}.{}".format(*parent[4:].split('_'))
msg = ("The instance UUID %(inst)s uses a VGPU that "
"its parent pGPU %(parent)s no longer "
"supports as the instance vGPU type %(type)s "
"is not accepted for the pGPU. Please correct "
"the configuration accordingly." %
{'inst': instance_uuid,
'parent': parent,
'type': dev_info['type']})
raise exception.InvalidLibvirtGPUConfig(reason=msg)
self._create_new_mediated_device(parent, uuid=mdev_uuid)
def _check_file_backed_memory_support(self):
if not CONF.libvirt.file_backed_memory:
return
# file_backed_memory is only compatible with qemu/kvm virts
if CONF.libvirt.virt_type not in ("qemu", "kvm"):
raise exception.InternalError(
_('Running Nova with file_backed_memory and virt_type '
'%(type)s is not supported. file_backed_memory is only '
'supported with qemu and kvm types.') %
{'type': CONF.libvirt.virt_type})
# file-backed memory doesn't work with memory overcommit.
# Block service startup if file-backed memory is enabled and
# ram_allocation_ratio is not 1.0
if CONF.ram_allocation_ratio != 1.0:
raise exception.InternalError(
'Running Nova with file_backed_memory requires '
'ram_allocation_ratio configured to 1.0')
if CONF.reserved_host_memory_mb:
# this is a hard failure as placement won't allow total < reserved
if CONF.reserved_host_memory_mb >= CONF.libvirt.file_backed_memory:
msg = _(
"'[libvirt] file_backed_memory', which represents total "
"memory reported to placement, must be greater than "
"reserved memory configured via '[DEFAULT] "
"reserved_host_memory_mb'"
)
raise exception.InternalError(msg)
# TODO(stephenfin): Change this to an exception in W or later
LOG.warning(
"Reserving memory via '[DEFAULT] reserved_host_memory_mb' "
"is not compatible with file-backed memory. Consider "
"setting '[DEFAULT] reserved_host_memory_mb' to 0. This will "
"be an error in a future release."
)
def _check_my_ip(self):
ips = compute_utils.get_machine_ips()
if CONF.my_ip not in ips:
LOG.warning('my_ip address (%(my_ip)s) was not found on '
'any of the interfaces: %(ifaces)s',
{'my_ip': CONF.my_ip, 'ifaces': ", ".join(ips)})
def _check_cpu_set_configuration(self):
# evaluate these now to force a quick fail if they're invalid
vcpu_pin_set = hardware.get_vcpu_pin_set() or set()
cpu_shared_set = hardware.get_cpu_shared_set() or set()
cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
# TODO(stephenfin): Remove this in U once we remove the 'vcpu_pin_set'
# option
if not vcpu_pin_set:
if not (cpu_shared_set or cpu_dedicated_set):
return
if not cpu_dedicated_set.isdisjoint(cpu_shared_set):
msg = _(
"The '[compute] cpu_dedicated_set' and '[compute] "
"cpu_shared_set' configuration options must be "
"disjoint.")
raise exception.InvalidConfiguration(msg)
if CONF.reserved_host_cpus:
msg = _(
"The 'reserved_host_cpus' config option cannot be defined "
"alongside the '[compute] cpu_shared_set' or '[compute] "
"cpu_dedicated_set' options. Unset 'reserved_host_cpus'.")
raise exception.InvalidConfiguration(msg)
return
if cpu_dedicated_set:
# NOTE(stephenfin): This is a new option in Train so it can be
# an error
msg = _(
"The 'vcpu_pin_set' config option has been deprecated and "
"cannot be defined alongside '[compute] cpu_dedicated_set'. "
"Unset 'vcpu_pin_set'.")
raise exception.InvalidConfiguration(msg)
if cpu_shared_set:
LOG.warning(
"The '[compute] cpu_shared_set' and 'vcpu_pin_set' config "
"options have both been defined. While 'vcpu_pin_set' is "
"defined, it will continue to be used to configure the "
"specific host CPUs used for 'VCPU' inventory, while "
"'[compute] cpu_shared_set' will only be used for guest "
"emulator threads when 'hw:emulator_threads_policy=shared' "
"is defined in the flavor. This is legacy behavior and will "
"not be supported in a future release. "
"If you wish to define specific host CPUs to be used for "
"'VCPU' or 'PCPU' inventory, you must migrate the "
"'vcpu_pin_set' config option value to '[compute] "
"cpu_shared_set' and '[compute] cpu_dedicated_set', "
"respectively, and undefine 'vcpu_pin_set'.")
else:
LOG.warning(
"The 'vcpu_pin_set' config option has been deprecated and "
"will be removed in a future release. When defined, "
"'vcpu_pin_set' will be used to calculate 'VCPU' inventory "
"and schedule instances that have 'VCPU' allocations. "
"If you wish to define specific host CPUs to be used for "
"'VCPU' or 'PCPU' inventory, you must migrate the "
"'vcpu_pin_set' config option value to '[compute] "
"cpu_shared_set' and '[compute] cpu_dedicated_set', "
"respectively, and undefine 'vcpu_pin_set'.")
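# Illustrative example of the migration described in the warnings above
# (the CPU ranges are made up): a legacy
#
#     vcpu_pin_set = 4-15
#
# would typically be replaced with
#
#     [compute]
#     cpu_shared_set = 4-7
#     cpu_dedicated_set = 8-15
#
# and vcpu_pin_set left unset.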
def _prepare_migration_flags(self):
migration_flags = 0
migration_flags |= libvirt.VIR_MIGRATE_LIVE
# Enable support for p2p migrations
migration_flags |= libvirt.VIR_MIGRATE_PEER2PEER
# Adding VIR_MIGRATE_UNDEFINE_SOURCE because, without it, the migrated
# instance will remain defined on the source host
migration_flags |= libvirt.VIR_MIGRATE_UNDEFINE_SOURCE
# Adding VIR_MIGRATE_PERSIST_DEST to persist the VM on the
# destination host
migration_flags |= libvirt.VIR_MIGRATE_PERSIST_DEST
live_migration_flags = block_migration_flags = migration_flags
# Adding VIR_MIGRATE_NON_SHARED_INC, otherwise all block-migrations
# will be live-migrations instead
block_migration_flags |= libvirt.VIR_MIGRATE_NON_SHARED_INC
return (live_migration_flags, block_migration_flags)
# TODO(kchamart) Once the MIN_LIBVIRT_VERSION and MIN_QEMU_VERSION
# reach 4.4.0 and 2.11.0, which provide "native TLS" support by
# default, deprecate and remove the support for "tunnelled live
# migration" (and related config attribute), because:
#
# (a) it cannot handle live migration of disks in a non-shared
# storage setup (a.k.a. "block migration");
#
# (b) has a huge performance overhead and latency, because it burns
# more CPU and memory bandwidth due to increased number of data
# copies on both source and destination hosts.
#
# Both the above limitations are addressed by the QEMU-native TLS
# support (`live_migration_with_native_tls`).
def _handle_live_migration_tunnelled(self, migration_flags):
if CONF.libvirt.live_migration_tunnelled:
migration_flags |= libvirt.VIR_MIGRATE_TUNNELLED
return migration_flags
def _handle_native_tls(self, migration_flags):
if (CONF.libvirt.live_migration_with_native_tls):
migration_flags |= libvirt.VIR_MIGRATE_TLS
return migration_flags
def _handle_live_migration_post_copy(self, migration_flags):
if CONF.libvirt.live_migration_permit_post_copy:
migration_flags |= libvirt.VIR_MIGRATE_POSTCOPY
return migration_flags
def _handle_live_migration_auto_converge(self, migration_flags):
if self._is_post_copy_enabled(migration_flags):
LOG.info('The live_migration_permit_post_copy is set to '
'True and post copy live migration is available '
'so auto-converge will not be in use.')
elif CONF.libvirt.live_migration_permit_auto_converge:
migration_flags |= libvirt.VIR_MIGRATE_AUTO_CONVERGE
return migration_flags
def _parse_migration_flags(self):
(live_migration_flags,
block_migration_flags) = self._prepare_migration_flags()
live_migration_flags = self._handle_live_migration_tunnelled(
live_migration_flags)
block_migration_flags = self._handle_live_migration_tunnelled(
block_migration_flags)
live_migration_flags = self._handle_native_tls(
live_migration_flags)
block_migration_flags = self._handle_native_tls(
block_migration_flags)
live_migration_flags = self._handle_live_migration_post_copy(
live_migration_flags)
block_migration_flags = self._handle_live_migration_post_copy(
block_migration_flags)
live_migration_flags = self._handle_live_migration_auto_converge(
live_migration_flags)
block_migration_flags = self._handle_live_migration_auto_converge(
block_migration_flags)
self._live_migration_flags = live_migration_flags
self._block_migration_flags = block_migration_flags
# TODO(sahid): This method is targeted for removal when the tests
# have been updated to avoid its use
#
# All libvirt API calls on the libvirt.Connect object should be
# encapsulated by methods on the nova.virt.libvirt.host.Host
# object, rather than directly invoking the libvirt APIs. The goal
# is to avoid a direct dependency on the libvirt API from the
# driver.py file.
def _get_connection(self):
return self._host.get_connection()
_conn = property(_get_connection)
@staticmethod
def _uri():
if CONF.libvirt.virt_type == 'lxc':
uri = CONF.libvirt.connection_uri or 'lxc:///'
elif CONF.libvirt.virt_type == 'parallels':
uri = CONF.libvirt.connection_uri or 'parallels:///system'
else:
uri = CONF.libvirt.connection_uri or 'qemu:///system'
return uri
@staticmethod
def _live_migration_uri(dest):
uris = {
'kvm': 'qemu+%(scheme)s://%(dest)s/system',
'qemu': 'qemu+%(scheme)s://%(dest)s/system',
'parallels': 'parallels+tcp://%(dest)s/system',
}
dest = oslo_netutils.escape_ipv6(dest)
virt_type = CONF.libvirt.virt_type
# TODO(pkoniszewski): Remove fetching live_migration_uri in Pike
uri = CONF.libvirt.live_migration_uri
if uri:
return uri % dest
uri = uris.get(virt_type)
if uri is None:
raise exception.LiveMigrationURINotAvailable(virt_type=virt_type)
str_format = {
'dest': dest,
'scheme': CONF.libvirt.live_migration_scheme or 'tcp',
}
return uri % str_format
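# Illustrative result (hostname made up): with virt_type=kvm, no
# live_migration_uri override and the default live_migration_scheme,
# _live_migration_uri('dest-host') returns 'qemu+tcp://dest-host/system'.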
@staticmethod
def _migrate_uri(dest):
uri = None
dest = oslo_netutils.escape_ipv6(dest)
# Only QEMU live migration supports the migrate-uri parameter
virt_type = CONF.libvirt.virt_type
if virt_type in ('qemu', 'kvm'):
# QEMU accepts two schemes: tcp and rdma. By default
# libvirt builds the URI using the remote hostname and the
# tcp scheme.
uri = 'tcp://%s' % dest
# Because dest might be of type unicode, here we might return value of
# type unicode as well which is not acceptable by libvirt python
# binding when Python 2.7 is in use, so let's convert it explicitly
# back to string. When Python 3.x is in use, libvirt python binding
# accepts unicode type so it is completely fine to do a no-op str(uri)
# conversion which will return value of type unicode.
return uri and str(uri)
def instance_exists(self, instance):
"""Efficient override of base instance_exists method."""
try:
self._host.get_guest(instance)
return True
except (exception.InternalError, exception.InstanceNotFound):
return False
def list_instances(self):
names = []
for guest in self._host.list_guests(only_running=False):
names.append(guest.name)
return names
def list_instance_uuids(self):
uuids = []
for guest in self._host.list_guests(only_running=False):
uuids.append(guest.uuid)
return uuids
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
for vif in network_info:
self.vif_driver.plug(instance, vif)
def _unplug_vifs(self, instance, network_info, ignore_errors):
"""Unplug VIFs from networks."""
for vif in network_info:
try:
self.vif_driver.unplug(instance, vif)
except exception.NovaException:
if not ignore_errors:
raise
def unplug_vifs(self, instance, network_info):
self._unplug_vifs(instance, network_info, False)
def _teardown_container(self, instance):
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
rootfs_dev = instance.system_metadata.get('rootfs_device_name')
LOG.debug('Attempting to teardown container at path %(dir)s with '
'root device: %(rootfs_dev)s',
{'dir': container_dir, 'rootfs_dev': rootfs_dev},
instance=instance)
disk_api.teardown_container(container_dir, rootfs_dev)
def _destroy(self, instance):
try:
guest = self._host.get_guest(instance)
if CONF.serial_console.enabled:
# This method is called for several events: destroy,
# rebuild, hard-reboot, power-off - For all of these
# events we want to release the serial ports acquired
# for the guest before destroying it.
serials = self._get_serial_ports_from_guest(guest)
for hostname, port in serials:
serial_console.release_port(host=hostname, port=port)
except exception.InstanceNotFound:
guest = None
# If the instance is already terminated, we're still happy
# Otherwise, destroy it
old_domid = -1
if guest is not None:
try:
old_domid = guest.id
guest.poweroff()
except libvirt.libvirtError as e:
is_okay = False
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_NO_DOMAIN:
# Domain already gone. This can safely be ignored.
is_okay = True
elif errcode == libvirt.VIR_ERR_OPERATION_INVALID:
# If the instance is already shut off, we get this:
# Code=55 Error=Requested operation is not valid:
# domain is not running
state = guest.get_power_state(self._host)
if state == power_state.SHUTDOWN:
is_okay = True
elif errcode == libvirt.VIR_ERR_INTERNAL_ERROR:
errmsg = e.get_error_message()
if (CONF.libvirt.virt_type == 'lxc' and
errmsg == 'internal error: '
'Some processes refused to die'):
# Some processes in the container didn't die
# fast enough for libvirt. The container will
# eventually die. For now, move on and let
# the wait_for_destroy logic take over.
is_okay = True
elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:
LOG.warning("Cannot destroy instance, operation time out",
instance=instance)
reason = _("operation time out")
raise exception.InstancePowerOffFailure(reason=reason)
elif errcode == libvirt.VIR_ERR_SYSTEM_ERROR:
with excutils.save_and_reraise_exception():
LOG.warning("Cannot destroy instance, general system "
"call failure", instance=instance)
if not is_okay:
with excutils.save_and_reraise_exception():
LOG.error('Error from libvirt during destroy. '
'Code=%(errcode)s Error=%(e)s',
{'errcode': errcode, 'e': e},
instance=instance)
def _wait_for_destroy(expected_domid):
"""Called at an interval until the VM is gone."""
# NOTE(vish): If the instance disappears during the destroy
# we ignore it so the cleanup can still be
# attempted because we would prefer destroy to
# never fail.
try:
dom_info = self.get_info(instance)
state = dom_info.state
new_domid = dom_info.internal_id
except exception.InstanceNotFound:
LOG.debug("During wait destroy, instance disappeared.",
instance=instance)
state = power_state.SHUTDOWN
if state == power_state.SHUTDOWN:
LOG.info("Instance destroyed successfully.", instance=instance)
raise loopingcall.LoopingCallDone()
# NOTE(wangpan): If the instance was booted again after destroy,
# this may be an endless loop, so check the id of
# domain here, if it changed and the instance is
# still running, we should destroy it again.
# see https://bugs.launchpad.net/nova/+bug/1111213 for more details
if new_domid != expected_domid:
LOG.info("Instance may be started again.", instance=instance)
kwargs['is_running'] = True
raise loopingcall.LoopingCallDone()
kwargs = {'is_running': False}
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy,
old_domid)
timer.start(interval=0.5).wait()
if kwargs['is_running']:
LOG.info("Going to destroy instance again.", instance=instance)
self._destroy(instance)
else:
# NOTE(GuanQiang): teardown container to avoid resource leak
if CONF.libvirt.virt_type == 'lxc':
self._teardown_container(instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True):
self._destroy(instance)
self.cleanup(context, instance, network_info, block_device_info,
destroy_disks)
def _undefine_domain(self, instance):
try:
guest = self._host.get_guest(instance)
try:
hw_firmware_type = instance.image_meta.properties.get(
'hw_firmware_type')
support_uefi = self._check_uefi_support(hw_firmware_type)
guest.delete_configuration(support_uefi)
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception() as ctxt:
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_NO_DOMAIN:
LOG.debug("Called undefine, but domain already gone.",
instance=instance)
ctxt.reraise = False
else:
LOG.error('Error from libvirt during undefine. '
'Code=%(errcode)s Error=%(e)s',
{'errcode': errcode,
'e': encodeutils.exception_to_unicode(e)},
instance=instance)
except exception.InstanceNotFound:
pass
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Cleanup the instance from the host.
Identify if the instance disks and instance path should be removed
from the host before calling down into the _cleanup method for the
actual removal of resources from the host.
:param context: security context
:param instance: instance object for the instance being cleaned up
:param network_info: instance network information
:param block_device_info: optional instance block device information
:param destroy_disks: if local ephemeral disks should be destroyed
:param migrate_data: optional migrate_data object
:param destroy_vifs: if plugged vifs should be unplugged
"""
cleanup_instance_dir = False
cleanup_instance_disks = False
# We assume destroy_disks means destroy instance directory and disks
if destroy_disks:
cleanup_instance_dir = True
cleanup_instance_disks = True
else:
# NOTE(mdbooth): I think the theory here was that if this is a
# migration with shared block storage then we need to delete the
# instance directory because that's not shared. I'm pretty sure
# this is wrong.
if migrate_data and 'is_shared_block_storage' in migrate_data:
cleanup_instance_dir = migrate_data.is_shared_block_storage
# NOTE(lyarwood): The following workaround allows operators to
# ensure that non-shared instance directories are removed after an
# evacuation or revert resize when using the shared RBD
# imagebackend. This workaround is not required when cleaning up
# migrations that provide migrate_data to this method as the
# existing is_shared_block_storage conditional will cause the
# instance directory to be removed.
if not cleanup_instance_dir:
if CONF.workarounds.ensure_libvirt_rbd_instance_dir_cleanup:
cleanup_instance_dir = CONF.libvirt.images_type == 'rbd'
return self._cleanup(
context, instance, network_info,
block_device_info=block_device_info,
destroy_vifs=destroy_vifs,
cleanup_instance_dir=cleanup_instance_dir,
cleanup_instance_disks=cleanup_instance_disks)
def _cleanup(self, context, instance, network_info, block_device_info=None,
destroy_vifs=True, cleanup_instance_dir=False,
cleanup_instance_disks=False):
"""Cleanup the domain and any attached resources from the host.
This method cleans up any pmem devices, unplugs VIFs, disconnects
attached volumes and undefines the instance domain within libvirt.
It also optionally removes the ephemeral disks and the instance
directory from the host depending on the cleanup_instance_dir|disks
kwargs provided.
:param context: security context
:param instance: instance object for the instance being cleaned up
:param network_info: instance network information
:param block_device_info: optional instance block device information
:param destroy_vifs: if plugged vifs should be unplugged
:param cleanup_instance_dir: If the instance dir should be removed
:param cleanup_instance_disks: If the instance disks should be removed
"""
# zero the data on backend pmem device
vpmems = self._get_vpmems(instance)
if vpmems:
self._cleanup_vpmems(vpmems)
if destroy_vifs:
self._unplug_vifs(instance, network_info, True)
# FIXME(wangpan): if the instance is booted again here, such as the
# soft reboot operation boot it here, it will become
# "running deleted", should we check and destroy it
# at the end of this method?
# NOTE(vish): we disconnect from volumes regardless
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
if not connection_info:
# if booting from a volume, creation could have failed meaning
# this would be unset
continue
try:
self._disconnect_volume(context, connection_info, instance)
except Exception as exc:
with excutils.save_and_reraise_exception() as ctxt:
if cleanup_instance_disks:
# Don't block on Volume errors if we're trying to
# delete the instance as we may be partially created
# or deleted
ctxt.reraise = False
LOG.warning(
"Ignoring Volume Error on vol %(vol_id)s "
"during delete %(exc)s",
{'vol_id': vol.get('volume_id'),
'exc': encodeutils.exception_to_unicode(exc)},
instance=instance)
if cleanup_instance_disks:
# NOTE(haomai): destroy volumes if needed
if CONF.libvirt.images_type == 'lvm':
self._cleanup_lvm(instance, block_device_info)
if CONF.libvirt.images_type == 'rbd':
self._cleanup_rbd(instance)
if cleanup_instance_dir:
attempts = int(instance.system_metadata.get('clean_attempts',
'0'))
success = self.delete_instance_files(instance)
# NOTE(mriedem): This is used in the _run_pending_deletes periodic
# task in the compute manager. The tight coupling is not great...
instance.system_metadata['clean_attempts'] = str(attempts + 1)
if success:
instance.cleaned = True
try:
instance.save()
except exception.InstanceNotFound:
pass
if cleanup_instance_disks:
crypto.delete_vtpm_secret(context, instance)
self._undefine_domain(instance)
def cleanup_lingering_instance_resources(self, instance):
# zero the data on backend pmem device, if fails
# it will raise an exception
vpmems = self._get_vpmems(instance)
if vpmems:
self._cleanup_vpmems(vpmems)
def _cleanup_vpmems(self, vpmems):
for vpmem in vpmems:
try:
nova.privsep.libvirt.cleanup_vpmem(vpmem.devpath)
except Exception as e:
raise exception.VPMEMCleanupFailed(dev=vpmem.devpath,
error=e)
def _get_serial_ports_from_guest(self, guest, mode=None):
"""Returns an iterator over serial port(s) configured on guest.
:param mode: Should be a value in (None, bind, connect)
"""
xml = guest.get_xml_desc()
tree = etree.fromstring(xml)
# The 'serial' device is the base for x86 platforms. Other platforms
# (e.g. kvm on system z = S390X) can only use 'console' devices.
xpath_mode = "[@mode='%s']" % mode if mode else ""
serial_tcp = "./devices/serial[@type='tcp']/source" + xpath_mode
console_tcp = "./devices/console[@type='tcp']/source" + xpath_mode
tcp_devices = tree.findall(serial_tcp)
if len(tcp_devices) == 0:
tcp_devices = tree.findall(console_tcp)
for source in tcp_devices:
yield (source.get("host"), int(source.get("service")))
def _get_scsi_controller_next_unit(self, guest):
"""Returns the max disk unit used by scsi controller"""
xml = guest.get_xml_desc()
tree = etree.fromstring(xml)
addrs = "./devices/disk[target/@bus='scsi']/address[@type='drive']"
ret = []
for obj in tree.xpath(addrs):
ret.append(int(obj.get('unit', 0)))
return max(ret) + 1 if ret else 0
def _cleanup_rbd(self, instance):
# NOTE(nic): On revert_resize, the cleanup steps for the root
# volume are handled with an "rbd snap rollback" command,
# and none of this is needed (and is, in fact, harmful) so
# filter out non-ephemerals from the list
if instance.task_state == task_states.RESIZE_REVERTING:
filter_fn = lambda disk: (disk.startswith(instance.uuid) and
disk.endswith('disk.local'))
else:
filter_fn = lambda disk: disk.startswith(instance.uuid)
rbd_utils.RBDDriver().cleanup_volumes(filter_fn)
def _cleanup_lvm(self, instance, block_device_info):
"""Delete all LVM disks for given instance object."""
if instance.get('ephemeral_key_uuid') is not None:
# detach encrypted volumes
disks = self._get_instance_disk_info(instance, block_device_info)
for disk in disks:
if dmcrypt.is_encrypted(disk['path']):
dmcrypt.delete_volume(disk['path'])
disks = self._lvm_disks(instance)
if disks:
lvm.remove_volumes(disks)
def _lvm_disks(self, instance):
"""Returns all LVM disks for given instance object."""
if CONF.libvirt.images_volume_group:
vg = os.path.join('/dev', CONF.libvirt.images_volume_group)
if not os.path.exists(vg):
return []
pattern = '%s_' % instance.uuid
def belongs_to_instance(disk):
return disk.startswith(pattern)
def fullpath(name):
return os.path.join(vg, name)
logical_volumes = lvm.list_volumes(vg)
disks = [fullpath(disk) for disk in logical_volumes
if belongs_to_instance(disk)]
return disks
return []
def get_volume_connector(self, instance):
root_helper = utils.get_root_helper()
return connector.get_connector_properties(
root_helper, CONF.my_block_storage_ip,
CONF.libvirt.volume_use_multipath,
enforce_multipath=True,
host=CONF.host)
def _cleanup_resize_vtpm(
self,
context: nova_context.RequestContext,
instance: 'objects.Instance',
) -> None:
"""Handle vTPM when confirming a migration or resize.
If the old flavor has vTPM and the new one doesn't, there are keys to
be deleted.
"""
old_vtpm_config = hardware.get_vtpm_constraint(
instance.old_flavor, instance.image_meta)
new_vtpm_config = hardware.get_vtpm_constraint(
instance.new_flavor, instance.image_meta)
if old_vtpm_config and not new_vtpm_config:
# the instance no longer cares for its vTPM so delete the related
# secret; the deletion of the instance directory and undefining of
# the domain will take care of the TPM files themselves
LOG.info('New flavor no longer requests vTPM; deleting secret.')
crypto.delete_vtpm_secret(context, instance)
# TODO(stephenfin): Fold this back into its only caller, cleanup_resize
def _cleanup_resize(self, context, instance, network_info):
inst_base = libvirt_utils.get_instance_path(instance)
target = inst_base + '_resize'
# zero the data on backend old pmem device
vpmems = self._get_vpmems(instance, prefix='old')
if vpmems:
self._cleanup_vpmems(vpmems)
# Remove any old vTPM data, if necessary
self._cleanup_resize_vtpm(context, instance)
# Deletion can fail over NFS, so retry the deletion as required.
# Cap the number of attempts at 5; in practice the directory is
# usually removed by the second attempt.
attempts = 0
while os.path.exists(target) and attempts < 5:
shutil.rmtree(target, ignore_errors=True)
if os.path.exists(target):
time.sleep(random.randint(20, 200) / 100.0)
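# i.e. back off for a random 0.2 - 2.0 seconds between attempts.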
attempts += 1
# NOTE(mriedem): Some image backends will recreate the instance path
# and disk.info during init, and all we need the root disk for
# here is removing cloned snapshots which is backend-specific, so
# check that first before initializing the image backend object. If
# there is ever an image type that supports clone *and* re-creates
# the instance directory and disk.info on init, this condition will
# need to be re-visited to make sure that backend doesn't re-create
# the disk. Refer to bugs: 1666831 1728603 1769131
if self.image_backend.backend(CONF.libvirt.images_type).SUPPORTS_CLONE:
root_disk = self.image_backend.by_name(instance, 'disk')
if root_disk.exists():
root_disk.remove_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME)
if instance.host != CONF.host:
self._undefine_domain(instance)
# TODO(sean-k-mooney): remove this call to unplug_vifs after
# Wallaby is released. VIFs are now unplugged in resize_instance.
try:
self.unplug_vifs(instance, network_info)
except exception.InternalError as e:
LOG.debug(e, instance=instance)
def _get_volume_driver(
self, connection_info: ty.Dict[str, ty.Any]
) -> 'volume.LibvirtBaseVolumeDriver':
"""Fetch the nova.virt.libvirt.volume driver
Based on the provided connection_info return a nova.virt.libvirt.volume
driver. This will call out to os-brick to construct a connector and
check if the connector is valid on the underlying host.
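For example (illustrative only), a connection_info containing
'driver_volume_type': 'rbd' is resolved via the 'rbd' entry in
VOLUME_DRIVERS and the resulting driver instance is cached for reuse.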
:param connection_info: The connection_info associated with the volume
:raises: VolumeDriverNotFound if no driver is found or if the host
doesn't support the requested driver. This retains the legacy behaviour
from when only supported drivers were loaded on startup, which led to a
VolumeDriverNotFound being raised later if an invalid driver was
requested.
"""
driver_type = connection_info.get('driver_volume_type')
# If the driver_type isn't listed in the supported type list, fail
if driver_type not in VOLUME_DRIVERS:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
# Return the cached driver
if driver_type in self.volume_drivers:
return self.volume_drivers.get(driver_type)
@utils.synchronized('cache_volume_driver')
def _cache_volume_driver(driver_type):
# Check if another request cached the driver while we waited
if driver_type in self.volume_drivers:
return self.volume_drivers.get(driver_type)
try:
driver_class = importutils.import_class(
VOLUME_DRIVERS.get(driver_type))
self.volume_drivers[driver_type] = driver_class(self._host)
return self.volume_drivers.get(driver_type)
except brick_exception.InvalidConnectorProtocol:
LOG.debug('Unable to load volume driver %s. It is not '
'supported on this host.', driver_type)
# NOTE(lyarwood): This exception is a subclass of
# VolumeDriverNotFound to ensure no callers have to change
# their error handling code after the move to on-demand loading
# of the volume drivers and associated os-brick connectors.
raise exception.VolumeDriverNotSupported(
volume_driver=VOLUME_DRIVERS.get(driver_type))
# Cache the volume driver if it hasn't been cached already.
return _cache_volume_driver(driver_type)
def _connect_volume(self, context, connection_info, instance,
encryption=None):
vol_driver = self._get_volume_driver(connection_info)
vol_driver.connect_volume(connection_info, instance)
try:
self._attach_encryptor(context, connection_info, encryption)
except Exception:
# Encryption failed so rollback the volume connection.
with excutils.save_and_reraise_exception(logger=LOG):
LOG.exception("Failure attaching encryptor; rolling back "
"volume connection", instance=instance)
vol_driver.disconnect_volume(connection_info, instance)
def _should_disconnect_target(self, context, instance, multiattach,
vol_driver, volume_id):
# NOTE(jdg): Multiattach is a special case (not to be confused
# with shared_targets). With multiattach we may have a single volume
# attached multiple times to *this* compute node (ie Server-1 and
# Server-2). So, if we receive a call to delete the attachment for
# Server-1 we need to take special care to make sure that the Volume
# isn't also attached to another Server on this Node. Otherwise we
# will indiscriminately delete the connection for all Servers, which is
# no good. So check if it's attached multiple times on this node; if
# it is, we skip the call to brick to delete the connection.
if not multiattach:
return True
# NOTE(deiter): Volume drivers using _HostMountStateManager are another
# special case. _HostMountStateManager ensures that the compute node
# only attempts to mount a single mountpoint in use by multiple
# attachments once, and that it is not unmounted until it is no longer
# in use by any attachments. So we can skip the multiattach check for
# volume drivers that are based on LibvirtMountedFileSystemVolumeDriver.
if isinstance(vol_driver, fs.LibvirtMountedFileSystemVolumeDriver):
return True
connection_count = 0
volume = self._volume_api.get(context, volume_id)
attachments = volume.get('attachments', {})
if len(attachments) > 1:
# First we get a list of all Server UUID's associated with
# this Host (Compute Node). We're going to use this to
# determine if the Volume being detached is also in-use by
# another Server on this Host, i.e. just check to see if more
# than one attachment.server_id for this volume is in our
# list of Server UUID's for this Host
servers_this_host = objects.InstanceList.get_uuids_by_host(
context, instance.host)
# NOTE(jdg): nova.volume.cinder translates the
# volume['attachments'] response into a dict which includes
# the Server UUID as the key, so we're using that
# here to check against our servers_this_host list
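# An assumed, illustrative shape of that translated dict:
#   {'<server-uuid-1>': {'attachment_id': '<id>', 'mountpoint': '/dev/vdb'},
#    '<server-uuid-2>': {...}}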
for server_id, data in attachments.items():
if server_id in servers_this_host:
connection_count += 1
return connection_count <= 1
def _disconnect_volume(self, context, connection_info, instance,
encryption=None):
self._detach_encryptor(context, connection_info, encryption=encryption)
vol_driver = self._get_volume_driver(connection_info)
volume_id = driver_block_device.get_volume_id(connection_info)
multiattach = connection_info.get('multiattach', False)
if self._should_disconnect_target(
context, instance, multiattach, vol_driver, volume_id):
vol_driver.disconnect_volume(connection_info, instance)
else:
LOG.info('Detected multiple connections on this host for '
'volume: %(volume)s, skipping target disconnect.',
{'volume': volume_id})
def _extend_volume(self, connection_info, instance, requested_size):
vol_driver = self._get_volume_driver(connection_info)
return vol_driver.extend_volume(connection_info, instance,
requested_size)
def _allow_native_luksv1(self, encryption=None):
"""Check if QEMU's native LUKSv1 decryption should be used.
"""
# NOTE(lyarwood): Native LUKSv1 decryption can be disabled via a
# workarounds configurable in order to avoid known performance issues
# with the libgcrypt lib.
if CONF.workarounds.disable_native_luksv1:
return False
# NOTE(lyarwood): Ensure the LUKSv1 provider is used.
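# An illustrative (assumed) encryption metadata dict:
#   {'provider': 'luks', 'control_location': 'front-end',
#    'encryption_key_id': '<key uuid>', 'cipher': 'aes-xts-plain64'}
# Legacy deployments may instead report the old encryptor class path,
# which is mapped back to a format below.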
provider = None
if encryption:
provider = encryption.get('provider', None)
if provider in encryptors.LEGACY_PROVIDER_CLASS_TO_FORMAT_MAP:
provider = encryptors.LEGACY_PROVIDER_CLASS_TO_FORMAT_MAP[provider]
return provider == encryptors.LUKS
def _get_volume_config(self, connection_info, disk_info):
vol_driver = self._get_volume_driver(connection_info)
conf = vol_driver.get_config(connection_info, disk_info)
self._set_cache_mode(conf)
return conf
def _get_volume_encryptor(self, connection_info, encryption):
root_helper = utils.get_root_helper()
return encryptors.get_volume_encryptor(root_helper=root_helper,
keymgr=key_manager.API(CONF),
connection_info=connection_info,
**encryption)
def _get_volume_encryption(self, context, connection_info):
"""Get the encryption metadata dict if it is not provided
"""
encryption = {}
volume_id = driver_block_device.get_volume_id(connection_info)
if volume_id:
encryption = encryptors.get_encryption_metadata(context,
self._volume_api, volume_id, connection_info)
return encryption
def _attach_encryptor(self, context, connection_info, encryption):
"""Attach the frontend encryptor if one is required by the volume.
The request context is only used when an encryption metadata dict is
not provided. The populated encryption metadata dict is then used
to determine whether an attempt to attach the encryptor should be made.
"""
# NOTE(lyarwood): Skip any attempt to fetch encryption metadata or the
# actual passphrase from the key manager if a libvirt secret already
# exists locally for the volume. This suggests that the instance was
# only powered off or the underlying host rebooted.
volume_id = driver_block_device.get_volume_id(connection_info)
if self._host.find_secret('volume', volume_id):
LOG.debug("A libvirt secret for volume %s has been found on the "
"host, skipping any attempt to create another or attach "
"an os-brick encryptor.", volume_id)
return
if encryption is None:
encryption = self._get_volume_encryption(context, connection_info)
if encryption and self._allow_native_luksv1(encryption=encryption):
# NOTE(lyarwood): Fetch the associated key for the volume and
# decode the passphrase from the key.
# FIXME(lyarwood): c-vol currently creates symmetric keys for use
# with volumes, leading to the binary to hex to string conversion
# below.
keymgr = key_manager.API(CONF)
key = keymgr.get(context, encryption['encryption_key_id'])
key_encoded = key.get_encoded()
passphrase = binascii.hexlify(key_encoded).decode('utf-8')
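# e.g. a 256-bit (32 byte) symmetric key becomes a 64 character hex
# passphrase.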
# NOTE(lyarwood): Retain the behaviour of the original os-brick
# encryptors and format any volume that does not identify as
# encrypted with LUKS.
# FIXME(lyarwood): Remove this once c-vol correctly formats
# encrypted volumes during their initial creation:
# https://bugs.launchpad.net/cinder/+bug/1739442
device_path = connection_info.get('data').get('device_path')
if device_path:
root_helper = utils.get_root_helper()
if not luks_encryptor.is_luks(root_helper, device_path):
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor._format_volume(passphrase, **encryption)
# NOTE(lyarwood): Store the passphrase as a libvirt secret locally
# on the compute node. This secret is used later when generating
# the volume config.
self._host.create_secret('volume', volume_id, password=passphrase)
elif encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.attach_volume(context, **encryption)
def _detach_encryptor(self, context, connection_info, encryption):
"""Detach the frontend encryptor if one is required by the volume.
The request context is only used when an encryption metadata dict is
not provided. The populated encryption metadata dict is then used
to determine whether an attempt to detach the encryptor should be made.
If native LUKS decryption is enabled then delete the previously created
libvirt volume secret from the host.
"""
volume_id = driver_block_device.get_volume_id(connection_info)
if volume_id and self._host.find_secret('volume', volume_id):
return self._host.delete_secret('volume', volume_id)
if encryption is None:
encryption = self._get_volume_encryption(context, connection_info)
# NOTE(lyarwood): Handle bugs #1821696 and #1917619 by avoiding the use
# of the os-brick encryptors if we don't have a device_path. The lack
# of a device_path here suggests the volume was natively attached to
# QEMU anyway as volumes without a device_path are not supported by
# os-brick encryptors. For volumes with a device_path the calls to
# the os-brick encryptors are safe as they are actually idempotent,
# ignoring any failures caused by the volumes actually being natively
# attached previously.
if (encryption and connection_info['data'].get('device_path') is None):
return
if encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.detach_volume(**encryption)
def _check_discard_for_attach_volume(self, conf, instance):
"""Perform some checks for volumes configured for discard support.
If discard is configured for the volume, and the guest is using a
configuration known to not work, we will log a message explaining
the reason why.
"""
if conf.driver_discard == 'unmap' and conf.target_bus == 'virtio':
LOG.debug('Attempting to attach volume %(id)s with discard '
'support enabled to an instance using an '
'unsupported configuration. target_bus = '
'%(bus)s. Trim commands will not be issued to '
'the storage device.',
{'bus': conf.target_bus,
'id': conf.serial},
instance=instance)
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
guest = self._host.get_guest(instance)
disk_dev = mountpoint.rpartition("/")[2]
bdm = {
'device_name': disk_dev,
'disk_bus': disk_bus,
'device_type': device_type}
# NOTE(cfb): If the volume has a custom block size, check that
# we are using QEMU/KVM and libvirt >= 0.10.2. The
# presence of a block size is considered mandatory by
# cinder so we fail if we can't honor the request.
data = {}
if 'data' in connection_info:
data = connection_info['data']
if 'logical_block_size' in data or 'physical_block_size' in data:
if CONF.libvirt.virt_type not in ("kvm", "qemu"):
msg = _("Volume sets block size, but the current "
"libvirt hypervisor '%s' does not support custom "
"block size") % CONF.libvirt.virt_type
raise exception.InvalidHypervisorType(msg)
self._connect_volume(context, connection_info, instance,
encryption=encryption)
disk_info = blockinfo.get_info_from_bdm(
instance, CONF.libvirt.virt_type, instance.image_meta, bdm)
if disk_info['bus'] == 'scsi':
disk_info['unit'] = self._get_scsi_controller_next_unit(guest)
conf = self._get_volume_config(connection_info, disk_info)
self._check_discard_for_attach_volume(conf, instance)
try:
state = guest.get_power_state(self._host)
live = state in (power_state.RUNNING, power_state.PAUSED)
guest.attach_device(conf, persistent=True, live=live)
# NOTE(artom) If we're attaching with a device role tag, we need to
# rebuild device_metadata. If we're attaching without a role
# tag, we're rebuilding it here needlessly anyways. This isn't a
# massive deal, and it helps reduce code complexity by not having
# to indicate to the virt driver that the attach is tagged. The
# really important optimization of not calling the database unless
# device_metadata has actually changed is done for us by
# instance.save().
instance.device_metadata = self._build_device_metadata(
context, instance)
instance.save()
except Exception:
LOG.exception('Failed to attach volume at mountpoint: %s',
mountpoint, instance=instance)
with excutils.save_and_reraise_exception():
self._disconnect_volume(context, connection_info, instance,
encryption=encryption)
def _swap_volume(self, guest, disk_dev, conf, resize_to, hw_firmware_type):
"""Swap existing disk with a new block device.
Call virDomainBlockRebase or virDomainBlockCopy with Libvirt >= 6.0.0
to copy and then pivot to a new volume.
:param: guest: Guest object representing the guest domain
:param: disk_dev: Device within the domain that is being swapped
:param: conf: LibvirtConfigGuestDisk object representing the new volume
:param: resize_to: Size of the dst volume, 0 if the same as the src
:param: hw_firmware_type: fields.FirmwareType if set in the imagemeta
"""
dev = guest.get_block_device(disk_dev)
# Save a copy of the domain's persistent XML file. We'll use this
# to redefine the domain if anything fails during the volume swap.
xml = guest.get_xml_desc(dump_inactive=True, dump_sensitive=True)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
try:
dev.abort_job()
except Exception:
pass
try:
# NOTE (rmk): virDomainBlockRebase and virDomainBlockCopy cannot be
# executed on persistent domains, so we need to temporarily
# undefine it. If any part of this block fails, the domain is
# re-defined regardless.
if guest.has_persistent_configuration():
support_uefi = self._check_uefi_support(hw_firmware_type)
guest.delete_configuration(support_uefi)
try:
dev.copy(conf.to_xml(), reuse_ext=True)
while not dev.is_job_complete():
time.sleep(0.5)
dev.abort_job(pivot=True)
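# After the copy job has converged (the wait loop above),
# abort_job(pivot=True) pivots the guest onto the destination volume,
# so from this point the guest reads from and writes to the new volume.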
except Exception as exc:
# NOTE(lyarwood): conf.source_path is not set for RBD disks so
# fallback to conf.target_dev when None.
new_path = conf.source_path or conf.target_dev
old_path = disk_dev
LOG.exception("Failure rebasing volume %(new_path)s on "
"%(old_path)s.", {'new_path': new_path,
'old_path': old_path})
raise exception.VolumeRebaseFailed(reason=str(exc))
if resize_to:
dev.resize(resize_to * units.Gi)
# Make sure we will redefine the domain using the updated
# configuration after the volume was swapped. The dump_inactive
# keyword arg controls whether we pull the inactive (persistent)
# or active (live) config from the domain. We want to pull the
# live config after the volume was updated to use when we redefine
# the domain.
xml = guest.get_xml_desc(dump_inactive=False, dump_sensitive=True)
finally:
self._host.write_instance_config(xml)
def swap_volume(self, context, old_connection_info,
new_connection_info, instance, mountpoint, resize_to):
# NOTE(lyarwood): https://bugzilla.redhat.com/show_bug.cgi?id=760547
old_encrypt = self._get_volume_encryption(context, old_connection_info)
new_encrypt = self._get_volume_encryption(context, new_connection_info)
if ((old_encrypt and self._allow_native_luksv1(old_encrypt)) or
(new_encrypt and self._allow_native_luksv1(new_encrypt))):
raise NotImplementedError(_("Swap volume is not supported for "
"encrypted volumes when native LUKS decryption is enabled."))
guest = self._host.get_guest(instance)
disk_dev = mountpoint.rpartition("/")[2]
if not guest.get_disk(disk_dev):
raise exception.DiskNotFound(location=disk_dev)
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(
CONF.libvirt.virt_type, disk_dev),
'type': 'disk',
}
# NOTE (lyarwood): new_connection_info will be modified by the
# following _connect_volume call down into the volume drivers. The
# majority of the volume drivers will add a device_path that is in turn
# used by _get_volume_config to set the source_path of the
# LibvirtConfigGuestDisk object it returns. We do not explicitly save
# this to the BDM here as the upper compute swap_volume method will
# eventually do this for us.
self._connect_volume(context, new_connection_info, instance)
conf = self._get_volume_config(new_connection_info, disk_info)
hw_firmware_type = instance.image_meta.properties.get(
'hw_firmware_type')
try:
self._swap_volume(guest, disk_dev, conf,
resize_to, hw_firmware_type)
except exception.VolumeRebaseFailed:
with excutils.save_and_reraise_exception():
self._disconnect_volume(context, new_connection_info, instance)
self._disconnect_volume(context, old_connection_info, instance)
def _get_existing_domain_xml(self, instance, network_info,
block_device_info=None):
try:
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
except exception.InstanceNotFound:
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
instance.image_meta,
block_device_info)
xml = self._get_guest_xml(nova_context.get_admin_context(),
instance, network_info, disk_info,
instance.image_meta,
block_device_info=block_device_info)
return xml
def emit_event(self, event: virtevent.InstanceEvent) -> None:
"""Handles libvirt specific events locally and dispatches the rest to
the compute manager.
"""
if isinstance(event, libvirtevent.LibvirtEvent):
# These are libvirt specific events handled here on the driver
# level instead of propagating them to the compute manager level
if isinstance(event, libvirtevent.DeviceEvent):
# TODO(gibi): handle it
pass
else:
LOG.debug(
"Received event %s from libvirt but no handler is "
"implemented for it in the libvirt driver so it is "
"ignored", event)
else:
# Let the generic driver code dispatch the event to the compute
# manager
super().emit_event(event)
def detach_volume(self, context, connection_info, instance, mountpoint,
encryption=None):
disk_dev = mountpoint.rpartition("/")[2]
try:
guest = self._host.get_guest(instance)
state = guest.get_power_state(self._host)
live = state in (power_state.RUNNING, power_state.PAUSED)
# NOTE(lyarwood): The volume must be detached from the VM before
# detaching any attached encryptors or disconnecting the underlying
# volume in _disconnect_volume. Otherwise, the encryptor or volume
# driver may report that the volume is still in use.
wait_for_detach = guest.detach_device_with_retry(
guest.get_disk, disk_dev, live=live)
wait_for_detach()
except exception.InstanceNotFound:
# NOTE(zhaoqin): If the instance does not exist, _lookup_by_name()
# will throw InstanceNotFound exception. Need to
# disconnect volume under this circumstance.
LOG.warning("During detach_volume, instance disappeared.",
instance=instance)
except exception.DeviceNotFound:
# We should still try to disconnect logical device from
# host, an error might have happened during a previous
# call.
LOG.info("Device %s not found in instance.",
disk_dev, instance=instance)
except libvirt.libvirtError as ex:
# NOTE(vish): This is called to cleanup volumes after live
# migration, so we should still disconnect even if
# the instance doesn't exist here anymore.
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warning("During detach_volume, instance disappeared.",
instance=instance)
else:
raise
self._disconnect_volume(context, connection_info, instance,
encryption=encryption)
def _resize_attached_volume(self, new_size, block_device, instance):
LOG.debug('Resizing target device %(dev)s to %(size)u',
{'dev': block_device._disk, 'size': new_size},
instance=instance)
block_device.resize(new_size)
def _resize_attached_encrypted_volume(self, original_new_size,
block_device, instance,
connection_info, encryption):
# TODO(lyarwood): Also handle the dm-crypt encryption providers of
# plain and LUKSv2, for now just use the original_new_size.
decrypted_device_new_size = original_new_size
# NOTE(lyarwood): original_new_size currently refers to the total size
# of the extended volume in bytes. With natively decrypted LUKSv1
# volumes we need to ensure this now takes the LUKSv1 header and key
# material into account. Otherwise QEMU will attempt and fail to grow
# host block devices and remote RBD volumes.
if self._allow_native_luksv1(encryption):
try:
# NOTE(lyarwood): Find the path to provide to qemu-img
if 'device_path' in connection_info['data']:
path = connection_info['data']['device_path']
elif connection_info['driver_volume_type'] == 'rbd':
volume_name = connection_info['data']['name']
path = f"rbd:{volume_name}"
if connection_info['data'].get('auth_enabled'):
username = connection_info['data']['auth_username']
path = f"rbd:{volume_name}:id={username}"
else:
path = 'unknown'
raise exception.DiskNotFound(location='unknown')
info = images.privileged_qemu_img_info(path)
format_specific_data = info.format_specific['data']
payload_offset = format_specific_data['payload-offset']
# NOTE(lyarwood): Ensure the underlying device is not resized
# by subtracting the LUKSv1 payload_offset (where the users
# encrypted data starts) from the original_new_size (the total
# size of the underlying volume). Both are reported in bytes.
decrypted_device_new_size = original_new_size - payload_offset
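# For example (assumed, typical values): extending the volume to 2GiB
# (2147483648 bytes) with a 2MiB payload offset (2097152 bytes)
# resizes the decrypted device to 2145386496 bytes.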
except exception.DiskNotFound:
with excutils.save_and_reraise_exception():
LOG.exception('Unable to access the encrypted disk %s.',
path, instance=instance)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Unknown error when attempting to find the '
'payload_offset for LUKSv1 encrypted disk '
'%s.', path, instance=instance)
# NOTE(lyarwood): Resize the decrypted device within the instance to
# the calculated size as with normal volumes.
self._resize_attached_volume(
decrypted_device_new_size, block_device, instance)
def extend_volume(self, context, connection_info, instance,
requested_size):
try:
new_size = self._extend_volume(connection_info, instance,
requested_size)
except NotImplementedError:
raise exception.ExtendVolumeNotSupported()
# Resize the device in QEMU so its size is updated and
# detected by the instance without rebooting.
try:
guest = self._host.get_guest(instance)
state = guest.get_power_state(self._host)
volume_id = driver_block_device.get_volume_id(connection_info)
active_state = state in (power_state.RUNNING, power_state.PAUSED)
if active_state:
if 'device_path' in connection_info['data']:
disk_path = connection_info['data']['device_path']
else:
# Some drivers (eg. net) don't put the device_path
# into the connection_info. Match disks by their serial
# number instead
disk = next((d for d in guest.get_all_disks()
if d.serial == volume_id), None)
if not disk:
raise exception.VolumeNotFound(volume_id=volume_id)
disk_path = disk.target_dev
dev = guest.get_block_device(disk_path)
encryption = encryptors.get_encryption_metadata(
context, self._volume_api, volume_id, connection_info)
if encryption:
self._resize_attached_encrypted_volume(
new_size, dev, instance,
connection_info, encryption)
else:
self._resize_attached_volume(
new_size, dev, instance)
else:
LOG.debug('Skipping block device resize, guest is not running',
instance=instance)
except exception.InstanceNotFound:
with excutils.save_and_reraise_exception():
LOG.warning('During extend_volume, instance disappeared.',
instance=instance)
except libvirt.libvirtError:
with excutils.save_and_reraise_exception():
LOG.exception('resizing block device failed.',
instance=instance)
def attach_interface(self, context, instance, image_meta, vif):
guest = self._host.get_guest(instance)
self.vif_driver.plug(instance, vif)
cfg = self.vif_driver.get_config(instance, vif, image_meta,
instance.flavor,
CONF.libvirt.virt_type)
try:
state = guest.get_power_state(self._host)
live = state in (power_state.RUNNING, power_state.PAUSED)
guest.attach_device(cfg, persistent=True, live=live)
except libvirt.libvirtError:
LOG.error('attaching network adapter failed.',
instance=instance, exc_info=True)
self.vif_driver.unplug(instance, vif)
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
try:
# NOTE(artom) If we're attaching with a device role tag, we need to
# rebuild device_metadata. If we're attaching without a role
# tag, we're rebuilding it here needlessly anyways. This isn't a
# massive deal, and it helps reduce code complexity by not having
# to indicate to the virt driver that the attach is tagged. The
# really important optimization of not calling the database unless
# device_metadata has actually changed is done for us by
# instance.save().
instance.device_metadata = self._build_device_metadata(
context, instance)
instance.save()
except Exception:
# NOTE(artom) If we fail here it means the interface attached
# successfully but building and/or saving the device metadata
# failed. Just unplugging the vif is therefore not enough cleanup,
# we need to detach the interface.
with excutils.save_and_reraise_exception(reraise=False):
LOG.error('Interface attached successfully but building '
'and/or saving device metadata failed.',
instance=instance, exc_info=True)
self.detach_interface(context, instance, vif)
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
try:
guest.set_metadata(
self._get_guest_config_meta(
instance, instance.get_network_info()))
except libvirt.libvirtError:
LOG.warning('updating libvirt metadata failed.', instance=instance)
def detach_interface(self, context, instance, vif):
guest = self._host.get_guest(instance)
cfg = self.vif_driver.get_config(instance, vif,
instance.image_meta,
instance.flavor,
CONF.libvirt.virt_type)
interface = guest.get_interface_by_cfg(cfg)
try:
# NOTE(mriedem): When deleting an instance and using Neutron,
# we can be racing against Neutron deleting the port and
# sending the vif-deleted event which then triggers a call to
# detach the interface, so if the interface is not found then
# we can just log it as a warning.
if not interface:
mac = vif.get('address')
# The interface is gone so just log it as a warning.
LOG.warning('Detaching interface %(mac)s failed because '
'the device is no longer found on the guest.',
{'mac': mac}, instance=instance)
return
state = guest.get_power_state(self._host)
live = state in (power_state.RUNNING, power_state.PAUSED)
# Now we are going to loop until the interface is detached or we
# time out.
wait_for_detach = guest.detach_device_with_retry(
guest.get_interface_by_cfg, cfg, live=live,
alternative_device_name=self.vif_driver.get_vif_devname(vif))
wait_for_detach()
except exception.DeviceDetachFailed:
# We failed to detach the device even with the retry loop, so let's
# dump some debug information to the logs before raising back up.
with excutils.save_and_reraise_exception():
devname = self.vif_driver.get_vif_devname(vif)
interface = guest.get_interface_by_cfg(cfg)
if interface:
LOG.warning(
'Failed to detach interface %(devname)s after '
'repeated attempts. Final interface xml:\n'
'%(interface_xml)s\nFinal guest xml:\n%(guest_xml)s',
{'devname': devname,
'interface_xml': interface.to_xml(),
'guest_xml': guest.get_xml_desc()},
instance=instance)
except exception.DeviceNotFound:
# The interface is gone so just log it as a warning.
LOG.warning('Detaching interface %(mac)s failed because '
'the device is no longer found on the guest.',
{'mac': vif.get('address')}, instance=instance)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warning("During detach_interface, instance disappeared.",
instance=instance)
else:
# NOTE(mriedem): When deleting an instance and using Neutron,
# we can be racing against Neutron deleting the port and
# sending the vif-deleted event which then triggers a call to
# detach the interface, so we might have failed because the
# network device no longer exists. Libvirt will fail with
# "operation failed: no matching network device was found"
# which unfortunately does not have a unique error code so we
# need to look up the interface by config and if it's not found
# then we can just log it as a warning rather than tracing an
# error.
mac = vif.get('address')
# Get a fresh instance of the guest in case it is gone.
try:
guest = self._host.get_guest(instance)
except exception.InstanceNotFound:
LOG.info("Instance disappeared while detaching interface "
"%s", vif['id'], instance=instance)
return
interface = guest.get_interface_by_cfg(cfg)
if interface:
LOG.error('detaching network adapter failed.',
instance=instance, exc_info=True)
raise exception.InterfaceDetachFailed(
instance_uuid=instance.uuid)
# The interface is gone so just log it as a warning.
LOG.warning('Detaching interface %(mac)s failed because '
'the device is no longer found on the guest.',
{'mac': mac}, instance=instance)
finally:
# NOTE(gibi): we need to unplug the vif _after_ the detach is done
# on the libvirt side as otherwise libvirt will still manage the
# device that our unplug code is trying to reset. This can cause a
# race and leave the detached device configured. Even if we failed
# to detach due to race conditions, the unplug is still necessary
# for the same reason.
self.vif_driver.unplug(instance, vif)
try:
# NOTE(nmiki): In order for the interface to be removed from
# network_info, the nova-compute process needs to wait for
# processing on the neutron side.
# Here, we simply exclude the target VIF from the metadata.
network_info = list(filter(lambda info: info['id'] != vif['id'],
instance.get_network_info()))
guest.set_metadata(
self._get_guest_config_meta(instance, network_info))
except libvirt.libvirtError:
LOG.warning('updating libvirt metadata failed.', instance=instance)
def _create_snapshot_metadata(self, image_meta, instance,
img_fmt, snp_name):
metadata = {'status': 'active',
'name': snp_name,
'properties': {
'kernel_id': instance.kernel_id,
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance.project_id,
'ramdisk_id': instance.ramdisk_id,
}
}
if instance.os_type:
metadata['properties']['os_type'] = instance.os_type
# NOTE(vish): glance forces ami disk format to be ami
if image_meta.disk_format == 'ami':
metadata['disk_format'] = 'ami'
else:
metadata['disk_format'] = img_fmt
if image_meta.obj_attr_is_set("container_format"):
metadata['container_format'] = image_meta.container_format
else:
metadata['container_format'] = "bare"
return metadata
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+
"""
try:
guest = self._host.get_guest(instance)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
snapshot = self._image_api.get(context, image_id)
# source_format is an on-disk format
# source_type is a backend type
disk_path, source_format = libvirt_utils.find_disk(guest)
source_type = libvirt_utils.get_disk_type_from_path(disk_path)
# We won't have source_type for raw or qcow2 disks, because we can't
# determine that from the path. We should have it from the libvirt
# xml, though.
if source_type is None:
source_type = source_format
# For lxc instances we won't have it either from libvirt xml
# (because we just gave libvirt the mounted filesystem), or the path,
# so source_type is still going to be None. In this case,
# root_disk is going to default to CONF.libvirt.images_type
# below, which is still safe.
image_format = CONF.libvirt.snapshot_image_format or source_type
# NOTE(bfilippov): save lvm and rbd as raw
if image_format in ('lvm', 'rbd'):
image_format = 'raw'
metadata = self._create_snapshot_metadata(instance.image_meta,
instance,
image_format,
snapshot['name'])
snapshot_name = uuidutils.generate_uuid(dashed=False)
# store current state so we know what to resume back to if we suspend
original_power_state = guest.get_power_state(self._host)
# NOTE(dgenin): Instances with LVM encrypted ephemeral storage require
# cold snapshots. Currently, checking for encryption is
# redundant because LVM supports only cold snapshots.