# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import binascii
from collections import deque
from collections import OrderedDict
import contextlib
import copy
import datetime
import errno
import glob
import os
import random
import re
import shutil
import signal
import threading
import time
import unittest

from castellan import key_manager
import ddt
import eventlet
from eventlet import greenthread
import fixtures
from lxml import etree
import mock
from mox3 import mox
from os_brick import encryptors
from os_brick import exception as brick_exception
from os_brick.initiator import connector
import os_vif
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import encodeutils
from oslo_utils import fileutils
from oslo_utils import fixture as utils_fixture
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_utils import versionutils
import six
from six.moves import builtins
from six.moves import range

from nova.api.metadata import base as instance_metadata
from nova.api.openstack.placement.objects import resource_provider as rp_object
from nova.compute import manager
from nova.compute import power_state
from nova.compute import provider_tree
from nova.compute import task_states
from nova.compute import vm_states
import nova.conf
from nova import context
from nova.db import api as db
from nova import exception
from nova.network import model as network_model
from nova import objects
from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova.objects import migrate_data as migrate_data_obj
from nova.objects import virtual_interface as obj_vif
from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
import nova.privsep.fs
import nova.privsep.libvirt
from nova import rc_fields
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_diagnostics
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_diagnostics
from nova.tests.unit.objects import test_pci_device
from nova.tests.unit.objects import test_vcpu_model
from nova.tests.unit.virt.libvirt import fake_imagebackend
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova.tests import uuidsentinel as uuids
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.virt import fake
from nova.virt import firewall as base_firewall
from nova.virt import hardware
from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import migration as libvirt_migrate
from nova.virt.libvirt.storage import dmcrypt
from nova.virt.libvirt.storage import lvm
from nova.virt.libvirt.storage import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt.volume import volume as volume_drivers


CONF = nova.conf.CONF

_fake_network_info = fake_network.fake_get_instance_nw_info
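
# Canned "nodedev-dumpxml"-style XML for a range of host devices (SR-IOV
# physical and virtual functions, a NIC, an NVIDIA vGPU-capable GPU and an
# mdev instance), keyed by libvirt node device name. The tests below feed
# these samples to the driver's node-device parsing code.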
_fake_NodeDevXml = {
    "pci_0000_04_00_3": """
        <device>
          <name>pci_0000_04_00_3</name>
          <parent>pci_0000_00_01_1</parent>
          <driver>
            <name>igb</name>
          </driver>
          <capability type='pci'>
            <domain>0</domain>
            <bus>4</bus>
            <slot>0</slot>
            <function>3</function>
            <product id='0x1521'>I350 Gigabit Network Connection</product>
            <vendor id='0x8086'>Intel Corporation</vendor>
            <capability type='virt_functions'>
              <address domain='0x0000' bus='0x04' slot='0x10' function='0x3'/>
              <address domain='0x0000' bus='0x04' slot='0x10' function='0x7'/>
              <address domain='0x0000' bus='0x04' slot='0x11' function='0x3'/>
              <address domain='0x0000' bus='0x04' slot='0x11' function='0x7'/>
            </capability>
          </capability>
        </device>""",
    "pci_0000_04_10_7": """
        <device>
          <name>pci_0000_04_10_7</name>
          <parent>pci_0000_00_01_1</parent>
          <driver>
            <name>igbvf</name>
          </driver>
          <capability type='pci'>
            <domain>0</domain>
            <bus>4</bus>
            <slot>16</slot>
            <function>7</function>
            <product id='0x1520'>I350 Ethernet Controller Virtual Function
            </product>
            <vendor id='0x8086'>Intel Corporation</vendor>
            <capability type='phys_function'>
              <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
            </capability>
            <capability type='virt_functions'>
            </capability>
          </capability>
        </device>""",
    "pci_0000_04_11_7": """
        <device>
          <name>pci_0000_04_11_7</name>
          <parent>pci_0000_00_01_1</parent>
          <driver>
            <name>igbvf</name>
          </driver>
          <capability type='pci'>
            <domain>0</domain>
            <bus>4</bus>
            <slot>17</slot>
            <function>7</function>
            <product id='0x1520'>I350 Ethernet Controller Virtual Function
            </product>
            <vendor id='0x8086'>Intel Corporation</vendor>
            <numa node='0'/>
            <capability type='phys_function'>
              <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
            </capability>
            <capability type='virt_functions'>
            </capability>
          </capability>
        </device>""",
    "pci_0000_04_00_1": """
        <device>
          <name>pci_0000_04_00_1</name>
          <path>/sys/devices/pci0000:00/0000:00:02.0/0000:04:00.1</path>
          <parent>pci_0000_00_02_0</parent>
          <driver>
            <name>mlx5_core</name>
          </driver>
          <capability type='pci'>
            <domain>0</domain>
            <bus>4</bus>
            <slot>0</slot>
            <function>1</function>
            <product id='0x1013'>MT27700 Family [ConnectX-4]</product>
            <vendor id='0x15b3'>Mellanox Technologies</vendor>
            <iommuGroup number='15'>
              <address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
              <address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/>
            </iommuGroup>
            <numa node='0'/>
            <pci-express>
              <link validity='cap' port='0' speed='8' width='16'/>
              <link validity='sta' speed='8' width='16'/>
            </pci-express>
          </capability>
        </device>""",
    # libvirt >= 1.3.0 nodedev-dumpxml
    "pci_0000_03_00_0": """
        <device>
          <name>pci_0000_03_00_0</name>
          <path>/sys/devices/pci0000:00/0000:00:02.0/0000:03:00.0</path>
          <parent>pci_0000_00_02_0</parent>
          <driver>
            <name>mlx5_core</name>
          </driver>
          <capability type='pci'>
            <domain>0</domain>
            <bus>3</bus>
            <slot>0</slot>
            <function>0</function>
            <product id='0x1013'>MT27700 Family [ConnectX-4]</product>
            <vendor id='0x15b3'>Mellanox Technologies</vendor>
            <capability type='virt_functions' maxCount='16'>
              <address domain='0x0000' bus='0x03' slot='0x00' function='0x2'/>
              <address domain='0x0000' bus='0x03' slot='0x00' function='0x3'/>
              <address domain='0x0000' bus='0x03' slot='0x00' function='0x4'/>
              <address domain='0x0000' bus='0x03' slot='0x00' function='0x5'/>
            </capability>
            <iommuGroup number='15'>
              <address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
              <address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/>
            </iommuGroup>
            <numa node='0'/>
            <pci-express>
              <link validity='cap' port='0' speed='8' width='16'/>
              <link validity='sta' speed='8' width='16'/>
            </pci-express>
          </capability>
        </device>""",
    "pci_0000_03_00_1": """
        <device>
          <name>pci_0000_03_00_1</name>
          <path>/sys/devices/pci0000:00/0000:00:02.0/0000:03:00.1</path>
          <parent>pci_0000_00_02_0</parent>
          <driver>
            <name>mlx5_core</name>
          </driver>
          <capability type='pci'>
            <domain>0</domain>
            <bus>3</bus>
            <slot>0</slot>
            <function>1</function>
            <product id='0x1013'>MT27700 Family [ConnectX-4]</product>
            <vendor id='0x15b3'>Mellanox Technologies</vendor>
            <capability type='virt_functions' maxCount='16'/>
            <iommuGroup number='15'>
              <address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
              <address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/>
            </iommuGroup>
            <numa node='0'/>
            <pci-express>
              <link validity='cap' port='0' speed='8' width='16'/>
              <link validity='sta' speed='8' width='16'/>
            </pci-express>
          </capability>
        </device>""",
    "net_enp2s2_02_9a_a1_37_be_54": """
        <device>
          <name>net_enp2s2_02_9a_a1_37_be_54</name>
          <path>/sys/devices/pci0000:00/0000:00:02.0/0000:02:02.0/net/enp2s2</path>
          <parent>pci_0000_04_11_7</parent>
          <capability type='net'>
            <interface>enp2s2</interface>
            <address>02:9a:a1:37:be:54</address>
            <link state='down'/>
            <feature name='rx'/>
            <feature name='tx'/>
            <feature name='sg'/>
            <feature name='tso'/>
            <feature name='gso'/>
            <feature name='gro'/>
            <feature name='rxvlan'/>
            <feature name='txvlan'/>
            <capability type='80203'/>
          </capability>
        </device>""",
    "pci_0000_06_00_0": """
        <device>
          <name>pci_0000_06_00_0</name>
          <path>/sys/devices/pci0000:00/0000:00:06.0</path>
          <parent></parent>
          <driver>
            <name>nvidia</name>
          </driver>
          <capability type="pci">
            <domain>0</domain>
            <bus>10</bus>
            <slot>1</slot>
            <function>5</function>
            <product id="0x0FFE">GRID M60-0B</product>
            <vendor id="0x10DE">Nvidia</vendor>
            <numa node="8"/>
            <capability type='mdev_types'>
              <type id='nvidia-11'>
                <name>GRID M60-0B</name>
                <deviceAPI>vfio-pci</deviceAPI>
                <availableInstances>16</availableInstances>
              </type>
            </capability>
          </capability>
        </device>""",
    "mdev_4b20d080_1b54_4048_85b3_a6a62d165c01": """
        <device>
          <name>mdev_4b20d080_1b54_4048_85b3_a6a62d165c01</name>
          <path>/sys/devices/pci0000:00/0000:00:02.0/4b20d080-1b54-4048-85b3-a6a62d165c01</path>
          <parent>pci_0000_00_02_0</parent>
          <driver>
            <name>vfio_mdev</name>
          </driver>
          <capability type='mdev'>
            <type id='nvidia-11'/>
            <iommuGroup number='12'/>
          </capability>
        </device>
        """,
}


_fake_cpu_info = {
    "arch": "test_arch",
    "model": "test_model",
    "vendor": "test_vendor",
    "topology": {
        "sockets": 1,
        "cores": 8,
        "threads": 16
    },
    "features": ["feature1", "feature2"]
}
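
# Nova names ephemeral disk backing files "ephemeral_<size>_<ext>", where
# <ext> is the first seven characters of the hash of the default file
# system name (see eph_name() below); eph_default_ext reproduces that
# suffix for the tests.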
eph_default_ext = utils.get_hash_str(nova.privsep.fs._DEFAULT_FILE_SYSTEM)[:7]
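
# Sample guest <cpu> definitions for the qemu64 and Broadwell-noTSX CPU
# models, of the kind libvirt reports for a running domain; used by the
# tests that exercise CPU feature handling.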
_fake_qemu64_cpu_feature = """
<cpu mode='custom' match='exact'>
  <model fallback='forbid'>qemu64</model>
  <feature policy='require' name='svm'/>
  <feature policy='require' name='lm'/>
  <feature policy='require' name='nx'/>
  <feature policy='require' name='syscall'/>
  <feature policy='require' name='cx16'/>
  <feature policy='require' name='pni'/>
  <feature policy='require' name='sse2'/>
  <feature policy='require' name='sse'/>
  <feature policy='require' name='fxsr'/>
  <feature policy='require' name='mmx'/>
  <feature policy='require' name='clflush'/>
  <feature policy='require' name='pse36'/>
  <feature policy='require' name='pat'/>
  <feature policy='require' name='cmov'/>
  <feature policy='require' name='mca'/>
  <feature policy='require' name='pge'/>
  <feature policy='require' name='mtrr'/>
  <feature policy='require' name='sep'/>
  <feature policy='require' name='apic'/>
  <feature policy='require' name='cx8'/>
  <feature policy='require' name='mce'/>
  <feature policy='require' name='pae'/>
  <feature policy='require' name='msr'/>
  <feature policy='require' name='tsc'/>
  <feature policy='require' name='pse'/>
  <feature policy='require' name='de'/>
  <feature policy='require' name='fpu'/>
</cpu>
"""

_fake_broadwell_cpu_feature = """
<cpu mode='custom' match='exact'>
  <model fallback='forbid'>Broadwell-noTSX</model>
  <vendor>Intel</vendor>
  <feature policy='require' name='smap'/>
  <feature policy='require' name='adx'/>
  <feature policy='require' name='rdseed'/>
  <feature policy='require' name='invpcid'/>
  <feature policy='require' name='erms'/>
  <feature policy='require' name='bmi2'/>
  <feature policy='require' name='smep'/>
  <feature policy='require' name='avx2'/>
  <feature policy='require' name='bmi1'/>
  <feature policy='require' name='fsgsbase'/>
  <feature policy='require' name='3dnowprefetch'/>
  <feature policy='require' name='lahf_lm'/>
  <feature policy='require' name='lm'/>
  <feature policy='require' name='rdtscp'/>
  <feature policy='require' name='nx'/>
  <feature policy='require' name='syscall'/>
  <feature policy='require' name='avx'/>
  <feature policy='require' name='xsave'/>
  <feature policy='require' name='aes'/>
  <feature policy='require' name='tsc-deadline'/>
  <feature policy='require' name='popcnt'/>
  <feature policy='require' name='movbe'/>
  <feature policy='require' name='x2apic'/>
  <feature policy='require' name='sse4.2'/>
  <feature policy='require' name='sse4.1'/>
  <feature policy='require' name='pcid'/>
  <feature policy='require' name='cx16'/>
  <feature policy='require' name='fma'/>
  <feature policy='require' name='ssse3'/>
  <feature policy='require' name='pclmuldq'/>
  <feature policy='require' name='pni'/>
  <feature policy='require' name='sse2'/>
  <feature policy='require' name='sse'/>
  <feature policy='require' name='fxsr'/>
  <feature policy='require' name='mmx'/>
  <feature policy='require' name='clflush'/>
  <feature policy='require' name='pse36'/>
  <feature policy='require' name='pat'/>
  <feature policy='require' name='cmov'/>
  <feature policy='require' name='mca'/>
  <feature policy='require' name='pge'/>
  <feature policy='require' name='mtrr'/>
  <feature policy='require' name='sep'/>
  <feature policy='require' name='apic'/>
  <feature policy='require' name='cx8'/>
  <feature policy='require' name='mce'/>
  <feature policy='require' name='pae'/>
  <feature policy='require' name='msr'/>
  <feature policy='require' name='tsc'/>
  <feature policy='require' name='pse'/>
  <feature policy='require' name='de'/>
  <feature policy='require' name='fpu'/>
</cpu>
"""


def eph_name(size):
    return ('ephemeral_%(size)s_%(ext)s' %
            {'size': size, 'ext': eph_default_ext})


def fake_disk_info_byname(instance, type='qcow2'):
    """Return instance_disk_info corresponding accurately to the
    properties of the given Instance object. The info is returned as an
    OrderedDict of name->disk_info for each disk.

    :param instance: The instance we're generating fake disk_info for.
    :param type: libvirt's disk type.
    :return: disk_info
    :rtype: OrderedDict
    """
    instance_dir = os.path.join(CONF.instances_path, instance.uuid)

    def instance_path(name):
        return os.path.join(instance_dir, name)

    disk_info = OrderedDict()

    # root disk
    if (instance.image_ref is not None and
            instance.image_ref != uuids.fake_volume_backed_image_ref):
        cache_name = imagecache.get_cache_fname(instance.image_ref)
        disk_info['disk'] = {
            'type': type,
            'path': instance_path('disk'),
            'virt_disk_size': instance.flavor.root_gb * units.Gi,
            'backing_file': cache_name,
            'disk_size': instance.flavor.root_gb * units.Gi,
            'over_committed_disk_size': 0}

    swap_mb = instance.flavor.swap
    if swap_mb > 0:
        disk_info['disk.swap'] = {
            'type': type,
            'path': instance_path('disk.swap'),
            'virt_disk_size': swap_mb * units.Mi,
            'backing_file': 'swap_%s' % swap_mb,
            'disk_size': swap_mb * units.Mi,
            'over_committed_disk_size': 0}

    eph_gb = instance.flavor.ephemeral_gb
    if eph_gb > 0:
        disk_info['disk.local'] = {
            'type': type,
            'path': instance_path('disk.local'),
            'virt_disk_size': eph_gb * units.Gi,
            'backing_file': eph_name(eph_gb),
            'disk_size': eph_gb * units.Gi,
            'over_committed_disk_size': 0}

    if instance.config_drive:
        disk_info['disk.config'] = {
            'type': 'raw',
            'path': instance_path('disk.config'),
            'virt_disk_size': 1024,
            'backing_file': '',
            'disk_size': 1024,
            'over_committed_disk_size': 0}

    return disk_info


def fake_diagnostics_object(with_cpus=False, with_disks=False, with_nic=False):
    diag_dict = {'config_drive': False,
                 'driver': 'libvirt',
                 'hypervisor': 'kvm',
                 'hypervisor_os': 'linux',
                 'memory_details': {'maximum': 2048, 'used': 1234},
                 'state': 'running',
                 'uptime': 10}

    if with_cpus:
        diag_dict['cpu_details'] = []
        for id, t in enumerate([15340000000, 1640000000,
                                3040000000, 1420000000]):
            diag_dict['cpu_details'].append({'id': id, 'time': t})

    if with_disks:
        diag_dict['disk_details'] = []
        for i in range(2):
            diag_dict['disk_details'].append(
                {'read_bytes': 688640,
                 'read_requests': 169,
                 'write_bytes': 0,
                 'write_requests': 0,
                 'errors_count': 1})

    if with_nic:
        diag_dict['nic_details'] = [
            {'mac_address': '52:54:00:a4:38:38',
             'rx_drop': 0,
             'rx_errors': 0,
             'rx_octets': 4408,
             'rx_packets': 82,
             'tx_drop': 0,
             'tx_errors': 0,
             'tx_octets': 0,
             'tx_packets': 0}]

    return fake_diagnostics.fake_diagnostics_obj(**diag_dict)


def fake_disk_info_json(instance, type='qcow2'):
    """Return fake instance_disk_info corresponding accurately to the
    properties of the given Instance object.

    :param instance: The instance we're generating fake disk_info for.
    :param type: libvirt's disk type.
    :return: JSON representation of instance_disk_info for all disks.
    :rtype: str
    """
    disk_info = fake_disk_info_byname(instance, type)
    return jsonutils.dumps(disk_info.values())


def get_injection_info(network_info=None, admin_pass=None, files=None):
    return libvirt_driver.InjectionInfo(
        network_info=network_info, admin_pass=admin_pass, files=files)
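

# Fetch function handed to Image.cache() by the concurrency tests below:
# it signals that it has started, blocks until the test releases it, and
# then signals completion, letting the tests observe whether two cache
# operations on the same backing file were serialized.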
def _concurrency(signal, wait, done, target, is_block_dev=False):
    signal.send()
    wait.wait()
    done.send()
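

# Minimal stand-in for libvirt's virDomain: just enough state and no-op
# lifecycle methods for tests that only need domain XML and info.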
class FakeVirtDomain(object):

    def __init__(self, fake_xml=None, uuidstr=None, id=None, name=None,
                 info=None):
        if uuidstr is None:
            uuidstr = uuids.fake
        self.uuidstr = uuidstr
        self.id = id
        self.domname = name
        self._info = info or (
            [power_state.RUNNING, 2048 * units.Mi,
             1234 * units.Mi, None, None])
        if fake_xml:
            self._fake_dom_xml = fake_xml
        else:
            self._fake_dom_xml = """
                <domain type='kvm'>
                  <name>testinstance1</name>
                  <devices>
                    <disk type='file'>
                      <source file='filename'/>
                    </disk>
                  </devices>
                </domain>
            """

    def name(self):
        if self.domname is None:
            return "fake-domain %s" % self
        else:
            return self.domname

    def ID(self):
        return self.id

    def info(self):
        return self._info

    def create(self):
        pass

    def managedSave(self, *args):
        pass

    def createWithFlags(self, launch_flags):
        pass

    def XMLDesc(self, flags):
        return self._fake_dom_xml

    def UUIDString(self):
        return self.uuidstr

    def attachDeviceFlags(self, xml, flags):
        pass

    def attachDevice(self, xml):
        pass

    def detachDeviceFlags(self, xml, flags):
        pass

    def snapshotCreateXML(self, xml, flags):
        pass

    def blockCommit(self, disk, base, top, bandwidth=0, flags=0):
        pass

    def blockRebase(self, disk, base, bandwidth=0, flags=0):
        pass

    def blockJobInfo(self, path, flags):
        pass

    def blockJobAbort(self, path, flags):
        pass

    def resume(self):
        pass

    def destroy(self):
        pass

    def fsFreeze(self, disks=None, flags=0):
        pass

    def fsThaw(self, disks=None, flags=0):
        pass

    def isActive(self):
        return True

    def isPersistent(self):
        return True

    def undefine(self):
        return True


class CacheConcurrencyTestCase(test.NoDBTestCase):
    def setUp(self):
        super(CacheConcurrencyTestCase, self).setUp()

        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)

        # utils.synchronized() will create the lock_path for us if it
        # doesn't already exist. It will also delete it when it's done,
        # which can cause race conditions with the multiple threads we
        # use for tests. So, create the path here so utils.synchronized()
        # won't delete it out from under one of the threads.
        self.lock_path = os.path.join(CONF.instances_path, 'locks')
        fileutils.ensure_tree(self.lock_path)

        def fake_exists(fname):
            basedir = os.path.join(CONF.instances_path,
                                   CONF.image_cache_subdirectory_name)
            if fname == basedir or fname == self.lock_path:
                return True
            return False

        self.stub_out('os.path.exists', fake_exists)
        self.stub_out('nova.utils.execute', lambda *a, **kw: None)
        self.stub_out('nova.virt.disk.api.extend',
                      lambda image, size, use_cow=False: None)
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

    def _fake_instance(self, uuid):
        return objects.Instance(id=1, uuid=uuid)

    def test_same_fname_concurrency(self):
        # Ensures that two cache operations on the same fname run
        # sequentially.
        uuid = uuids.fake

        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.by_name(self._fake_instance(uuid),
                                              'name').cache,
                              _concurrency, 'fname', None,
                              signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()

        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.by_name(self._fake_instance(uuid),
                                              'name').cache,
                              _concurrency, 'fname', None,
                              signal=sig2, wait=wait2, done=done2)

        wait2.send()
        eventlet.sleep(0)
        try:
            self.assertFalse(done2.ready())
        finally:
            wait1.send()
        done1.wait()
        eventlet.sleep(0)
        self.assertTrue(done2.ready())
        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()

    def test_different_fname_concurrency(self):
        # Ensures that two different fname caches are concurrent.
        uuid = uuids.fake

        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.by_name(self._fake_instance(uuid),
                                              'name').cache,
                              _concurrency, 'fname2', None,
                              signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()

        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.by_name(self._fake_instance(uuid),
                                              'name').cache,
                              _concurrency, 'fname1', None,
                              signal=sig2, wait=wait2, done=done2)
        eventlet.sleep(0)
        # Wait for thread 2 to start.
        sig2.wait()

        wait2.send()
        tries = 0
        while not done2.ready() and tries < 10:
            eventlet.sleep(0)
            tries += 1
        try:
            self.assertTrue(done2.ready())
        finally:
            wait1.send()
            eventlet.sleep(0)
        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()


class FakeInvalidVolumeDriver(object):
    def __init__(self, *args, **kwargs):
        raise brick_exception.InvalidConnectorProtocol('oops!')


class FakeConfigGuestDisk(object):
    def __init__(self, *args, **kwargs):
        self.source_type = None
        self.driver_cache = None


class FakeConfigGuest(object):
    def __init__(self, *args, **kwargs):
        self.driver_cache = None


class FakeNodeDevice(object):
    def __init__(self, fakexml):
        self.xml = fakexml

    def XMLDesc(self, flags):
        return self.xml
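

# Template for the instance attributes shared by most tests in this
# module; tests copy and tweak the returned dict before building an
# objects.Instance from it.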
def _create_test_instance():
    flavor = objects.Flavor(memory_mb=2048,
                            swap=0,
                            vcpu_weight=None,
                            root_gb=10,
                            id=2,
                            name=u'm1.small',
                            ephemeral_gb=20,
                            rxtx_factor=1.0,
                            flavorid=u'1',
                            vcpus=2,
                            extra_specs={})
    return {
        'id': 1,
        'uuid': uuids.instance,
        'memory_kb': '1024000',
        'basepath': '/some/path',
        'bridge_name': 'br100',
        'display_name': "Acme webserver",
        'vcpus': 2,
        'project_id': 'fake',
        'bridge': 'br101',
        'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
        'root_gb': 10,
        'ephemeral_gb': 20,
        'instance_type_id': '5',  # m1.small
        'extra_specs': {},
        'system_metadata': {
            'image_disk_format': 'raw'
        },
        'flavor': flavor,
        'new_flavor': None,
        'old_flavor': None,
        'pci_devices': objects.PciDeviceList(),
        'numa_topology': None,
        'config_drive': None,
        'vm_mode': None,
        'kernel_id': None,
        'ramdisk_id': None,
        'os_type': 'linux',
        'user_id': '838a72b0-0d54-4827-8fd6-fb1227633ceb',
        'ephemeral_key_uuid': None,
        'vcpu_model': None,
        'host': 'fake-host',
        'task_state': None,
        'vm_state': None,
        'trusted_certs': None
    }


@ddt.ddt
class LibvirtConnTestCase(test.NoDBTestCase,
                          test_diagnostics.DiagnosticsComparisonMixin):

    REQUIRES_LOCKING = True

    _EPHEMERAL_20_DEFAULT = eph_name(20)

    def setUp(self):
        super(LibvirtConnTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.get_admin_context()
        temp_dir = self.useFixture(fixtures.TempDir()).path
        self.flags(instances_path=temp_dir,
                   firewall_driver=None)
        self.flags(snapshots_directory=temp_dir, group='libvirt')
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))

        self.flags(sysinfo_serial="hardware", group="libvirt")

        # normally loaded during nova-compute startup
        os_vif.initialize()

        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

        self.stub_out('nova.virt.disk.api.extend',
                      lambda image, size, use_cow=False: None)

        self.stub_out('nova.virt.libvirt.imagebackend.Image.'
                      'resolve_driver_format',
                      imagebackend.Image._get_driver_format)

        self.stub_out('nova.compute.utils.get_machine_ips', lambda: [])

        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.test_instance = _create_test_instance()
        self.test_image_meta = {
            "disk_format": "raw",
        }
        self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
            self)
        self.device_xml_tmpl = """
        <domain type='kvm'>
          <devices>
            <disk type='block' device='disk'>
              <driver name='qemu' type='raw' cache='none'/>
              <source dev='{device_path}'/>
              <target bus='virtio' dev='vdb'/>
              <serial>58a84f6d-3f0c-4e19-a0af-eb657b790657</serial>
              <address type='pci' domain='0x0' bus='0x0' slot='0x04' \
function='0x0'/>
            </disk>
          </devices>
        </domain>
        """

    def relpath(self, path):
        return os.path.relpath(path, CONF.instances_path)

    def tearDown(self):
        nova.tests.unit.image.fake.FakeImageService_reset()
        super(LibvirtConnTestCase, self).tearDown()

    def test_driver_capabilities(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertTrue(drvr.capabilities['has_imagecache'],
                        'Driver capabilities for \'has_imagecache\' '
                        'is invalid')
        self.assertTrue(drvr.capabilities['supports_evacuate'],
                        'Driver capabilities for \'supports_evacuate\' '
                        'is invalid')
        self.assertFalse(drvr.capabilities['supports_migrate_to_same_host'],
                         'Driver capabilities for '
                         '\'supports_migrate_to_same_host\' is invalid')
        self.assertTrue(drvr.capabilities['supports_attach_interface'],
                        'Driver capabilities for '
                        '\'supports_attach_interface\' '
                        'is invalid')
        self.assertTrue(drvr.capabilities['supports_extend_volume'],
                        'Driver capabilities for '
                        '\'supports_extend_volume\' '
                        'is invalid')
        self.assertFalse(drvr.requires_allocation_refresh,
                         'Driver does not need allocation refresh')
        self.assertTrue(drvr.capabilities['supports_trusted_certs'],
                        'Driver capabilities for '
                        '\'supports_trusted_certs\' '
                        'is invalid')

    def create_fake_libvirt_mock(self, **kwargs):
        """Define mocks for LibvirtDriver (libvirt is not used)."""

        # A fake libvirt.virConnect
        class FakeLibvirtDriver(object):
            def defineXML(self, xml):
                return FakeVirtDomain()

        # Create the fake and customize it as requested
        fake = FakeLibvirtDriver()
        for key, val in kwargs.items():
            fake.__setattr__(key, val)

        self.stub_out('nova.virt.libvirt.driver.LibvirtDriver._conn', fake)
        self.stub_out('nova.virt.libvirt.host.Host.get_connection',
                      lambda x: fake)

    def fake_lookup(self, instance_name):
        return FakeVirtDomain()

    def fake_execute(self, *args, **kwargs):
        open(args[-1], "a").close()

    def _create_service(self, **kwargs):
        service_ref = {'host': kwargs.get('host', 'dummy'),
                       'disabled': kwargs.get('disabled', False),
                       'binary': 'nova-compute',
                       'topic': 'compute',
                       'report_count': 0}

        return objects.Service(**service_ref)
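
    # _get_pause_flag mirrors the driver's own check for whether spawn
    # should start the guest paused while it waits for neutron's
    # vif-plugged events, so tests can compute the pause flag they expect
    # spawn to pass to libvirt.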
    def _get_pause_flag(self, drvr, network_info, power_on=True,
                        vifs_already_plugged=False):
        timeout = CONF.vif_plugging_timeout

        events = []
        if (drvr._conn_supports_start_paused and
                utils.is_neutron() and
                not vifs_already_plugged and
                power_on and timeout):
            events = drvr._get_neutron_events(network_info)

        return bool(events)

    def test_public_api_signatures(self):
        baseinst = driver.ComputeDriver(None)
        inst = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertPublicAPISignatures(baseinst, inst)

    def test_legacy_block_device_info(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertFalse(drvr.need_legacy_block_device_info)

    @mock.patch.object(host.Host, "has_min_version")
    def test_min_version_start_ok(self, mock_version):
        mock_version.return_value = True
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")

    @mock.patch.object(host.Host, "has_min_version")
    def test_min_version_start_abort(self, mock_version):
        mock_version.return_value = False
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) - 1)
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_next_min_version_deprecation_warning(self, mock_warning,
                                                  mock_get_libversion):
        # Skip test if there's no currently planned new min version
        if (versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) ==
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_VERSION)):
            self.skipTest("NEXT_MIN_LIBVIRT_VERSION == MIN_LIBVIRT_VERSION")

        # Test that a warning is logged if the libvirt version is less than
        # the next required minimum version.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
        # assert that the next min version is in a warning message
        expected_arg = {'version': versionutils.convert_version_to_str(
            versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))}
        version_arg_found = False
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertTrue(version_arg_found)

    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.NEXT_MIN_QEMU_VERSION) - 1)
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_next_min_qemu_version_deprecation_warning(self, mock_warning,
                                                       mock_get_libversion):
        # Skip test if there's no currently planned new min version
        if (versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_QEMU_VERSION) ==
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_VERSION)):
            self.skipTest("NEXT_MIN_QEMU_VERSION == MIN_QEMU_VERSION")

        # Test that a warning is logged if the QEMU version is less than
        # the next required minimum version.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
        # assert that the next min version is in a warning message
        expected_arg = {'version': versionutils.convert_version_to_str(
            versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_QEMU_VERSION))}
        version_arg_found = False
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertTrue(version_arg_found)

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_next_min_version_ok(self, mock_warning, mock_get_libversion):
        # Skip test if there's no currently planned new min version
        if (versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) ==
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_VERSION)):
            self.skipTest("NEXT_MIN_LIBVIRT_VERSION == MIN_LIBVIRT_VERSION")

        # Test that a warning is not logged if the libvirt version is greater
        # than or equal to NEXT_MIN_LIBVIRT_VERSION.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
        # assert that the next min version is not in a warning message
        expected_arg = {'version': versionutils.convert_version_to_str(
            versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))}
        version_arg_found = False
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertFalse(version_arg_found)

    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.NEXT_MIN_QEMU_VERSION))
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_next_min_qemu_version_ok(self, mock_warning, mock_get_libversion):
        # Skip test if there's no currently planned new min version
        if (versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_QEMU_VERSION) ==
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_VERSION)):
            self.skipTest("NEXT_MIN_QEMU_VERSION == MIN_QEMU_VERSION")

        # Test that a warning is not logged if the QEMU version is greater
        # than or equal to NEXT_MIN_QEMU_VERSION.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
        # assert that the next min version is not in a warning message
        expected_arg = {'version': versionutils.convert_version_to_str(
            versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_QEMU_VERSION))}
        version_arg_found = False
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertFalse(version_arg_found)

    # NOTE(sdague): python2.7 and python3.5 have different behaviors
    # when it comes to comparing against the sentinel, so
    # has_min_version is needed to pass python3.5.
    @mock.patch.object(nova.virt.libvirt.host.Host, "has_min_version",
                       return_value=True)
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=mock.sentinel.qemu_version)
    def test_qemu_image_version(self, mock_get_libversion, min_ver):
        """Test that init_host sets qemu image version

        A sentinel is used here so that we aren't chasing this value
        against minimums that get raised over time.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
        self.assertEqual(images.QEMU_VERSION, mock.sentinel.qemu_version)

    @mock.patch.object(fields.Architecture, "from_host",
                       return_value=fields.Architecture.PPC64)
    def test_min_version_ppc_ok(self, mock_arch):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")

    @mock.patch.object(fields.Architecture, "from_host",
                       return_value=fields.Architecture.S390X)
    def test_min_version_s390_ok(self, mock_arch):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")

    def test_file_backed_memory_support_called(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        with mock.patch.object(drvr,
                '_check_file_backed_memory_support') as mock_check_fb_support:
            drvr.init_host("dummyhost")
            self.assertTrue(mock_check_fb_support.called)

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_FILE_BACKED_VERSION))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_FILE_BACKED_VERSION))
    def test_min_version_file_backed_ok(self, mock_libv, mock_qemu):
        self.flags(file_backed_memory=1024, group='libvirt')
        self.flags(ram_allocation_ratio=1.0)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr._check_file_backed_memory_support()

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_FILE_BACKED_VERSION) - 1)
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_FILE_BACKED_VERSION))
    def test_min_version_file_backed_old_libvirt(self, mock_libv, mock_qemu):
        self.flags(file_backed_memory=1024, group="libvirt")
        self.flags(ram_allocation_ratio=1.0)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.InternalError,
                          drvr._check_file_backed_memory_support)

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_FILE_BACKED_VERSION))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_FILE_BACKED_VERSION) - 1)
    def test_min_version_file_backed_old_qemu(self, mock_libv, mock_qemu):
        self.flags(file_backed_memory=1024, group="libvirt")
        self.flags(ram_allocation_ratio=1.0)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.InternalError,
                          drvr._check_file_backed_memory_support)

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_FILE_BACKED_VERSION))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_FILE_BACKED_VERSION))
    def test_min_version_file_backed_bad_ram_allocation_ratio(self, mock_libv,
                                                              mock_qemu):
        self.flags(file_backed_memory=1024, group="libvirt")
        self.flags(ram_allocation_ratio=1.5)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.InternalError,
                          drvr._check_file_backed_memory_support)

    def _do_test_parse_migration_flags(self, lm_expected=None,
                                       bm_expected=None):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr._parse_migration_flags()

        if lm_expected is not None:
            self.assertEqual(lm_expected, drvr._live_migration_flags)
        if bm_expected is not None:
            self.assertEqual(bm_expected, drvr._block_migration_flags)

    def test_parse_live_migration_flags_default(self):
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE))

    def test_parse_live_migration_flags(self):
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE))

    def test_parse_block_migration_flags_default(self):
        self._do_test_parse_migration_flags(
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
def test_parse_block_migration_flags(self): |
|
self._do_test_parse_migration_flags( |
|
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | |
|
libvirt_driver.libvirt.VIR_MIGRATE_LIVE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC)) |
|
|
|
def test_parse_migration_flags_p2p_xen(self): |
|
self.flags(virt_type='xen', group='libvirt') |
|
self._do_test_parse_migration_flags( |
|
lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST | |
|
libvirt_driver.libvirt.VIR_MIGRATE_LIVE), |
|
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST | |
|
libvirt_driver.libvirt.VIR_MIGRATE_LIVE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC)) |
|
|
|
def test_live_migration_tunnelled_true(self): |
|
self.flags(live_migration_tunnelled=True, group='libvirt') |
|
self._do_test_parse_migration_flags( |
|
lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | |
|
libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST | |
|
libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED), |
|
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | |
|
libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST | |
|
libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC | |
|
libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED)) |
|
|
|
@mock.patch.object(host.Host, 'has_min_version', return_value=True) |
|
def test_live_migration_permit_postcopy_true(self, host): |
|
self.flags(live_migration_permit_post_copy=True, group='libvirt') |
|
self._do_test_parse_migration_flags( |
|
lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | |
|
libvirt_driver.libvirt.VIR_MIGRATE_LIVE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY), |
|
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | |
|
libvirt_driver.libvirt.VIR_MIGRATE_LIVE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC | |
|
libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY)) |
|
|
|
@mock.patch.object(host.Host, 'has_min_version', return_value=True) |
|
def test_live_migration_permit_auto_converge_true(self, host): |
|
self.flags(live_migration_permit_auto_converge=True, group='libvirt') |
|
self._do_test_parse_migration_flags( |
|
lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | |
|
libvirt_driver.libvirt.VIR_MIGRATE_LIVE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_AUTO_CONVERGE), |
|
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | |
|
libvirt_driver.libvirt.VIR_MIGRATE_LIVE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC | |
|
libvirt_driver.libvirt.VIR_MIGRATE_AUTO_CONVERGE)) |
|
|
|
@mock.patch.object(host.Host, 'has_min_version', return_value=True) |
|
def test_live_migration_permit_auto_converge_and_post_copy_true(self, |
|
host): |
|
self.flags(live_migration_permit_auto_converge=True, group='libvirt') |
|
self.flags(live_migration_permit_post_copy=True, group='libvirt') |
|
self._do_test_parse_migration_flags( |
|
lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | |
|
libvirt_driver.libvirt.VIR_MIGRATE_LIVE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY), |
|
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | |
|
libvirt_driver.libvirt.VIR_MIGRATE_LIVE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC | |
|
libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY)) |
|
|
|
@mock.patch.object(host.Host, 'has_min_version', return_value=False) |
|
def test_live_migration_auto_converge_and_post_copy_true_old_libvirt( |
|
self, min_ver): |
|
self.flags(live_migration_permit_auto_converge=True, group='libvirt') |
|
self.flags(live_migration_permit_post_copy=True, group='libvirt') |
|
|
|
self._do_test_parse_migration_flags( |
|
lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | |
|
libvirt_driver.libvirt.VIR_MIGRATE_LIVE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_AUTO_CONVERGE), |
|
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | |
|
libvirt_driver.libvirt.VIR_MIGRATE_LIVE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC | |
|
libvirt_driver.libvirt.VIR_MIGRATE_AUTO_CONVERGE)) |
|
|
|
min_ver.assert_called_with( |
|
lv_ver=libvirt_driver.MIN_LIBVIRT_POSTCOPY_VERSION) |
|
|
|
@mock.patch.object(host.Host, 'has_min_version', return_value=False) |
|
def test_live_migration_permit_postcopy_true_old_libvirt(self, host): |
|
self.flags(live_migration_permit_post_copy=True, group='libvirt') |
|
|
|
self._do_test_parse_migration_flags( |
|
lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | |
|
libvirt_driver.libvirt.VIR_MIGRATE_LIVE), |
|
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | |
|
libvirt_driver.libvirt.VIR_MIGRATE_LIVE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC)) |
|
|
|
def test_live_migration_permit_postcopy_false(self): |
|
self._do_test_parse_migration_flags( |
|
lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | |
|
libvirt_driver.libvirt.VIR_MIGRATE_LIVE), |
|
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | |
|
libvirt_driver.libvirt.VIR_MIGRATE_LIVE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC)) |
|
|
|
def test_live_migration_permit_autoconverge_false(self): |
|
self._do_test_parse_migration_flags( |
|
lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | |
|
libvirt_driver.libvirt.VIR_MIGRATE_LIVE), |
|
bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PERSIST_DEST | |
|
libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER | |
|
libvirt_driver.libvirt.VIR_MIGRATE_LIVE | |
|
libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC)) |
|
|
|
@mock.patch('nova.utils.get_image_from_system_metadata') |
|
@mock.patch.object(host.Host, |
|
'has_min_version', return_value=True) |
|
@mock.patch('nova.virt.libvirt.host.Host.get_guest') |
|
def test_set_admin_password(self, mock_get_guest, ver, mock_image): |
|
self.flags(virt_type='kvm', group='libvirt') |
|
instance = objects.Instance(**self.test_instance) |
|
mock_image.return_value = {"properties": { |
|
"hw_qemu_guest_agent": "yes"}} |
|
mock_guest = mock.Mock(spec=libvirt_guest.Guest) |
|
mock_get_guest.return_value = mock_guest |
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) |
|
drvr.set_admin_password(instance, "123") |
|
|
|
mock_guest.set_user_password.assert_called_once_with("root", "123") |
|
|
|
@mock.patch('nova.objects.Instance.save') |
|
@mock.patch('oslo_serialization.base64.encode_as_text') |
|
@mock.patch('nova.api.metadata.password.convert_password') |
|
@mock.patch('nova.crypto.ssh_encrypt_text') |
|
@mock.patch('nova.utils.get_image_from_system_metadata') |
|
@mock.patch.object(host.Host, |
|
'has_min_version', return_value=True) |
|
@mock.patch('nova.virt.libvirt.host.Host.get_guest') |
|
def test_set_admin_password_saves_sysmeta(self, mock_get_guest, |
|
ver, mock_image, mock_encrypt, |
|
mock_convert, mock_encode, |
|
mock_save): |
|
self.flags(virt_type='kvm', group='libvirt') |
|
instance = objects.Instance(**self.test_instance) |
|
# Password will only be saved in sysmeta if the key_data is present |
|
instance.key_data = 'ssh-rsa ABCFEFG' |
|
mock_image.return_value = {"properties": { |
|
"hw_qemu_guest_agent": "yes"}} |
|
mock_guest = mock.Mock(spec=libvirt_guest.Guest) |
|
mock_get_guest.return_value = mock_guest |
|
mock_convert.return_value = {'password_0': 'converted-password'} |
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) |
|
drvr.set_admin_password(instance, "123") |
|
|
|
mock_guest.set_user_password.assert_called_once_with("root", "123") |
|
mock_encrypt.assert_called_once_with(instance.key_data, '123') |
|
mock_encode.assert_called_once_with(mock_encrypt.return_value) |
|
mock_convert.assert_called_once_with(None, mock_encode.return_value) |
|
self.assertEqual('converted-password', |
|
instance.system_metadata['password_0']) |
|
mock_save.assert_called_once_with() |
|
|
|
@mock.patch.object(host.Host, |
|
'has_min_version', return_value=True) |
|
@mock.patch('nova.virt.libvirt.host.Host.get_guest') |
|
def test_set_admin_password_parallels(self, mock_get_guest, ver): |
|
self.flags(virt_type='parallels', group='libvirt') |
|
instance = objects.Instance(**self.test_instance) |
|
mock_guest = mock.Mock(spec=libvirt_guest.Guest) |
|
mock_get_guest.return_value = mock_guest |
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) |
|
drvr.set_admin_password(instance, "123") |
|
|
|
mock_guest.set_user_password.assert_called_once_with("root", "123") |
|
|
|
@mock.patch('nova.utils.get_image_from_system_metadata') |
|
@mock.patch.object(host.Host, |
|
'has_min_version', return_value=True) |
|
@mock.patch('nova.virt.libvirt.host.Host.get_guest') |
|
def test_set_admin_password_windows(self, mock_get_guest, ver, mock_image): |
|
self.flags(virt_type='kvm', group='libvirt') |
|
instance = objects.Instance(**self.test_instance) |
|
instance.os_type = "windows" |
|
mock_image.return_value = {"properties": { |
|
"hw_qemu_guest_agent": "yes"}} |
|
mock_guest = mock.Mock(spec=libvirt_guest.Guest) |
|
mock_get_guest.return_value = mock_guest |
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) |
|
drvr.set_admin_password(instance, "123") |
|
|
|
mock_guest.set_user_password.assert_called_once_with( |
|
"Administrator", "123") |
|
|
|
@mock.patch('nova.utils.get_image_from_system_metadata') |
|
@mock.patch.object(host.Host, |
|
'has_min_version', return_value=True) |
|
@mock.patch('nova.virt.libvirt.host.Host.get_guest') |
|
def test_set_admin_password_image(self, mock_get_guest, ver, mock_image): |
|
self.flags(virt_type='kvm', group='libvirt') |
|
instance = objects.Instance(**self.test_instance) |
|
mock_image.return_value = {"properties": { |
|
"hw_qemu_guest_agent": "yes", |
|
"os_admin_user": "foo" |
|
}} |
|
mock_guest = mock.Mock(spec=libvirt_guest.Guest) |
|
mock_get_guest.return_value = mock_guest |
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) |
|
drvr.set_admin_password(instance, "123") |
|
|
|
mock_guest.set_user_password.assert_called_once_with("foo", "123") |
|
|
|
@mock.patch('nova.utils.get_image_from_system_metadata') |
|
@mock.patch.object(host.Host, |
|
'has_min_version', return_value=True) |
|
def test_set_admin_password_bad_hyp(self, mock_svc, mock_image): |
|
self.flags(virt_type='lxc', group='libvirt') |
|
instance = objects.Instance(**self.test_instance) |
|
mock_image.return_value = {"properties": { |
|
"hw_qemu_guest_agent": "yes"}} |
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) |
|
self.assertRaises(exception.SetAdminPasswdNotSupported, |
|
drvr.set_admin_password, instance, "123") |
|
|
|
@mock.patch.object(host.Host, |
|
'has_min_version', return_value=True) |
|
def test_set_admin_password_guest_agent_not_running(self, mock_svc): |
|
self.flags(virt_type='kvm', group='libvirt') |
|
instance = objects.Instance(**self.test_instance) |
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) |
|
self.assertRaises(exception.QemuGuestAgentNotEnabled, |
|
drvr.set_admin_password, instance, "123") |
|
|
|
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_error(self, mock_get_guest, ver, mock_image):
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_guest.set_user_password.side_effect = (
            fakelibvirt.libvirtError("error"))
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        with mock.patch.object(
                drvr, '_save_instance_password_if_sshkey_present') as save_p:
            self.assertRaises(exception.NovaException,
                              drvr.set_admin_password, instance, "123")
            save_p.assert_not_called()
|
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_error_with_unicode(
            self, mock_get_guest, ver, mock_image):
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_guest.set_user_password.side_effect = (
            fakelibvirt.libvirtError(
                b"failed: \xe9\x94\x99\xe8\xaf\xaf\xe3\x80\x82"))
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.set_admin_password, instance, "123")
|
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_not_implemented(
            self, mock_get_guest, ver, mock_image):
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        not_implemented = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "Guest agent disappeared while executing command",
            error_code=fakelibvirt.VIR_ERR_AGENT_UNRESPONSIVE)
        mock_guest.set_user_password.side_effect = not_implemented
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(NotImplementedError,
                          drvr.set_admin_password, instance, "123")
|
    @mock.patch.object(objects.Service, 'save')
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_disable(self, mock_svc, mock_save):
        # Tests disabling an enabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(False)
        self.assertTrue(svc.disabled)
        mock_save.assert_called_once_with()
|
    @mock.patch.object(objects.Service, 'save')
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_enable(self, mock_svc, mock_save):
        # Tests enabling a disabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=True, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(True)
        # Since disabled_reason is not set (and therefore not prefixed with
        # "AUTO:"), the service should not be re-enabled.
        mock_save.assert_not_called()
        self.assertTrue(svc.disabled)
|
    @mock.patch.object(objects.Service, 'save')
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_enable_state_enabled(self, mock_svc,
                                                        mock_save):
        # Tests enabling an enabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=False, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(True)
        self.assertFalse(svc.disabled)
        mock_save.assert_not_called()
|
    @mock.patch.object(objects.Service, 'save')
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_disable_state_disabled(self, mock_svc,
                                                          mock_save):
        # Tests disabling a disabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=True, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(False)
        mock_save.assert_not_called()
        self.assertTrue(svc.disabled)
|
    def test_set_host_enabled_swallows_exceptions(self):
        # Tests that set_host_enabled will swallow exceptions coming from the
        # db_api code so they don't break anything calling it, e.g. the
        # _get_new_connection method.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        with mock.patch.object(db, 'service_get_by_compute_host') as db_mock:
            # Make db.service_get_by_compute_host raise NovaException; this
            # is more robust than just raising ComputeHostNotFound.
            db_mock.side_effect = exception.NovaException
            drvr._set_host_enabled(False)
|
    @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
    def test_prepare_pci_device(self, mock_lookup):

        pci_devices = [dict(hypervisor_name='xxx')]

        self.flags(virt_type='xen', group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conn = drvr._host.get_connection()

        mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn)
        drvr._prepare_pci_devices_for_use(pci_devices)
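
    # NOTE: "dettach" below is not a typo in this test; it is the actual
    # (historically mis-spelled) method name that the libvirt python
    # bindings expose on virNodeDevice.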
|
|
|
    @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
    @mock.patch.object(fakelibvirt.virNodeDevice, "dettach")
    def test_prepare_pci_device_exception(self, mock_detach, mock_lookup):

        pci_devices = [dict(hypervisor_name='xxx',
                            id='id1',
                            instance_uuid='uuid')]

        self.flags(virt_type='xen', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conn = drvr._host.get_connection()

        mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn)
        mock_detach.side_effect = fakelibvirt.libvirtError("xxxx")

        self.assertRaises(exception.PciDevicePrepareFailed,
                          drvr._prepare_pci_devices_for_use, pci_devices)
|
    @mock.patch.object(host.Host, "has_min_version", return_value=False)
    def test_device_metadata(self, mock_version):
        xml = """
        <domain>
          <name>dummy</name>
          <uuid>32dfcb37-5af1-552b-357c-be8c3aa38310</uuid>
          <memory>1048576</memory>
          <vcpu>1</vcpu>
          <os>
            <type arch='x86_64' machine='pc-i440fx-2.4'>hvm</type>
          </os>
          <devices>
            <disk type='block' device='disk'>
              <driver name='qemu' type='qcow2'/>
              <source dev='/dev/mapper/generic'/>
              <target dev='sda' bus='scsi'/>
              <address type='drive' controller='0' bus='0' target='0' unit='0'/>
            </disk>
            <disk type='block' device='disk'>
              <driver name='qemu' type='qcow2'/>
              <source dev='/dev/mapper/generic-1'/>
              <target dev='hda' bus='ide'/>
              <address type='drive' controller='0' bus='1' target='0' unit='0'/>
            </disk>
            <disk type='block' device='disk'>
              <driver name='qemu' type='qcow2'/>
              <source dev='/dev/mapper/generic-2'/>
              <target dev='hdb' bus='ide'/>
              <address type='drive' controller='0' bus='1' target='1' unit='1'/>
            </disk>
            <disk type='block' device='disk'>
              <driver name='qemu' type='qcow2'/>
              <source dev='/dev/mapper/aa1'/>
              <target dev='sdb' bus='usb'/>
            </disk>
            <disk type='block' device='disk'>
              <driver name='qemu' type='qcow2'/>
              <source dev='/var/lib/libvirt/images/centos'/>
              <backingStore/>
              <target dev='vda' bus='virtio'/>
              <boot order='1'/>
              <alias name='virtio-disk0'/>
              <address type='pci' domain='0x0000' bus='0x00' slot='0x09'
                       function='0x0'/>
            </disk>
            <disk type='file' device='disk'>
              <driver name='qemu' type='qcow2' cache='none'/>
              <source file='/var/lib/libvirt/images/generic.qcow2'/>
              <target dev='vdb' bus='virtio'/>
              <address type='virtio-mmio'/>
            </disk>
            <disk type='file' device='disk'>
              <driver name='qemu' type='qcow2'/>
              <source file='/var/lib/libvirt/images/test.qcow2'/>
              <backingStore/>
              <target dev='vdc' bus='virtio'/>
              <alias name='virtio-disk1'/>
              <address type='ccw' cssid='0xfe' ssid='0x0' devno='0x0000'/>
            </disk>
            <interface type='network'>
              <mac address='52:54:00:f6:35:8f'/>
              <source network='default'/>
              <model type='virtio'/>
              <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
                       function='0x0'/>
            </interface>
            <interface type='network'>
              <mac address='51:5a:2c:a4:5e:1b'/>
              <source network='default'/>
              <model type='virtio'/>
              <address type='pci' domain='0x0000' bus='0x00' slot='0x04'
                       function='0x1'/>
            </interface>
            <interface type='network'>
              <mac address='fa:16:3e:d1:28:e4'/>
              <source network='default'/>
              <model type='virtio'/>
              <address type='virtio-mmio'/>
            </interface>
            <interface type='network'>
              <mac address='52:54:00:14:6f:50'/>
              <source network='default' bridge='virbr0'/>
              <target dev='vnet0'/>
              <model type='virtio'/>
              <alias name='net0'/>
              <address type='ccw' cssid='0xfe' ssid='0x0' devno='0x0001'/>
            </interface>
            <hostdev mode="subsystem" type="pci" managed="yes">
              <source>
                <address bus="0x06" domain="0x0000" function="0x1"
                         slot="0x00"/>
              </source>
            </hostdev>
          </devices>
        </domain>"""
|
|
|
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) |
|
dom = fakelibvirt.Domain(drvr._get_connection(), xml, False) |
|
guest = libvirt_guest.Guest(dom) |
|
|
|
instance_ref = objects.Instance(**self.test_instance) |
|
bdms = block_device_obj.block_device_make_list_from_dicts( |
|
self.context, [ |
|
fake_block_device.FakeDbBlockDeviceDict( |
|
{'id': 1, |
|
'source_type': 'volume', 'destination_type': 'volume', |
|
'device_name': '/dev/sda', 'tag': "db", |
|
'volume_id': uuids.volume_1}), |
|
fake_block_device.FakeDbBlockDeviceDict( |
|
{'id': 2, |
|
'source_type': 'volume', 'destination_type': 'volume', |
|
'device_name': '/dev/hda', 'tag': "nfvfunc1", |
|
'volume_id': uuids.volume_2}), |
|
fake_block_device.FakeDbBlockDeviceDict( |
|
{'id': 3, |
|
'source_type': 'volume', 'destination_type': 'volume', |
|
'device_name': '/dev/sdb', 'tag': "nfvfunc2", |
|
'volume_id': uuids.volume_3}), |
|
fake_block_device.FakeDbBlockDeviceDict( |
|
{'id': 4, |
|
'source_type': 'volume', 'destination_type': 'volume', |
|
'device_name': '/dev/hdb', |
|
'volume_id': uuids.volume_4}), |
|
fake_block_device.FakeDbBlockDeviceDict( |
|
{'id': 5, |
|
'source_type': 'volume', 'destination_type': 'volume', |
|
'device_name': '/dev/vda', 'tag': "nfvfunc3", |
|
'volume_id': uuids.volume_5}), |
|
fake_block_device.FakeDbBlockDeviceDict( |
|
{'id': 6, |
|
'source_type': 'volume', 'destination_type': 'volume', |
|
'device_name': '/dev/vdb', 'tag': "nfvfunc4", |
|
'volume_id': uuids.volume_6}), |
|
fake_block_device.FakeDbBlockDeviceDict( |
|
{'id': 7, |
|
'source_type': 'volume', 'destination_type': 'volume', |
|
'device_name': '/dev/vdc', 'tag': "nfvfunc5", |
|
'volume_id': uuids.volume_7}), |
|
] |
|
) |
|
vif = obj_vif.VirtualInterface(context=self.context) |
|
vif.address = '52:54:00:f6:35:8f' |
|
vif.network_id = 123 |
|
vif.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310' |
|
vif.uuid = '12ec4b21-ef22-6c21-534b-ba3e3ab3a311' |
|
vif.tag = 'mytag1' |
|
|
|
vif1 = obj_vif.VirtualInterface(context=self.context) |
|
vif1.address = '51:5a:2c:a4:5e:1b' |
|
vif1.network_id = 123 |
|
vif1.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310' |
|
vif1.uuid = 'abec4b21-ef22-6c21-534b-ba3e3ab3a312' |
|
|
|
vif2 = obj_vif.VirtualInterface(context=self.context) |
|
vif2.address = 'fa:16:3e:d1:28:e4' |
|
vif2.network_id = 123 |
|
vif2.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310' |
|
vif2.uuid = '645686e4-7086-4eab-8c2f-c41f017a1b16' |
|
vif2.tag = 'mytag2' |
|
|
|
vif3 = obj_vif.VirtualInterface(context=self.context) |
|
vif3.address = '52:54:00:14:6f:50' |
|
vif3.network_id = 123 |
|
vif3.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310' |
|
vif3.uuid = '99cc3604-782d-4a32-a27c-bc33ac56ce86' |
|
vif3.tag = 'mytag3' |
|
|
|
vif4 = obj_vif.VirtualInterface(context=self.context) |
|
vif4.address = 'da:d1:f2:91:95:c1' |
|
vif4.tag = 'pf_tag' |
|
|
|
vifs = [vif, vif1, vif2, vif3, vif4] |
|
|
|
network_info = _fake_network_info(self, 4) |
|
network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT_PHYSICAL |
|
network_info[0]['address'] = "51:5a:2c:a4:5e:1b" |
|
network_info[0]['details'] = dict(vlan='2145') |
|
network_info[0]['profile'] = dict(trusted='true') |
|
instance_ref.info_cache = objects.InstanceInfoCache( |
|
network_info=network_info) |
|
|
|
with test.nested( |
|
mock.patch('nova.objects.VirtualInterfaceList' |
|
'.get_by_instance_uuid', return_value=vifs), |
|
mock.patch('nova.objects.BlockDeviceMappingList' |
|
'.get_by_instance_uuid', return_value=bdms), |
|
mock.patch('nova.virt.libvirt.host.Host.get_guest', |
|
return_value=guest), |
|
mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc', |
|
return_value=xml), |
|
mock.patch.object(pci_utils, 'get_mac_by_pci_address', |
|
return_value='da:d1:f2:91:95:c1')): |
|
metadata_obj = drvr._build_device_metadata(self.context, |
|
instance_ref) |
|
metadata = metadata_obj.devices |
|
self.assertEqual(11, len(metadata)) |
|
self.assertIsInstance(metadata[0], |
|
objects.DiskMetadata) |
|
self.assertIsInstance(metadata[0].bus, |
|
objects.SCSIDeviceBus) |
|
self.assertEqual(['db'], metadata[0].tags) |
|
self.assertEqual(uuids.volume_1, metadata[0].serial) |
|
self.assertFalse(metadata[0].bus.obj_attr_is_set('address')) |
|
self.assertEqual(['nfvfunc1'], metadata[1].tags) |
|
self.assertEqual(uuids.volume_2, metadata[1].serial) |
|
self.assertIsInstance(metadata[1], |
|
objects.DiskMetadata) |
|
self.assertIsInstance(metadata[1].bus, |
|
objects.IDEDeviceBus) |
|
self.assertEqual(['nfvfunc1'], metadata[1].tags) |
|
self.assertFalse(metadata[1].bus.obj_attr_is_set('address')) |
|
self.assertIsInstance(metadata[2], |
|
objects.DiskMetadata) |
|
self.assertIsInstance(metadata[2].bus, |
|
objects.USBDeviceBus) |
|
self.assertEqual(['nfvfunc2'], metadata[2].tags) |
|
self.assertEqual(uuids.volume_3, metadata[2].serial) |
|
self.assertFalse(metadata[2].bus.obj_attr_is_set('address')) |
|
self.assertIsInstance(metadata[3], |
|
objects.DiskMetadata) |
|
self.assertIsInstance(metadata[3].bus, |
|
objects.PCIDeviceBus) |
|
self.assertEqual(['nfvfunc3'], metadata[3].tags) |
|
# NOTE(artom) We're not checking volume 4 because it's not tagged |
|
# and only tagged devices appear in the metadata |
|
self.assertEqual(uuids.volume_5, metadata[3].serial) |
|
self.assertEqual('0000:00:09.0', metadata[3].bus.address) |
|
self.assertIsInstance(metadata[4], |
|
objects.DiskMetadata) |
|
self.assertEqual(['nfvfunc4'], metadata[4].tags) |
|
self.assertEqual(uuids.volume_6, metadata[4].serial) |
|
self.assertIsInstance(metadata[5], |
|
objects.DiskMetadata) |
|
self.assertEqual(['nfvfunc5'], metadata[5].tags) |
|
self.assertEqual(uuids.volume_7, metadata[5].serial) |
|
self.assertIsInstance(metadata[6], |
|
objects.NetworkInterfaceMetadata) |
|
self.assertIsInstance(metadata[6].bus, |
|
objects.PCIDeviceBus) |
|
self.assertEqual(['mytag1'], metadata[6].tags) |
|
self.assertEqual('0000:00:03.0', metadata[6].bus.address) |
|
self.assertFalse(metadata[6].vf_trusted) |
|
|
|
# Make sure that interface with vlan is exposed to the metadata |
|
self.assertIsInstance(metadata[7], |
|
objects.NetworkInterfaceMetadata) |
|
self.assertEqual('51:5a:2c:a4:5e:1b', metadata[7].mac) |
|
self.assertEqual(2145, metadata[7].vlan) |
|
self.assertTrue(metadata[7].vf_trusted) |
|
self.assertIsInstance(metadata[8], |
|
objects.NetworkInterfaceMetadata) |
|
self.assertEqual(['mytag2'], metadata[8].tags) |
|
self.assertFalse(metadata[8].vf_trusted) |
|
self.assertIsInstance(metadata[9], |
|
objects.NetworkInterfaceMetadata) |
|
self.assertEqual(['mytag3'], metadata[9].tags) |
|
self.assertFalse(metadata[9].vf_trusted) |
|
self.assertIsInstance(metadata[10], |
|
objects.NetworkInterfaceMetadata) |
|
self.assertEqual(['pf_tag'], metadata[10].tags) |
|
self.assertEqual('da:d1:f2:91:95:c1', metadata[10].mac) |
|
self.assertEqual('0000:06:00.1', metadata[10].bus.address) |
|
|
|
    @mock.patch.object(host.Host, 'get_connection')
    @mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc')
    def test_detach_pci_devices(self, mocked_get_xml_desc, mock_conn):

        fake_domXML1_with_pci = (
            """<domain> <devices>
            <disk type='file' device='disk'>
            <driver name='qemu' type='qcow2' cache='none'/>
            <source file='xxx'/>
            <target dev='vda' bus='virtio'/>
            <alias name='virtio-disk0'/>
            <address type='pci' domain='0x0000' bus='0x00'
            slot='0x04' function='0x0'/>
            </disk>
            <hostdev mode="subsystem" type="pci" managed="yes">
            <source>
            <address function="0x1" slot="0x10" domain="0x0001"
             bus="0x04"/>
            </source>
            </hostdev></devices></domain>""")

        fake_domXML1_without_pci = (
            """<domain> <devices>
            <disk type='file' device='disk'>
            <driver name='qemu' type='qcow2' cache='none'/>
            <source file='xxx'/>
            <target dev='vda' bus='virtio'/>
            <alias name='virtio-disk0'/>
            <address type='pci' domain='0x0001' bus='0x00'
            slot='0x04' function='0x0'/>
            </disk></devices></domain>""")

        pci_device_info = {'compute_node_id': 1,
                           'instance_uuid': 'uuid',
                           'address': '0001:04:10.1'}
        pci_device = objects.PciDevice(**pci_device_info)
        pci_devices = [pci_device]
        # Returning the XML without the <hostdev> simulates a device that
        # has already disappeared from the domain, i.e. a successful detach.
        mocked_get_xml_desc.return_value = fake_domXML1_without_pci

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        dom = fakelibvirt.Domain(
            drvr._get_connection(), fake_domXML1_with_pci, False)
        guest = libvirt_guest.Guest(dom)
        drvr._detach_pci_devices(guest, pci_devices)
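
    # NOTE: in the timeout variant below, get_xml_desc keeps returning the
    # XML that still contains the <hostdev>, so the driver concludes the
    # device never detached and raises PciDeviceDetachFailed.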
|
|
|
    @mock.patch.object(host.Host, 'get_connection')
    @mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc')
    def test_detach_pci_devices_timeout(self, mocked_get_xml_desc, mock_conn):

        fake_domXML1_with_pci = (
            """<domain> <devices>
            <disk type='file' device='disk'>
            <driver name='qemu' type='qcow2' cache='none'/>
            <source file='xxx'/>
            <target dev='vda' bus='virtio'/>
            <alias name='virtio-disk0'/>
            <address type='pci' domain='0x0000' bus='0x00'
            slot='0x04' function='0x0'/>
            </disk>
            <hostdev mode="subsystem" type="pci" managed="yes">
            <source>
            <address function="0x1" slot="0x10" domain="0x0001"
             bus="0x04"/>
            </source>
            </hostdev></devices></domain>""")

        pci_device_info = {'compute_node_id': 1,
                           'instance_uuid': 'uuid',
                           'address': '0001:04:10.1'}
        pci_device = objects.PciDevice(**pci_device_info)
        pci_devices = [pci_device]
        mocked_get_xml_desc.return_value = fake_domXML1_with_pci

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        dom = fakelibvirt.Domain(
            drvr._get_connection(), fake_domXML1_with_pci, False)
        guest = libvirt_guest.Guest(dom)
        self.assertRaises(exception.PciDeviceDetachFailed,
                          drvr._detach_pci_devices, guest, pci_devices)
|
    @mock.patch.object(connector, 'get_connector_properties')
    def test_get_connector(self, fake_get_connector):
        initiator = 'fake.initiator.iqn'
        ip = 'fakeip'
        host = 'fakehost'
        wwpns = ['100010604b019419']
        wwnns = ['200010604b019419']
        self.flags(my_ip=ip)
        self.flags(host=host)

        expected = {
            'ip': ip,
            'initiator': initiator,
            'host': host,
            'wwpns': wwpns,
            'wwnns': wwnns
        }
        volume = {
            'id': 'fake'
        }

        # TODO(walter-boring) add the fake in os-brick
        fake_get_connector.return_value = expected
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        result = drvr.get_volume_connector(volume)
        self.assertThat(expected, matchers.DictMatches(result))
|
    @mock.patch.object(connector, 'get_connector_properties')
    def test_get_connector_storage_ip(self, fake_get_connector):
        ip = '100.100.100.100'
        storage_ip = '101.101.101.101'
        self.flags(my_block_storage_ip=storage_ip, my_ip=ip)
        volume = {
            'id': 'fake'
        }
        expected = {
            'ip': storage_ip
        }
        # TODO(walter-boring) add the fake in os-brick
        fake_get_connector.return_value = expected
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        result = drvr.get_volume_connector(volume)
        self.assertEqual(storage_ip, result['ip'])
|
    def test_lifecycle_event_registration(self):
        calls = []

        def fake_registerErrorHandler(*args, **kwargs):
            calls.append('fake_registerErrorHandler')

        def fake_get_host_capabilities(**args):
            cpu = vconfig.LibvirtConfigGuestCPU()
            cpu.arch = fields.Architecture.ARMV7

            caps = vconfig.LibvirtConfigCaps()
            caps.host = vconfig.LibvirtConfigCapsHost()
            caps.host.cpu = cpu
            calls.append('fake_get_host_capabilities')
            return caps

        @mock.patch.object(fakelibvirt, 'registerErrorHandler',
                           side_effect=fake_registerErrorHandler)
        @mock.patch.object(host.Host, "get_capabilities",
                           side_effect=fake_get_host_capabilities)
        def test_init_host(get_host_capabilities, register_error_handler):
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
            drvr.init_host("test_host")

        test_init_host()
        # NOTE(dkliban): Will fail if get_host_capabilities is called before
        # registerErrorHandler
        self.assertEqual(['fake_registerErrorHandler',
                          'fake_get_host_capabilities'], calls)
|
    def test_sanitize_log_to_xml(self):
        # Set up fake data.
        data = {'auth_password': 'scrubme'}
        bdm = [{'connection_info': {'data': data}}]
        bdi = {'block_device_mapping': bdm}

        # Tests that the parameters to the _get_guest_xml method
        # are sanitized for passwords when logged.
        def fake_debug(*args, **kwargs):
            if 'auth_password' in args[0]:
                self.assertNotIn('scrubme', args[0])

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conf = mock.Mock()
        with test.nested(
            mock.patch.object(libvirt_driver.LOG, 'debug',
                              side_effect=fake_debug),
            mock.patch.object(drvr, '_get_guest_config', return_value=conf)
        ) as (
            debug_mock, conf_mock
        ):
            drvr._get_guest_xml(self.context, self.test_instance,
                                network_info={}, disk_info={},
                                image_meta={}, block_device_info=bdi)
            # We don't care what the log message is; we just want to make
            # sure our stub method is called, which asserts the password
            # is scrubbed.
            self.assertTrue(debug_mock.called)
|
    @mock.patch.object(time, "time")
    def test_get_guest_config(self, time_mock):
        time_mock.return_value = 1234567.89

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        test_instance = copy.deepcopy(self.test_instance)
        test_instance["display_name"] = "purple tomatoes"
        test_instance['system_metadata']['owner_project_name'] = 'sweetshop'
        test_instance['system_metadata']['owner_user_name'] = 'cupcake'

        ctxt = context.RequestContext(project_id=123,
                                      project_name="aubergine",
                                      user_id=456,
                                      user_name="pie")

        flavor = objects.Flavor(name='m1.small',
                                memory_mb=6,
                                vcpus=28,
                                root_gb=496,
                                ephemeral_gb=8128,
                                swap=33550336,
                                extra_specs={})
        instance_ref = objects.Instance(**test_instance)
        instance_ref.flavor = flavor
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, disk_info,
                                     context=ctxt)

        self.assertEqual(cfg.uuid, instance_ref["uuid"])
        self.assertEqual(2, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertEqual(cfg.memory, 6 * units.Ki)
        self.assertEqual(cfg.vcpus, 28)
        self.assertEqual(cfg.os_type, fields.VMMode.HVM)
        self.assertEqual(cfg.os_boot_dev, ["hd"])
        self.assertIsNone(cfg.os_root)
        self.assertEqual(len(cfg.devices), 10)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual(len(cfg.metadata), 1)
        self.assertIsInstance(cfg.metadata[0],
                              vconfig.LibvirtConfigGuestMetaNovaInstance)
        self.assertEqual(version.version_string_with_package(),
                         cfg.metadata[0].package)
        self.assertEqual("purple tomatoes",
                         cfg.metadata[0].name)
        self.assertEqual(1234567.89,
                         cfg.metadata[0].creationTime)
        self.assertEqual("image",
                         cfg.metadata[0].roottype)
        self.assertEqual(str(instance_ref["image_ref"]),
                         cfg.metadata[0].rootid)

        self.assertIsInstance(cfg.metadata[0].owner,
                              vconfig.LibvirtConfigGuestMetaNovaOwner)
        self.assertEqual("838a72b0-0d54-4827-8fd6-fb1227633ceb",
                         cfg.metadata[0].owner.userid)
        self.assertEqual("cupcake",
                         cfg.metadata[0].owner.username)
        self.assertEqual("fake",
                         cfg.metadata[0].owner.projectid)
        self.assertEqual("sweetshop",
                         cfg.metadata[0].owner.projectname)

        self.assertIsInstance(cfg.metadata[0].flavor,
                              vconfig.LibvirtConfigGuestMetaNovaFlavor)
        self.assertEqual("m1.small",
                         cfg.metadata[0].flavor.name)
        self.assertEqual(6,
                         cfg.metadata[0].flavor.memory)
        self.assertEqual(28,
                         cfg.metadata[0].flavor.vcpus)
        self.assertEqual(496,
                         cfg.metadata[0].flavor.disk)
        self.assertEqual(8128,
                         cfg.metadata[0].flavor.ephemeral)
        self.assertEqual(33550336,
                         cfg.metadata[0].flavor.swap)
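
    # NOTE: the two tests below exercise the [libvirt]/num_pcie_ports option:
    # on a PCIe machine type (q35) the driver adds that many pcie-root-port
    # controllers, while on a legacy PCI machine type (i440fx) it adds none.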
|
|
|
    def test_get_guest_config_q35(self):
        self.flags(virt_type="kvm",
                   group='libvirt')

        TEST_AMOUNT_OF_PCIE_SLOTS = 8
        CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
                          group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_machine_type":
                           "pc-q35-test"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, disk_info)

        num_ports = 0
        for device in cfg.devices:
            try:
                if (device.root_name == 'controller' and
                        device.model == 'pcie-root-port'):
                    num_ports += 1
            except AttributeError:
                pass

        self.assertEqual(TEST_AMOUNT_OF_PCIE_SLOTS, num_ports)
|
    def test_get_guest_config_pcie_i440fx(self):
        self.flags(virt_type="kvm",
                   group='libvirt')

        TEST_AMOUNT_OF_PCIE_SLOTS = 8
        CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
                          group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_machine_type":
                           "pc-i440fx-test"}})

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, disk_info)

        num_ports = 0
        for device in cfg.devices:
            try:
                if (device.root_name == 'controller' and
                        device.model == 'pcie-root-port'):
                    num_ports += 1
            except AttributeError:
                pass

        # i440fx is not a PCIe machine type, so there should be no PCIe ports
        self.assertEqual(0, num_ports)
|
    def test_get_guest_config_missing_ownership_info(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        test_instance = copy.deepcopy(self.test_instance)

        ctxt = context.RequestContext(project_id=123,
                                      project_name="aubergine",
                                      user_id=456,
                                      user_name="pie")

        flavor = objects.Flavor(name='m1.small',
                                memory_mb=6,
                                vcpus=28,
                                root_gb=496,
                                ephemeral_gb=8128,
                                swap=33550336,
                                extra_specs={})
        instance_ref = objects.Instance(**test_instance)
        instance_ref.flavor = flavor
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, disk_info,
                                     context=ctxt)
        self.assertEqual("N/A",
                         cfg.metadata[0].owner.username)
        self.assertEqual("N/A",
                         cfg.metadata[0].owner.projectname)
|
    def test_get_guest_config_lxc(self):
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, {'mapping': {}})
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory)
        self.assertEqual(instance_ref.flavor.vcpus, cfg.vcpus)
        self.assertEqual(fields.VMMode.EXE, cfg.os_type)
        self.assertEqual("/sbin/init", cfg.os_init_path)
        self.assertEqual("console=tty0 console=ttyS0 console=hvc0",
                         cfg.os_cmdline)
        self.assertIsNone(cfg.os_root)
        self.assertEqual(3, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestFilesys)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestConsole)
|
    def test_get_guest_config_lxc_with_id_maps(self):
        self.flags(virt_type='lxc', group='libvirt')
        self.flags(uid_maps=['0:1000:100'], group='libvirt')
        self.flags(gid_maps=['0:1000:100'], group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, {'mapping': {}})
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory)
        self.assertEqual(instance_ref.vcpus, cfg.vcpus)
        self.assertEqual(fields.VMMode.EXE, cfg.os_type)
        self.assertEqual("/sbin/init", cfg.os_init_path)
        self.assertEqual("console=tty0 console=ttyS0 console=hvc0",
                         cfg.os_cmdline)
        self.assertIsNone(cfg.os_root)
        self.assertEqual(3, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestFilesys)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestConsole)
        self.assertEqual(len(cfg.idmaps), 2)
        self.assertIsInstance(cfg.idmaps[0],
                              vconfig.LibvirtConfigGuestUIDMap)
        self.assertIsInstance(cfg.idmaps[1],
                              vconfig.LibvirtConfigGuestGIDMap)
|
    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_numa_host_instance_fits(self, is_able):
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.X86_64
        caps.host.topology = fakelibvirt.NUMATopology()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with test.nested(
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps)):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertIsNone(cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
|
    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_numa_host_instance_no_fit(self, is_able):
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.X86_64
        caps.host.topology = fakelibvirt.NUMATopology()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with test.nested(
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set', return_value=set([3])),
                mock.patch.object(random, 'choice'),
                mock.patch.object(drvr, '_has_numa_support',
                                  return_value=False)
                ) as (get_host_cap_mock,
                      get_vcpu_pin_set_mock, choice_mock,
                      _has_numa_support_mock):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertFalse(choice_mock.called)
            self.assertEqual(set([3]), cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
|
    def _test_get_guest_memory_backing_config(
            self, host_topology, inst_topology, numatune):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        with mock.patch.object(
                drvr, "_get_host_numa_topology",
                return_value=host_topology):
            return drvr._get_guest_memory_backing_config(
                inst_topology, numatune, {})
|
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_get_guest_memory_backing_config_large_success(self, mock_version):
        host_topology = objects.NUMATopology(
            cells=[
                objects.NUMACell(
                    id=3, cpuset=set([1]), siblings=[set([1])], memory=1024,
                    mempages=[
                        objects.NUMAPagesTopology(size_kb=4, total=2000,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=2048, total=512,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=1048576, total=0,
                                                  used=0),
                    ])])
        inst_topology = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(
                id=3, cpuset=set([0, 1]), memory=1024, pagesize=2048)])

        numa_tune = vconfig.LibvirtConfigGuestNUMATune()
        numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
        numa_tune.memnodes[0].cellid = 0
        numa_tune.memnodes[0].nodeset = [3]

        result = self._test_get_guest_memory_backing_config(
            host_topology, inst_topology, numa_tune)
        self.assertEqual(1, len(result.hugepages))
        self.assertEqual(2048, result.hugepages[0].size_kb)
        self.assertEqual([0], result.hugepages[0].nodeset)
|
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_get_guest_memory_backing_config_smallest(self, mock_version):
        host_topology = objects.NUMATopology(
            cells=[
                objects.NUMACell(
                    id=3, cpuset=set([1]), siblings=[set([1])], memory=1024,
                    mempages=[
                        objects.NUMAPagesTopology(size_kb=4, total=2000,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=2048, total=512,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=1048576, total=0,
                                                  used=0),
                    ])])
        inst_topology = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(
                id=3, cpuset=set([0, 1]), memory=1024, pagesize=4)])

        numa_tune = vconfig.LibvirtConfigGuestNUMATune()
        numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
        numa_tune.memnodes[0].cellid = 0
        numa_tune.memnodes[0].nodeset = [3]

        result = self._test_get_guest_memory_backing_config(
            host_topology, inst_topology, numa_tune)
        self.assertIsNone(result)
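
    # NOTE: a realtime guest's memory is expected to be locked and excluded
    # from page sharing; the next test asserts this via the membacking.locked
    # and membacking.sharedpages fields.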
|
|
|
    def test_get_guest_memory_backing_config_realtime(self):
        flavor = {"extra_specs": {
            "hw:cpu_realtime": "yes",
            "hw:cpu_policy": "dedicated"
        }}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        membacking = drvr._get_guest_memory_backing_config(
            None, None, flavor)
        self.assertTrue(membacking.locked)
        self.assertFalse(membacking.sharedpages)
|
    def test_get_guest_memory_backing_config_file_backed(self):
        self.flags(file_backed_memory=1024, group="libvirt")

        result = self._test_get_guest_memory_backing_config(
            None, None, None
        )
        self.assertTrue(result.sharedaccess)
        self.assertTrue(result.filesource)
        self.assertTrue(result.allocateimmediate)
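
    # NOTE: the three discard tests below show that the discard feature is
    # only enabled when both the libvirt and the QEMU versions meet their
    # respective MIN_*_FILE_BACKED_DISCARD_VERSION minimums; dropping either
    # one below its minimum disables it.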
|
|
|
    @mock.patch.object(fakelibvirt.Connection, 'getVersion')
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
    def test_get_guest_memory_backing_config_file_backed_discard(self,
            mock_lib_version, mock_version):
        self.flags(file_backed_memory=1024, group='libvirt')

        mock_lib_version.return_value = versionutils.convert_version_to_int(
            libvirt_driver.MIN_LIBVIRT_FILE_BACKED_DISCARD_VERSION)
        mock_version.return_value = versionutils.convert_version_to_int(
            libvirt_driver.MIN_QEMU_FILE_BACKED_DISCARD_VERSION)

        result = self._test_get_guest_memory_backing_config(
            None, None, None
        )
        self.assertTrue(result.discard)
|
    @mock.patch.object(fakelibvirt.Connection, 'getVersion')
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
    def test_get_guest_memory_backing_config_file_backed_discard_libvirt(self,
            mock_lib_version, mock_version):
        self.flags(file_backed_memory=1024, group='libvirt')

        mock_lib_version.return_value = versionutils.convert_version_to_int(
            libvirt_driver.MIN_LIBVIRT_FILE_BACKED_DISCARD_VERSION) - 1
        mock_version.return_value = versionutils.convert_version_to_int(
            libvirt_driver.MIN_QEMU_FILE_BACKED_DISCARD_VERSION)

        result = self._test_get_guest_memory_backing_config(
            None, None, None
        )
        self.assertFalse(result.discard)
|
    @mock.patch.object(fakelibvirt.Connection, 'getVersion')
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
    def test_get_guest_memory_backing_config_file_backed_discard_qemu(self,
            mock_lib_version, mock_version):
        self.flags(file_backed_memory=1024, group='libvirt')

        mock_lib_version.return_value = versionutils.convert_version_to_int(
            libvirt_driver.MIN_LIBVIRT_FILE_BACKED_DISCARD_VERSION)
        mock_version.return_value = versionutils.convert_version_to_int(
            libvirt_driver.MIN_QEMU_FILE_BACKED_DISCARD_VERSION) - 1

        result = self._test_get_guest_memory_backing_config(
            None, None, None
        )
        self.assertFalse(result.discard)
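
    # NOTE: file-backed memory cannot be combined with an explicit hugepage
    # request; the next test asserts that this raises MemoryPagesUnsupported.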
|
|
|
    def test_get_guest_memory_backing_config_file_backed_hugepages(self):
        self.flags(file_backed_memory=1024, group="libvirt")
        host_topology = objects.NUMATopology(
            cells=[
                objects.NUMACell(
                    id=3, cpuset=set([1]), siblings=[set([1])], memory=1024,
                    mempages=[
                        objects.NUMAPagesTopology(size_kb=4, total=2000,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=2048, total=512,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=1048576, total=0,
                                                  used=0),
                    ])])
        inst_topology = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(
                id=3, cpuset=set([0, 1]), memory=1024, pagesize=2048)])

        numa_tune = vconfig.LibvirtConfigGuestNUMATune()
        numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
        numa_tune.memnodes[0].cellid = 0
        numa_tune.memnodes[0].nodeset = [3]

        self.assertRaises(exception.MemoryPagesUnsupported,
                          self._test_get_guest_memory_backing_config,
                          host_topology, inst_topology, numa_tune)
|
    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_numa_host_instance_pci_no_numa_info(
            self, is_able):
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.X86_64
        caps.host.topology = fakelibvirt.NUMATopology()

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        pci_device_info = dict(test_pci_device.fake_db_dev)
        pci_device_info.update(compute_node_id=1,
                               label='fake',
                               status=fields.PciDeviceStatus.AVAILABLE,
                               address='0000:00:00.1',
                               instance_uuid=None,
                               request_id=None,
                               extra_info={},
                               numa_node=None)
        pci_device = objects.PciDevice(**pci_device_info)

        with test.nested(
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(
                    host.Host, "get_capabilities", return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set', return_value=set([3])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                mock.patch.object(pci_manager, "get_instance_pci_devs",
                                  return_value=[pci_device])):
            cfg = conn._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertEqual(set([3]), cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
|
    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_numa_host_instance_2pci_no_fit(self, is_able):
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.X86_64
        caps.host.topology = fakelibvirt.NUMATopology()

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        pci_device_info = dict(test_pci_device.fake_db_dev)
        pci_device_info.update(compute_node_id=1,
                               label='fake',
                               status=fields.PciDeviceStatus.AVAILABLE,
                               address='0000:00:00.1',
                               instance_uuid=None,
                               request_id=None,
                               extra_info={},
                               numa_node=1)
        pci_device = objects.PciDevice(**pci_device_info)
        pci_device_info.update(numa_node=0, address='0000:00:00.2')
        pci_device2 = objects.PciDevice(**pci_device_info)
        with test.nested(
                mock.patch.object(
                    host.Host, "get_capabilities", return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set', return_value=set([3])),
                mock.patch.object(random, 'choice'),
                mock.patch.object(pci_manager, "get_instance_pci_devs",
                                  return_value=[pci_device, pci_device2]),
                mock.patch.object(conn, '_has_numa_support',
                                  return_value=False)
                ) as (get_host_cap_mock,
                      get_vcpu_pin_set_mock, choice_mock, pci_mock,
                      _has_numa_support_mock):
            cfg = conn._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertFalse(choice_mock.called)
            self.assertEqual(set([3]), cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
|
    @mock.patch.object(fakelibvirt.Connection, 'getType')
    @mock.patch.object(fakelibvirt.Connection, 'getVersion')
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
    @mock.patch.object(host.Host, 'get_capabilities')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_set_host_enabled')
    def _test_get_guest_config_numa_unsupported(self, fake_lib_version,
                                                fake_version, fake_type,
                                                fake_arch, exception_class,
                                                pagesize, mock_host,
                                                mock_caps, mock_lib_version,
                                                mock_version, mock_type):
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=0, cpuset=set([0]),
                memory=1024, pagesize=pagesize)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fake_arch
        caps.host.topology = fakelibvirt.NUMATopology()

        mock_type.return_value = fake_type
        mock_version.return_value = fake_version
        mock_lib_version.return_value = fake_lib_version
        mock_caps.return_value = caps

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        self.assertRaises(exception_class,
                          drvr._get_guest_config,
                          instance_ref, [],
                          image_meta, disk_info)
|
    def test_get_guest_config_numa_other_arch_qemu(self):
        self.flags(virt_type='kvm', group='libvirt')

        self._test_get_guest_config_numa_unsupported(
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_VERSION),
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_VERSION),
            host.HV_DRIVER_QEMU,
            fields.Architecture.S390,
            exception.NUMATopologyUnsupported,
            None)
|
    def test_get_guest_config_numa_xen(self):
        self.flags(virt_type='xen', group='libvirt')
        self._test_get_guest_config_numa_unsupported(
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_VERSION),
            versionutils.convert_version_to_int((4, 5, 0)),
            'XEN',
            fields.Architecture.X86_64,
            exception.NUMATopologyUnsupported,
            None)
|
    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(
            self, is_able):
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=1024, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.X86_64
        caps.host.topology = fakelibvirt.NUMATopology(kb_mem=4194304)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with test.nested(
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set', return_value=set([2, 3])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8)))
                ) as (has_min_version_mock, get_host_cap_mock,
                      get_vcpu_pin_set_mock, get_online_cpus_mock):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            # NOTE(ndipanov): we make sure that pin_set was taken into account
            # when choosing viable cells
            self.assertEqual(set([2, 3]), cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
|
    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_non_numa_host_instance_topo(self, is_able):
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                       id=0, cpuset=set([0]), memory=1024),
                   objects.InstanceNUMACell(
                       id=1, cpuset=set([2]), memory=1024)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.X86_64
        caps.host.topology = None

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps)):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertIsNone(cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.numatune)
            self.assertIsNotNone(cfg.cpu.numa)
            for instance_cell, numa_cfg_cell in zip(
                    instance_topology.cells, cfg.cpu.numa.cells):
                self.assertEqual(instance_cell.id, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
|
    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_numa_host_instance_topo(self, is_able):
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                       id=1, cpuset=set([0, 1]), memory=1024, pagesize=None),
                   objects.InstanceNUMACell(
                       id=2, cpuset=set([2, 3]), memory=1024,
                       pagesize=None)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.X86_64
        caps.host.topology = fakelibvirt.NUMATopology()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set',
                    return_value=set([2, 3, 4, 5])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                ):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertIsNone(cfg.cpuset)
            # Test that the pinning is correct and limited to the allowed
            # CPUs only
            self.assertEqual(0, cfg.cputune.vcpupin[0].id)
            self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[0].cpuset)
            self.assertEqual(1, cfg.cputune.vcpupin[1].id)
            self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[1].cpuset)
            self.assertEqual(2, cfg.cputune.vcpupin[2].id)
            self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[2].cpuset)
            self.assertEqual(3, cfg.cputune.vcpupin[3].id)
            self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[3].cpuset)
            self.assertIsNotNone(cfg.cpu.numa)

            self.assertIsInstance(cfg.cputune.emulatorpin,
                                  vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
            self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset)

            for instance_cell, numa_cfg_cell, index in zip(
                    instance_topology.cells,
                    cfg.cpu.numa.cells,
                    range(len(instance_topology.cells))):
                self.assertEqual(index, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)

            allnodes = [cell.id for cell in instance_topology.cells]
            self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
            self.assertEqual("strict", cfg.numatune.memory.mode)

            for instance_cell, memnode, index in zip(
                    instance_topology.cells,
                    cfg.numatune.memnodes,
                    range(len(instance_topology.cells))):
                self.assertEqual(index, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)
|
    def test_get_guest_config_numa_host_instance_topo_reordered(self):
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                       id=3, cpuset=set([0, 1]), memory=1024),
                   objects.InstanceNUMACell(
                       id=0, cpuset=set([2, 3]), memory=1024)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.X86_64
        caps.host.topology = fakelibvirt.NUMATopology()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                ):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertIsNone(cfg.cpuset)
            # Test that the pinning is correct and limited to the allowed
            # CPUs only
            self.assertEqual(0, cfg.cputune.vcpupin[0].id)
            self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[0].cpuset)
            self.assertEqual(1, cfg.cputune.vcpupin[1].id)
            self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[1].cpuset)
            self.assertEqual(2, cfg.cputune.vcpupin[2].id)
            self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[2].cpuset)
            self.assertEqual(3, cfg.cputune.vcpupin[3].id)
            self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[3].cpuset)
            self.assertIsNotNone(cfg.cpu.numa)

            self.assertIsInstance(cfg.cputune.emulatorpin,
                                  vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
            self.assertEqual(set([0, 1, 6, 7]), cfg.cputune.emulatorpin.cpuset)

            for index, (instance_cell, numa_cfg_cell) in enumerate(zip(
                    instance_topology.cells,
                    cfg.cpu.numa.cells)):
                self.assertEqual(index, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
                self.assertIsNone(numa_cfg_cell.memAccess)

            allnodes = set([cell.id for cell in instance_topology.cells])
            self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
            self.assertEqual("strict", cfg.numatune.memory.mode)

            for index, (instance_cell, memnode) in enumerate(zip(
                    instance_topology.cells,
                    cfg.numatune.memnodes)):
                self.assertEqual(index, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)
|
def test_get_guest_config_numa_host_instance_topo_cpu_pinning(self): |
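        """Check that explicit cpu_pinning is honoured: every vcpupin entry
        maps a vCPU to exactly its pinned host CPU, and the emulator
        threads are pinned to the union of all pinned host CPUs.
        """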
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                       id=1, cpuset=set([0, 1]), memory=1024,
                       cpu_pinning={0: 24, 1: 25}),
                   objects.InstanceNUMACell(
                       id=0, cpuset=set([2, 3]), memory=1024,
                       cpu_pinning={2: 0, 3: 1})])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336,
                                name='fake', extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.X86_64
        caps.host.topology = fakelibvirt.NUMATopology(
            sockets_per_cell=4, cores_per_socket=3, threads_per_core=2)

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                ):
            cfg = conn._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertIsNone(cfg.cpuset)
            # Test that the pinning is correct and limited to allowed only
            self.assertEqual(0, cfg.cputune.vcpupin[0].id)
            self.assertEqual(set([24]), cfg.cputune.vcpupin[0].cpuset)
            self.assertEqual(1, cfg.cputune.vcpupin[1].id)
            self.assertEqual(set([25]), cfg.cputune.vcpupin[1].cpuset)
            self.assertEqual(2, cfg.cputune.vcpupin[2].id)
            self.assertEqual(set([0]), cfg.cputune.vcpupin[2].cpuset)
            self.assertEqual(3, cfg.cputune.vcpupin[3].id)
            self.assertEqual(set([1]), cfg.cputune.vcpupin[3].cpuset)
            self.assertIsNotNone(cfg.cpu.numa)

            # Emulator must be pinned to union of cfg.cputune.vcpupin[*].cpuset
            self.assertIsInstance(
                cfg.cputune.emulatorpin,
                vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
            self.assertEqual(set([0, 1, 24, 25]),
                             cfg.cputune.emulatorpin.cpuset)

            for i, (instance_cell, numa_cfg_cell) in enumerate(zip(
                    instance_topology.cells, cfg.cpu.numa.cells)):
                self.assertEqual(i, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
                self.assertIsNone(numa_cfg_cell.memAccess)

            allnodes = set([cell.id for cell in instance_topology.cells])
            self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
            self.assertEqual("strict", cfg.numatune.memory.mode)

            for i, (instance_cell, memnode) in enumerate(zip(
                    instance_topology.cells, cfg.numatune.memnodes)):
                self.assertEqual(i, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)

    def test_get_guest_config_numa_host_mempages_shared(self):
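        """Check that when every instance cell requests 2 MiB pages, the
        guest NUMA cells get "shared" memAccess, no realtime vCPU scheduler
        entries are created, and the emulator threads stay on the
        vcpu_pin_set.
        """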
        instance_topology = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=1, cpuset=set([0, 1]),
                    memory=1024, pagesize=2048),
                objects.InstanceNUMACell(
                    id=2, cpuset=set([2, 3]),
                    memory=1024, pagesize=2048)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336,
                                name='fake', extra_specs={})
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.X86_64
        caps.host.topology = fakelibvirt.NUMATopology()
        for i, cell in enumerate(caps.host.topology.cells):
            cell.mempages = fakelibvirt.create_mempages(
                [(4, 1024 * i), (2048, i)])

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set',
                    return_value=set([2, 3, 4, 5])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                ):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)

            for index, (instance_cell, numa_cfg_cell) in enumerate(zip(
                    instance_topology.cells,
                    cfg.cpu.numa.cells)):
                self.assertEqual(index, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
                self.assertEqual("shared", numa_cfg_cell.memAccess)

            allnodes = [cell.id for cell in instance_topology.cells]
            self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
            self.assertEqual("strict", cfg.numatune.memory.mode)

            for index, (instance_cell, memnode) in enumerate(zip(
                    instance_topology.cells,
                    cfg.numatune.memnodes)):
                self.assertEqual(index, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)

            self.assertEqual(0, len(cfg.cputune.vcpusched))
            self.assertEqual(set([2, 3, 4, 5]),
                             cfg.cputune.emulatorpin.cpuset)

    def test_get_guest_config_numa_host_instance_cpu_pinning_realtime(self):
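        """Check a realtime instance (hw:cpu_realtime_mask=^0-1): vCPUs 2-3
        get a fifo realtime scheduler while vCPUs 0-1 stay normal, and the
        emulator threads are pinned alongside the normal vCPUs.
        """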
        instance_topology = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=2, cpuset=set([0, 1]),
                    memory=1024, pagesize=2048),
                objects.InstanceNUMACell(
                    id=3, cpuset=set([2, 3]),
                    memory=1024, pagesize=2048)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336,
                                name='fake',
                                extra_specs={
                                    "hw:cpu_realtime": "yes",
                                    "hw:cpu_policy": "dedicated",
                                    "hw:cpu_realtime_mask": "^0-1"
                                })
        instance_ref.flavor = flavor

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.X86_64
        caps.host.topology = fakelibvirt.NUMATopology()
        for i, cell in enumerate(caps.host.topology.cells):
            cell.mempages = fakelibvirt.create_mempages(
                [(4, 1024 * i), (2048, i)])

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set',
                    return_value=set([4, 5, 6, 7])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                ):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)

            for index, (instance_cell, numa_cfg_cell) in enumerate(zip(
                    instance_topology.cells,
                    cfg.cpu.numa.cells)):
                self.assertEqual(index, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
                self.assertEqual("shared", numa_cfg_cell.memAccess)

            allnodes = [cell.id for cell in instance_topology.cells]
            self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
            self.assertEqual("strict", cfg.numatune.memory.mode)

            for index, (instance_cell, memnode) in enumerate(zip(
                    instance_topology.cells,
                    cfg.numatune.memnodes)):
                self.assertEqual(index, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)

            self.assertEqual(1, len(cfg.cputune.vcpusched))
            self.assertEqual("fifo", cfg.cputune.vcpusched[0].scheduler)

            # Ensure vCPUs 0-1 are pinned on host CPUs 4-5 and vCPUs 2-3
            # on host CPUs 6-7, according to the realtime mask ^0-1
            self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[0].cpuset)
            self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[1].cpuset)
            self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[2].cpuset)
            self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[3].cpuset)

            # Ensure the emulator threads are pinned to host CPUs 4-5,
            # i.e. alongside the "normal" (non-realtime) vCPUs
            self.assertEqual(set([4, 5]), cfg.cputune.emulatorpin.cpuset)

            # Ensure the realtime scheduler covers vCPUs 2-3, the ones
            # pinned to host CPUs 6-7
            self.assertEqual(set([2, 3]), cfg.cputune.vcpusched[0].vcpus)

    def test_get_guest_config_numa_host_instance_isolated_emulthreads(self):
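        """Check that with the ISOLATE emulator threads policy the emulator
        threads are pinned to the reserved pCPU instead of the vCPU pins.
        """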
        instance_topology = objects.InstanceNUMATopology(
            emulator_threads_policy=(
                fields.CPUEmulatorThreadsPolicy.ISOLATE),
            cells=[
                objects.InstanceNUMACell(
                    id=0, cpuset=set([0, 1]),
                    memory=1024, pagesize=2048,
                    cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
                    cpu_pinning={0: 4, 1: 5},
                    cpuset_reserved=set([6])),
                objects.InstanceNUMACell(
                    id=1, cpuset=set([2, 3]),
                    memory=1024, pagesize=2048,
                    cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
                    cpu_pinning={2: 7, 3: 8})])

        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = fakelibvirt.NUMATopology()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)

        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set',
                    return_value=set([4, 5, 6, 7, 8])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(10))),
                ):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)

            self.assertEqual(set([6]), cfg.cputune.emulatorpin.cpuset)
            self.assertEqual(set([4]), cfg.cputune.vcpupin[0].cpuset)
            self.assertEqual(set([5]), cfg.cputune.vcpupin[1].cpuset)
            self.assertEqual(set([7]), cfg.cputune.vcpupin[2].cpuset)
            self.assertEqual(set([8]), cfg.cputune.vcpupin[3].cpuset)

    def test_get_guest_config_numa_host_instance_shared_emulthreads_err(
            self):
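        """Check that with the SHARE emulator threads policy, a
        cpu_shared_set containing no online pCPUs makes guest config
        generation fail with exception.Invalid.
        """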
        self.flags(cpu_shared_set="48-50", group="compute")
        instance_topology = objects.InstanceNUMATopology(
            emulator_threads_policy=(
                fields.CPUEmulatorThreadsPolicy.SHARE),
            cells=[
                objects.InstanceNUMACell(
                    id=0, cpuset=set([0, 1]),
                    memory=1024, pagesize=2048,
                    cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
                    cpu_pinning={0: 4, 1: 5},
                    cpuset_reserved=set([6])),
                objects.InstanceNUMACell(
                    id=1, cpuset=set([2, 3]),
                    memory=1024, pagesize=2048,
                    cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
                    cpu_pinning={2: 7, 3: 8})])

        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = fakelibvirt.NUMATopology()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)

        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set',
                    return_value=set([4, 5, 6, 7, 8])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(10))),
                ):
            # pCPUs 48-50 from cpu_shared_set are not online
            self.assertRaises(exception.Invalid, drvr._get_guest_config,
                              instance_ref, [], image_meta, disk_info)

    def test_get_guest_config_numa_host_instance_shared_emulator_threads(
            self):
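        """Check that with the SHARE emulator threads policy the emulator
        threads are pinned to the online subset of cpu_shared_set rather
        than to the vCPU pins.
        """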
        self.flags(cpu_shared_set="48-50", group="compute")
        instance_topology = objects.InstanceNUMATopology(
            emulator_threads_policy=(
                fields.CPUEmulatorThreadsPolicy.SHARE),
            cells=[
                objects.InstanceNUMACell(
                    id=0, cpuset=set([0, 1]),
                    memory=1024, pagesize=2048,
                    cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
                    cpu_pinning={0: 4, 1: 5},
                    cpuset_reserved=set([6])),
                objects.InstanceNUMACell(
                    id=1, cpuset=set([2, 3]),
                    memory=1024, pagesize=2048,
                    cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
                    cpu_pinning={2: 7, 3: 8})])

        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = fakelibvirt.NUMATopology()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)

        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set',
                    return_value=set([4, 5, 6, 7, 8])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(list(range(10)) +
                                                  [48, 50])),
                ):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)

            # cpu_shared_set is configured with [48, 49, 50] but only
            # [48, 50] are online.
            self.assertEqual(set([48, 50]), cfg.cputune.emulatorpin.cpuset)
            self.assertEqual(set([4]), cfg.cputune.vcpupin[0].cpuset)
            self.assertEqual(set([5]), cfg.cputune.vcpupin[1].cpuset)
            self.assertEqual(set([7]), cfg.cputune.vcpupin[2].cpuset)
            self.assertEqual(set([8]), cfg.cputune.vcpupin[3].cpuset)

    def test_get_cpu_numa_config_from_instance(self):
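        """Check the translation of an InstanceNUMATopology into a guest
        CPU NUMA config: per-cell cpus, memory converted to KiB, and
        "shared" memAccess when the hugepage flag (second argument) is
        True.
        """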
        topology = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128),
            objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=128),
        ])
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conf = drvr._get_cpu_numa_config_from_instance(topology, True)

        self.assertIsInstance(conf, vconfig.LibvirtConfigGuestCPUNUMA)
        self.assertEqual(0, conf.cells[0].id)
        self.assertEqual(set([1, 2]), conf.cells[0].cpus)
        self.assertEqual(131072, conf.cells[0].memory)
        self.assertEqual("shared", conf.cells[0].memAccess)
        self.assertEqual(1, conf.cells[1].id)
        self.assertEqual(set([3, 4]), conf.cells[1].cpus)
        self.assertEqual(131072, conf.cells[1].memory)
        self.assertEqual("shared", conf.cells[1].memAccess)

    def test_get_cpu_numa_config_from_instance_none(self):
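        """A missing instance NUMA topology yields no guest CPU NUMA
        config.
        """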
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conf = drvr._get_cpu_numa_config_from_instance(None, False)
        self.assertIsNone(conf)

    @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
                       return_value=True)
    def test_get_memnode_numa_config_from_instance(self, mock_numa):
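        """Check that guest memnodes are indexed by guest cell id while
        each nodeset keeps the host-visible cell id, even when instance
        cell ids are non-contiguous (0, 1, 16).
        """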
        instance_topology = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128),
            objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=128),
            objects.InstanceNUMACell(id=16, cpuset=set([5, 6]), memory=128)
        ])

        host_topology = objects.NUMATopology(
            cells=[
                objects.NUMACell(
                    id=0, cpuset=set([1, 2]), siblings=[set([1]), set([2])],
                    memory=1024, mempages=[]),
                objects.NUMACell(
                    id=1, cpuset=set([3, 4]), siblings=[set([3]), set([4])],
                    memory=1024, mempages=[]),
                objects.NUMACell(
                    id=16, cpuset=set([5, 6]), siblings=[set([5]), set([6])],
                    memory=1024, mempages=[])])

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        with test.nested(
                mock.patch.object(drvr, "_get_host_numa_topology",
                                  return_value=host_topology)):
            guest_numa_config = drvr._get_guest_numa_config(
                instance_topology, flavor={},
                allowed_cpus=[1, 2, 3, 4, 5, 6], image_meta={})
            self.assertEqual(2, guest_numa_config.numatune.memnodes[2].cellid)
            self.assertEqual([16],
                             guest_numa_config.numatune.memnodes[2].nodeset)
            self.assertEqual(set([5, 6]),
                             guest_numa_config.numaconfig.cells[2].cpus)

    @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
                       return_value=True)
    @mock.patch.object(host.Host, "get_capabilities")
    def test_does_not_want_hugepages(self, mock_caps, mock_numa):
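        """_wants_hugepages() must be False when either topology is missing
        or the instance only asks for small (4 KiB) pages.
        """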
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_topology = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=1, cpuset=set([0, 1]),
                    memory=1024, pagesize=4),
                objects.InstanceNUMACell(
                    id=2, cpuset=set([2, 3]),
                    memory=1024, pagesize=4)])

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fields.Architecture.X86_64
        caps.host.topology = fakelibvirt.NUMATopology()

        mock_caps.return_value = caps

        host_topology = drvr._get_host_numa_topology()

        self.assertFalse(drvr._wants_hugepages(None, None))
        self.assertFalse(drvr._wants_hugepages(host_topology, None))
        self.assertFalse(drvr._wants_hugepages(None, instance_topology))
        self.assertFalse(drvr._wants_hugepages(host_topology,
                                               instance_topology))

    @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
                       return_value=True)
    @mock.patch.object(host.Host, "get_capabilities")
    def test_does_want_hugepages(self, mock_caps, mock_numa):
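        """_wants_hugepages() must hold on every supported architecture;
        the per-arch check is delegated to _test_does_want_hugepages.
        """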
        for arch in [fields.Architecture.I686,
                     fields.Architecture.X86_64,
                     fields.Architecture.AARCH64,
                     fields.Architecture.PPC64LE,
                     fields.Architecture.PPC64]:
            self._test_does_want_hugepages(mock_caps, mock_numa, arch)

    def _test_does_want_hugepages(self, mock_caps, mock_numa, architecture):
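        """Check that a 2 MiB-page instance topology wants hugepages and
        that reserved_huge_pages is accounted per host cell.
        """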
        self.flags(reserved_huge_pages=[
            {'node': 0, 'size': 2048, 'count': 128},
            {'node': 1, 'size': 2048, 'count': 1},
            {'node': 3, 'size': 2048, 'count': 64}])
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_topology = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=1, cpuset=set([0, 1]),
                    memory=1024, pagesize=2048),
                objects.InstanceNUMACell(
                    id=2, cpuset=set([2, 3]),
                    memory=1024, pagesize=2048)])

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = architecture
        caps.host.topology = fakelibvirt.NUMATopology()
        for i, cell in enumerate(caps.host.topology.cells):
            cell.mempages = fakelibvirt.create_mempages(
                [(4, 1024 * i), (2048, i)])

        mock_caps.return_value = caps

        host_topology = drvr._get_host_numa_topology()
        self.assertEqual(128, host_topology.cells[0].mempages[1].reserved)
        self.assertEqual(1, host_topology.cells[1].mempages[1].reserved)
        self.assertEqual(0, host_topology.cells[2].mempages[1].reserved)
        self.assertEqual(64, host_topology.cells[3].mempages[1].reserved)

        self.assertTrue(drvr._wants_hugepages(host_topology,
                                              instance_topology))

    def test_get_guest_config_clock(self):
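        """Check the guest clock config across architectures; per hpet_map
        below, the HPET timer is only expected for x86-family guests.
        """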
        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        hpet_map = {
            fields.Architecture.X86_64: True,
            fields.Architecture.I686: True,
            fields.Architecture.PPC: False,
            fields.Architecture.PPC64: False,
            fields.Architecture.ARMV7: False,
            fields.Architecture.AARCH64: False,
        }

        for guestarch, expect_hpet in hpet_map.items():
            with mock.patch.object(libvirt_driver.libvirt_utils,
                                   'get_arch',
                                   return_value=guestarch):
                cfg = drvr._get_guest_config(instance_ref, [],
                                             image_meta,
                                             disk_info)
                self.assertIsInstance(cfg.clock,