Remove code already migrated to Ironic and IPA

Change-Id: Ic1bef90400f9067ac38feaa88072fc40988e3158
Dmitry Tantsur 2024-09-10 11:26:22 +02:00
parent 1ca3c8cf1a
commit 779d498663
No known key found for this signature in database
GPG Key ID: 315B2AF9FD216C60
9 changed files with 0 additions and 2558 deletions


@@ -1,143 +0,0 @@
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo_config import cfg
from ironic_lib.common.i18n import _
from ironic_lib import exception
from ironic_lib import utils
opts = [
cfg.IntOpt('check_device_interval',
default=1,
               help='Interval (in seconds) between checks for activity on '
                    'the attached iSCSI device after Ironic has finished '
                    'creating the partition table and before the image is '
                    'copied to the node.'),
cfg.IntOpt('check_device_max_retries',
default=20,
help='The maximum number of times to check that the device is '
'not accessed by another process. If the device is still '
'busy after that, the disk partitioning will be treated as'
' having failed.')
]
CONF = cfg.CONF
opt_group = cfg.OptGroup(name='disk_partitioner',
title='Options for the disk partitioner')
CONF.register_group(opt_group)
CONF.register_opts(opts, opt_group)
LOG = logging.getLogger(__name__)
class DiskPartitioner(object):
def __init__(self, device, disk_label='msdos', alignment='optimal'):
"""A convenient wrapper around the parted tool.
:param device: The device path.
:param disk_label: The type of the partition table. Valid types are:
"bsd", "dvh", "gpt", "loop", "mac", "msdos",
"pc98", or "sun".
:param alignment: Set alignment for newly created partitions.
Valid types are: none, cylinder, minimal and
optimal.
"""
self._device = device
self._disk_label = disk_label
self._alignment = alignment
self._partitions = []
def _exec(self, *args):
# NOTE(lucasagomes): utils.execute() is already a wrapper on top
# of processutils.execute() which raises specific
# exceptions. It also logs any failure so we don't
# need to log it again here.
utils.execute('parted', '-a', self._alignment, '-s', self._device,
'--', 'unit', 'MiB', *args, use_standard_locale=True,
run_as_root=True)
def add_partition(self, size, part_type='primary', fs_type='',
boot_flag=None, extra_flags=None):
"""Add a partition.
:param size: The size of the partition in MiB.
:param part_type: The type of the partition. Valid values are:
primary, logical, or extended.
:param fs_type: The filesystem type. Valid types are: ext2, fat32,
fat16, HFS, linux-swap, NTFS, reiserfs, ufs.
If blank (''), it will create a Linux native
partition (83).
:param boot_flag: Boot flag that needs to be configured on the
partition. Ignored if None. It can take values
'bios_grub', 'boot'.
:param extra_flags: List of flags to set on the partition. Ignored
if None.
:returns: The partition number.
"""
self._partitions.append({'size': size,
'type': part_type,
'fs_type': fs_type,
'boot_flag': boot_flag,
'extra_flags': extra_flags})
return len(self._partitions)
def get_partitions(self):
"""Get the partitioning layout.
:returns: An iterator with the partition number and the
partition layout.
"""
return enumerate(self._partitions, 1)
def commit(self):
"""Write to the disk."""
LOG.debug("Committing partitions to disk.")
cmd_args = ['mklabel', self._disk_label]
# NOTE(lucasagomes): Lead in with 1MiB to allow room for the
# partition table itself.
start = 1
for num, part in self.get_partitions():
end = start + part['size']
cmd_args.extend(['mkpart', part['type'], part['fs_type'],
str(start), str(end)])
if part['boot_flag']:
cmd_args.extend(['set', str(num), part['boot_flag'], 'on'])
if part['extra_flags']:
for flag in part['extra_flags']:
cmd_args.extend(['set', str(num), flag, 'on'])
start = end
self._exec(*cmd_args)
try:
utils.wait_for_disk_to_become_available(self._device)
except exception.IronicException as e:
raise exception.InstanceDeployFailure(
_('Disk partitioning failed on device %(device)s. '
'Error: %(error)s')
% {'device': self._device, 'error': e})
def list_opts():
"""Entry point for oslo-config-generator."""
return [('disk_partitioner', opts)]
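
For reference, a minimal usage sketch of the DiskPartitioner API removed above. This is not part of the original file; the device path and partition sizes are illustrative, and running it requires root privileges plus a pre-removal ironic_lib install.

from ironic_lib import disk_partitioner

# Build a GPT layout on a disposable test disk (illustrative path).
dp = disk_partitioner.DiskPartitioner('/dev/sdb', disk_label='gpt')
dp.add_partition(550, fs_type='fat32', boot_flag='boot')  # EFI system partition
dp.add_partition(1024, fs_type='linux-swap')              # swap
dp.add_partition(8192)                                    # root last, so it can grow
for num, part in dp.get_partitions():
    print(num, part['size'], part['fs_type'])
dp.commit()  # runs parted and waits for the device to become available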


@@ -1,735 +0,0 @@
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import re
import stat
import time
import warnings
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import excutils
from ironic_lib.common.i18n import _
from ironic_lib import disk_partitioner
from ironic_lib import exception
from ironic_lib import qemu_img
from ironic_lib import utils
opts = [
cfg.IntOpt('efi_system_partition_size',
default=550,
help='Size of EFI system partition in MiB when configuring '
'UEFI systems for local boot. A common minimum is ~200 '
'megabytes, however OS driven firmware updates and '
'unikernel usage generally requires more space on the '
'efi partition.'),
cfg.IntOpt('bios_boot_partition_size',
default=1,
help='Size of BIOS Boot partition in MiB when configuring '
'GPT partitioned systems for local boot in BIOS.'),
cfg.StrOpt('dd_block_size',
default='1M',
help='Block size to use when writing to the nodes disk.'),
cfg.IntOpt('partition_detection_attempts',
default=3,
min=1,
help='Maximum attempts to detect a newly created partition.'),
cfg.IntOpt('partprobe_attempts',
default=10,
help='Maximum number of attempts to try to read the '
'partition.'),
]
CONF = cfg.CONF
CONF.register_opts(opts, group='disk_utils')
LOG = logging.getLogger(__name__)
_PARTED_PRINT_RE = re.compile(r"^(\d+):([\d\.]+)MiB:"
r"([\d\.]+)MiB:([\d\.]+)MiB:(\w*):(.*):(.*);")
_PARTED_TABLE_TYPE_RE = re.compile(r'^.*partition\s+table\s*:\s*(gpt|msdos)',
re.IGNORECASE | re.MULTILINE)
CONFIGDRIVE_LABEL = "config-2"
MAX_CONFIG_DRIVE_SIZE_MB = 64
# Maximum disk size supported by MBR is 2TB (2 * 1024 * 1024 MB)
MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR = 2097152
# Backward compatibility, do not use
qemu_img_info = qemu_img.image_info
convert_image = qemu_img.convert_image
def list_partitions(device):
"""Get partitions information from given device.
:param device: The device path.
:returns: list of dictionaries (one per partition) with keys:
number, start, end, size (in MiB), filesystem, partition_name,
flags, path.
"""
output = utils.execute(
'parted', '-s', '-m', device, 'unit', 'MiB', 'print',
use_standard_locale=True, run_as_root=True)[0]
if isinstance(output, bytes):
output = output.decode("utf-8")
lines = [line for line in output.split('\n') if line.strip()][2:]
# Example of line: 1:1.00MiB:501MiB:500MiB:ext4::boot
fields = ('number', 'start', 'end', 'size', 'filesystem', 'partition_name',
'flags')
result = []
for line in lines:
match = _PARTED_PRINT_RE.match(line)
if match is None:
LOG.warning("Partition information from parted for device "
"%(device)s does not match "
"expected format: %(line)s",
dict(device=device, line=line))
continue
# Cast int fields to ints (some are floats and we round them down)
groups = [int(float(x)) if i < 4 else x
for i, x in enumerate(match.groups())]
item = dict(zip(fields, groups))
item['path'] = partition_index_to_path(device, item['number'])
result.append(item)
return result
def count_mbr_partitions(device):
"""Count the number of primary and logical partitions on a MBR
:param device: The device path.
:returns: A tuple with the number of primary partitions and logical
partitions.
:raise: ValueError if the device does not have a valid MBR partition
table.
"""
# -d do not update the kernel table
# -s print a summary of the partition table
output, err = utils.execute('partprobe', '-d', '-s', device,
run_as_root=True, use_standard_locale=True)
if 'msdos' not in output:
raise ValueError('The device %s does not have a valid MBR '
'partition table' % device)
# Sample output: /dev/vdb: msdos partitions 1 2 3 <5 6 7>
# The partitions with number > 4 (and inside <>) are logical partitions
output = output.replace('<', '').replace('>', '')
partitions = [int(s) for s in output.split() if s.isdigit()]
return (sum(i < 5 for i in partitions), sum(i > 4 for i in partitions))
def get_disk_identifier(dev):
"""Get the disk identifier from the disk being exposed by the ramdisk.
This disk identifier is appended to the pxe config which will then be
used by chain.c32 to detect the correct disk to chainload. This is helpful
in deployments to nodes with multiple disks.
http://www.syslinux.org/wiki/index.php/Comboot/chain.c32#mbr:
:param dev: Path for the already populated disk device.
:raises OSError: When the hexdump binary is unavailable.
:returns: The Disk Identifier.
"""
disk_identifier = utils.execute('hexdump', '-s', '440', '-n', '4',
'-e', '''\"0x%08x\"''',
dev, run_as_root=True,
attempts=5, delay_on_retry=True)
return disk_identifier[0]
def get_partition_table_type(device):
"""Get partition table type, msdos or gpt.
:param device: the name of the device
    :return: 'msdos', 'gpt' or 'unknown'
"""
out = utils.execute('parted', '--script', device, '--', 'print',
run_as_root=True, use_standard_locale=True)[0]
m = _PARTED_TABLE_TYPE_RE.search(out)
if m:
return m.group(1)
LOG.warning("Unable to get partition table type for device %s", device)
return 'unknown'
def _blkid(device, probe=False, fields=None):
args = []
if probe:
args.append('-p')
if fields:
args += sum((['-s', field] for field in fields), [])
output, err = utils.execute('blkid', device, *args,
use_standard_locale=True, run_as_root=True)
if output.strip():
return output.split(': ', 1)[1]
else:
return ""
def _lsblk(device, deps=True, fields=None):
args = ['--pairs', '--bytes', '--ascii']
if not deps:
args.append('--nodeps')
if fields:
args.extend(['--output', ','.join(fields)])
else:
args.append('--output-all')
output, err = utils.execute('lsblk', device, *args,
use_standard_locale=True, run_as_root=True)
return output.strip()
def get_device_information(device, probe=False, fields=None):
"""Get information about a device using blkid.
Can be applied to all block devices: disks, RAID, partitions.
:param device: Device name.
:param probe: DEPRECATED, do not use.
:param fields: A list of fields to request (all by default).
:return: A dictionary with requested fields as keys.
:raises: ProcessExecutionError
"""
if probe:
output = _blkid(device, probe=True, fields=fields)
else:
output = _lsblk(device, fields=fields, deps=False)
if output:
return next(utils.parse_device_tags(output))
else:
return {}
def find_efi_partition(device):
"""Looks for the EFI partition on a given device.
A boot partition on a GPT disk is assumed to be an EFI partition as well.
:param device: the name of the device
:return: the EFI partition record from `list_partitions` or None
"""
is_gpt = get_partition_table_type(device) == 'gpt'
for part in list_partitions(device):
flags = {x.strip() for x in part['flags'].split(',')}
if 'esp' in flags or ('boot' in flags and is_gpt):
LOG.debug("Found EFI partition %s on device %s", part, device)
return part
else:
LOG.debug("No efi partition found on device %s", device)
def get_uefi_disk_identifier(dev):
"""Get the uuid from the disk being exposed by the ramdisk.
DEPRECATED: use find_efi_partition with get_device_information instead.
:param dev: Path for the already populated disk device.
:raises InstanceDeployFailure: Image is not UEFI bootable.
:returns: The UUID of the partition.
"""
warnings.warn("get_uefi_disk_identifier is deprecated, use "
"find_efi_partition and get_partition_information instead",
DeprecationWarning)
partition_id = None
try:
report, _ = utils.execute('fdisk', '-l', dev, run_as_root=True)
except processutils.ProcessExecutionError as e:
msg = _('Failed to find the partition on the disk %s ') % e
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
for line in report.splitlines():
if line.startswith(dev) and 'EFI System' in line:
vals = line.split()
partition_id = vals[0]
try:
lsblk_output = _lsblk(partition_id, fields=['UUID'])
disk_identifier = lsblk_output.split("=")[1].strip()
disk_identifier = disk_identifier.strip('"')
except processutils.ProcessExecutionError as e:
raise exception.InstanceDeployFailure("Image is not UEFI bootable. "
"Error: %s " % e)
return disk_identifier
_ISCSI_PREFIX = "iqn.2008-10.org.openstack:"
# TODO(dtantsur): deprecate node_uuid here, it's not overly useful (any iSCSI
# device should get the same treatment).
def is_iscsi_device(dev, node_uuid=None):
"""Check whether the device path belongs to an iSCSI device.
If node UUID is provided, checks that the device belongs to this UUID.
"""
if node_uuid:
return (_ISCSI_PREFIX + node_uuid) in dev
else:
return _ISCSI_PREFIX in dev
def is_last_char_digit(dev):
"""check whether device name ends with a digit"""
if len(dev) >= 1:
return dev[-1].isdigit()
return False
def partition_index_to_path(device, index):
"""Guess a partition path based on its device and index.
:param device: Device path.
:param index: Partition index.
"""
    # Actual device names on bare metal look like /dev/sda, /dev/sdb, etc.,
    # while iSCSI device names embed the IQN. When this function is called
    # by ironic-conductor for an iSCSI device, "-part%d" is appended to the
    # device name; on bare metal the partitions are named /dev/sda1 rather
    # than /dev/sda-part1.
if is_iscsi_device(device):
part_template = '%s-part%d'
elif is_last_char_digit(device):
part_template = '%sp%d'
else:
part_template = '%s%d'
return part_template % (device, index)
def make_partitions(dev, root_mb, swap_mb, ephemeral_mb,
configdrive_mb, node_uuid, commit=True,
boot_option="netboot", boot_mode="bios",
disk_label=None, cpu_arch=""):
"""Partition the disk device.
Create partitions for root, swap, ephemeral and configdrive on a
disk device.
:param dev: Path for the device to work on.
:param root_mb: Size of the root partition in mebibytes (MiB).
:param swap_mb: Size of the swap partition in mebibytes (MiB). If 0,
no partition will be created.
:param ephemeral_mb: Size of the ephemeral partition in mebibytes (MiB).
If 0, no partition will be created.
:param configdrive_mb: Size of the configdrive partition in
mebibytes (MiB). If 0, no partition will be created.
:param commit: True/False. Default for this setting is True. If False
partitions will not be written to disk.
:param boot_option: Can be "local" or "netboot". "netboot" by default.
:param boot_mode: Can be "bios" or "uefi". "bios" by default.
:param node_uuid: Node's uuid. Used for logging.
:param disk_label: The disk label to be used when creating the
partition table. Valid values are: "msdos", "gpt" or None; If None
Ironic will figure it out according to the boot_mode parameter.
    :param cpu_arch: Architecture of the node the disk device belongs to.
        When using the default value (an empty string), no architecture
        specific steps will be taken. This default should be used for
        x86_64. When set to ppc64*, architecture specific steps are taken
        for booting a partition image locally.
:returns: A dictionary containing the partition type as Key and partition
path as Value for the partitions created by this method.
"""
LOG.debug("Starting to partition the disk device: %(dev)s "
"for node %(node)s",
{'dev': dev, 'node': node_uuid})
part_dict = {}
if disk_label is None:
disk_label = 'gpt' if boot_mode == 'uefi' else 'msdos'
dp = disk_partitioner.DiskPartitioner(dev, disk_label=disk_label)
# For uefi localboot, switch partition table to gpt and create the efi
# system partition as the first partition.
if boot_mode == "uefi" and boot_option == "local":
part_num = dp.add_partition(CONF.disk_utils.efi_system_partition_size,
fs_type='fat32',
boot_flag='boot')
part_dict['efi system partition'] = partition_index_to_path(
dev, part_num)
if (boot_mode == "bios" and boot_option == "local" and disk_label == "gpt"
and not cpu_arch.startswith('ppc64')):
part_num = dp.add_partition(CONF.disk_utils.bios_boot_partition_size,
boot_flag='bios_grub')
part_dict['BIOS Boot partition'] = partition_index_to_path(
dev, part_num)
# NOTE(mjturek): With ppc64* nodes, partition images are expected to have
# a PrEP partition at the start of the disk. This is an 8 MiB partition
# with the boot and prep flags set. The bootloader should be installed
# here.
if (cpu_arch.startswith("ppc64") and boot_mode == "bios"
and boot_option == "local"):
LOG.debug("Add PReP boot partition (8 MB) to device: "
"%(dev)s for node %(node)s",
{'dev': dev, 'node': node_uuid})
boot_flag = 'boot' if disk_label == 'msdos' else None
part_num = dp.add_partition(8, part_type='primary',
boot_flag=boot_flag, extra_flags=['prep'])
part_dict['PReP Boot partition'] = partition_index_to_path(
dev, part_num)
if ephemeral_mb:
LOG.debug("Add ephemeral partition (%(size)d MB) to device: %(dev)s "
"for node %(node)s",
{'dev': dev, 'size': ephemeral_mb, 'node': node_uuid})
part_num = dp.add_partition(ephemeral_mb)
part_dict['ephemeral'] = partition_index_to_path(dev, part_num)
if swap_mb:
LOG.debug("Add Swap partition (%(size)d MB) to device: %(dev)s "
"for node %(node)s",
{'dev': dev, 'size': swap_mb, 'node': node_uuid})
part_num = dp.add_partition(swap_mb, fs_type='linux-swap')
part_dict['swap'] = partition_index_to_path(dev, part_num)
if configdrive_mb:
LOG.debug("Add config drive partition (%(size)d MB) to device: "
"%(dev)s for node %(node)s",
{'dev': dev, 'size': configdrive_mb, 'node': node_uuid})
part_num = dp.add_partition(configdrive_mb)
part_dict['configdrive'] = partition_index_to_path(dev, part_num)
# NOTE(lucasagomes): Make the root partition the last partition. This
# enables tools like cloud-init's growroot utility to expand the root
# partition until the end of the disk.
LOG.debug("Add root partition (%(size)d MB) to device: %(dev)s "
"for node %(node)s",
{'dev': dev, 'size': root_mb, 'node': node_uuid})
boot_val = 'boot' if (not cpu_arch.startswith("ppc64")
and boot_mode == "bios"
and boot_option == "local"
and disk_label == "msdos") else None
part_num = dp.add_partition(root_mb, boot_flag=boot_val)
part_dict['root'] = partition_index_to_path(dev, part_num)
if commit:
# write to the disk
dp.commit()
trigger_device_rescan(dev)
return part_dict
def is_block_device(dev):
"""Check whether a device is block or not."""
attempts = CONF.disk_utils.partition_detection_attempts
for attempt in range(attempts):
try:
s = os.stat(dev)
except OSError as e:
LOG.debug("Unable to stat device %(dev)s. Attempt %(attempt)d "
"out of %(total)d. Error: %(err)s",
{"dev": dev, "attempt": attempt + 1,
"total": attempts, "err": e})
time.sleep(1)
else:
return stat.S_ISBLK(s.st_mode)
msg = _("Unable to stat device %(dev)s after attempting to verify "
"%(attempts)d times.") % {'dev': dev, 'attempts': attempts}
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
def dd(src, dst, conv_flags=None):
"""Execute dd from src to dst."""
if conv_flags:
extra_args = ['conv=%s' % conv_flags]
else:
extra_args = []
utils.dd(src, dst, 'bs=%s' % CONF.disk_utils.dd_block_size, 'oflag=direct',
*extra_args)
def populate_image(src, dst, conv_flags=None):
data = qemu_img.image_info(src)
if data.file_format == 'raw':
dd(src, dst, conv_flags=conv_flags)
else:
qemu_img.convert_image(src, dst, 'raw', True, sparse_size='0')
def block_uuid(dev):
"""Get UUID of a block device.
    Try to fetch the UUID; if that fails, fall back to the PARTUUID.
"""
info = get_device_information(dev, fields=['UUID', 'PARTUUID'])
if info.get('UUID'):
return info['UUID']
else:
LOG.debug('Falling back to partition UUID as the block device UUID '
'was not found while examining %(device)s',
{'device': dev})
return info.get('PARTUUID', '')
def get_image_mb(image_path, virtual_size=True):
"""Get size of an image in Megabyte."""
mb = 1024 * 1024
if not virtual_size:
image_byte = os.path.getsize(image_path)
else:
data = qemu_img.image_info(image_path)
image_byte = data.virtual_size
# round up size to MB
image_mb = int((image_byte + mb - 1) / mb)
return image_mb
def get_dev_block_size(dev):
"""Get the device size in 512 byte sectors."""
block_sz, cmderr = utils.execute('blockdev', '--getsz', dev,
run_as_root=True)
return int(block_sz)
def get_dev_byte_size(dev):
"""Get the device size in bytes."""
byte_sz, cmderr = utils.execute('blockdev', '--getsize64', dev,
run_as_root=True)
return int(byte_sz)
def get_dev_sector_size(dev):
"""Get the device logical sector size in bytes."""
sect_sz, cmderr = utils.execute('blockdev', '--getss', dev,
run_as_root=True)
return int(sect_sz)
def destroy_disk_metadata(dev, node_uuid):
"""Destroy metadata structures on node's disk.
Ensure that node's disk magic strings are wiped without zeroing the
entire drive. To do this we use the wipefs tool from util-linux.
:param dev: Path for the device to work on.
:param node_uuid: Node's uuid. Used for logging.
"""
# NOTE(NobodyCam): This is needed to work around bug:
# https://bugs.launchpad.net/ironic/+bug/1317647
LOG.debug("Start destroy disk metadata for node %(node)s.",
{'node': node_uuid})
try:
utils.execute('wipefs', '--force', '--all', dev,
run_as_root=True,
use_standard_locale=True)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception() as ctxt:
# NOTE(zhenguo): Check if --force option is supported for wipefs,
# if not, we should try without it.
if '--force' in str(e):
ctxt.reraise = False
utils.execute('wipefs', '--all', dev,
run_as_root=True,
use_standard_locale=True)
# NOTE(TheJulia): sgdisk attempts to load and make sense of the
# partition tables in advance of wiping the partition data.
# This means when a CRC error is found, sgdisk fails before
# erasing partition data.
# This is the same bug as
# https://bugs.launchpad.net/ironic-python-agent/+bug/1737556
sector_size = get_dev_sector_size(dev)
# https://uefi.org/specs/UEFI/2.10/05_GUID_Partition_Table_Format.html If
# the block size is 512, the First Usable LBA must be greater than or equal
# to 34 [...] if the logical block size is 4096, the First Usable LBA must
# be greater than or equal to 6
    if sector_size == 4096:
        gpt_sectors = 5
    else:
        # 512-byte sectors; also a safe fallback for unexpected sector sizes.
        gpt_sectors = 33
# Overwrite the Primary GPT, catch very small partitions (like EBRs)
dd_bs = 'bs=%s' % sector_size
dd_device = 'of=%s' % dev
dd_count = 'count=%s' % gpt_sectors
dev_size = get_dev_byte_size(dev)
if dev_size < gpt_sectors * sector_size:
dd_count = 'count=%s' % int(dev_size / sector_size)
utils.execute('dd', dd_bs, 'if=/dev/zero', dd_device, dd_count,
'oflag=direct', run_as_root=True, use_standard_locale=True)
# Overwrite the Secondary GPT, do this only if there could be one
if dev_size > gpt_sectors * sector_size:
gpt_backup = int(dev_size / sector_size - gpt_sectors)
dd_seek = 'seek=%i' % gpt_backup
dd_count = 'count=%s' % gpt_sectors
utils.execute('dd', dd_bs, 'if=/dev/zero', dd_device, dd_count,
'oflag=direct', dd_seek, run_as_root=True,
use_standard_locale=True)
# Go ahead and let sgdisk run as well.
utils.execute('sgdisk', '-Z', dev, run_as_root=True,
use_standard_locale=True)
try:
utils.wait_for_disk_to_become_available(dev)
except exception.IronicException as e:
raise exception.InstanceDeployFailure(
_('Destroying metadata failed on device %(device)s. '
'Error: %(error)s')
% {'device': dev, 'error': e})
LOG.info("Disk metadata on %(dev)s successfully destroyed for node "
"%(node)s", {'dev': dev, 'node': node_uuid})
def list_opts():
"""Entry point for oslo-config-generator."""
return [('disk_utils', opts)]
def _fix_gpt_structs(device, node_uuid):
"""Checks backup GPT data structures and moves them to end of the device
:param device: The device path.
:param node_uuid: UUID of the Node. Used for logging.
:raises: InstanceDeployFailure, if any disk partitioning related
commands fail.
"""
try:
output, _err = utils.execute('sgdisk', '-v', device, run_as_root=True)
search_str = "it doesn't reside\nat the end of the disk"
if search_str in output:
utils.execute('sgdisk', '-e', device, run_as_root=True)
except (processutils.UnknownArgumentError,
processutils.ProcessExecutionError, OSError) as e:
msg = (_('Failed to fix GPT data structures on disk %(disk)s '
'for node %(node)s. Error: %(error)s') %
{'disk': device, 'node': node_uuid, 'error': e})
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
def fix_gpt_partition(device, node_uuid):
"""Fix GPT partition
    Fix the GPT table information when an image is written to a disk that
    has a larger extent (e.g. a 30 GB image written to a 60 GB physical disk).
:param device: The device path.
:param node_uuid: UUID of the Node.
:raises: InstanceDeployFailure if exception is caught.
"""
try:
disk_is_gpt_partitioned = (get_partition_table_type(device) == 'gpt')
if disk_is_gpt_partitioned:
_fix_gpt_structs(device, node_uuid)
except Exception as e:
msg = (_('Failed to fix GPT partition on disk %(disk)s '
'for node %(node)s. Error: %(error)s') %
{'disk': device, 'node': node_uuid, 'error': e})
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
def udev_settle():
"""Wait for the udev event queue to settle.
Wait for the udev event queue to settle to make sure all devices
are detected once the machine boots up.
:return: True on success, False otherwise.
"""
LOG.debug('Waiting until udev event queue is empty')
try:
utils.execute('udevadm', 'settle')
except processutils.ProcessExecutionError as e:
LOG.warning('Something went wrong when waiting for udev '
'to settle. Error: %s', e)
return False
else:
return True
def partprobe(device, attempts=None):
"""Probe partitions on the given device.
    :param device: The block device whose partition table is being updated.
:param attempts: Number of attempts to run partprobe, the default is read
from the configuration.
:return: True on success, False otherwise.
"""
if attempts is None:
attempts = CONF.disk_utils.partprobe_attempts
try:
utils.execute('partprobe', device, run_as_root=True, attempts=attempts)
except (processutils.UnknownArgumentError,
processutils.ProcessExecutionError, OSError) as e:
LOG.warning("Unable to probe for partitions on device %(device)s, "
"the partitioning table may be broken. Error: %(error)s",
{'device': device, 'error': e})
return False
else:
return True
def trigger_device_rescan(device, attempts=None):
"""Sync and trigger device rescan.
    Disk partitioning performed via parted on a ramdisk does not have to
    honor the fsync mechanism. In essence, fsync is used on the file
    representing the block device, which falls to the kernel filesystem
    layer to trigger a sync event. On a ramdisk using ramfs, this is an
    explicit no-op.
    As a result, we need to trigger a system-wide sync operation, which
    flushes the caches to disk, after which the partition changes should
    be visible upon re-scan.
    When ramdisks are not in use, this also helps ensure that data has
    been safely flushed across the wire, such as on iSCSI connections.
    :param device: The block device whose partition table is being updated.
:param attempts: Number of attempts to run partprobe, the default is read
from the configuration.
:return: True on success, False otherwise.
"""
LOG.debug('Explicitly calling sync to force buffer/cache flush')
utils.execute('sync')
# Make sure any additions to the partitioning are reflected in the
# kernel.
udev_settle()
partprobe(device, attempts=attempts)
udev_settle()
try:
# Also verify that the partitioning is correct now.
utils.execute('sgdisk', '-v', device, run_as_root=True)
except processutils.ProcessExecutionError as exc:
LOG.warning('Failed to verify partition tables on device %(dev)s: '
'%(err)s', {'dev': device, 'err': exc})
return False
else:
return True
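
For reference, a minimal sketch of how the removed make_partitions(), populate_image() and trigger_device_rescan() helpers fit together. This is not part of the original file; the device path, sizes, image path and node UUID are illustrative, and the calls require root privileges plus a pre-removal ironic_lib install.

from ironic_lib import disk_utils

# Partition a disposable disk for a UEFI local-boot deployment.
layout = disk_utils.make_partitions(
    '/dev/sdb', root_mb=8192, swap_mb=1024, ephemeral_mb=0,
    configdrive_mb=64, node_uuid='1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
    boot_option='local', boot_mode='uefi', disk_label='gpt')
# layout maps partition roles to device paths, e.g.
# {'efi system partition': '/dev/sdb1', 'swap': '/dev/sdb2',
#  'configdrive': '/dev/sdb3', 'root': '/dev/sdb4'}
disk_utils.populate_image('/tmp/image.raw', layout['root'])
disk_utils.trigger_device_rescan('/dev/sdb')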


@@ -1,117 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import imageutils
from oslo_utils import units
import tenacity
from ironic_lib.common.i18n import _
from ironic_lib import utils
opts = [
cfg.IntOpt('image_convert_memory_limit',
default=2048,
help='Memory limit for "qemu-img convert" in MiB. Implemented '
'via the address space resource limit.'),
cfg.IntOpt('image_convert_attempts',
default=3,
help='Number of attempts to convert an image.'),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(opts, group='disk_utils')
# Limit the memory address space when running qemu-img (2 GiB by default,
# see the disk_utils.image_convert_memory_limit option)
QEMU_IMG_LIMITS = None
def _qemu_img_limits():
global QEMU_IMG_LIMITS
if QEMU_IMG_LIMITS is None:
QEMU_IMG_LIMITS = processutils.ProcessLimits(
address_space=CONF.disk_utils.image_convert_memory_limit
* units.Mi)
return QEMU_IMG_LIMITS
def _retry_on_res_temp_unavailable(exc):
if (isinstance(exc, processutils.ProcessExecutionError)
and ('Resource temporarily unavailable' in exc.stderr
or 'Cannot allocate memory' in exc.stderr)):
return True
return False
def image_info(path):
"""Return an object containing the parsed output from qemu-img info."""
if not os.path.exists(path):
raise FileNotFoundError(_("File %s does not exist") % path)
out, err = utils.execute('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path,
'--output=json',
prlimit=_qemu_img_limits())
return imageutils.QemuImgInfo(out, format='json')
@tenacity.retry(
retry=tenacity.retry_if_exception(_retry_on_res_temp_unavailable),
stop=tenacity.stop_after_attempt(CONF.disk_utils.image_convert_attempts),
reraise=True)
def convert_image(source, dest, out_format, run_as_root=False, cache=None,
out_of_order=False, sparse_size=None):
"""Convert image to other format."""
cmd = ['qemu-img', 'convert', '-O', out_format]
if cache is not None:
cmd += ['-t', cache]
if sparse_size is not None:
cmd += ['-S', sparse_size]
if out_of_order:
cmd.append('-W')
cmd += [source, dest]
# NOTE(TheJulia): Statically set the MALLOC_ARENA_MAX to prevent leaking
# and the creation of new malloc arenas which will consume the system
# memory. If limited to 1, qemu-img consumes ~250 MB of RAM, but when
# another thread tries to access a locked section of memory in use with
# another thread, then by default a new malloc arena is created,
# which essentially balloons the memory requirement of the machine.
    # The default for qemu-img is 8 * nCPU arenas of ~250 MB each (based on
    # defaults plus thread/code/process/library overhead), i.e. roughly
    # 64 GB on a machine with 32 CPUs. Limiting this to 3 keeps the memory
    # utilization in happy cases below the overall threshold that is in
    # place in case a malicious image is passed through qemu-img.
env_vars = {'MALLOC_ARENA_MAX': '3'}
try:
utils.execute(*cmd, run_as_root=run_as_root,
prlimit=_qemu_img_limits(),
use_standard_locale=True,
env_variables=env_vars)
except processutils.ProcessExecutionError as e:
if ('Resource temporarily unavailable' in e.stderr
or 'Cannot allocate memory' in e.stderr):
LOG.debug('Failed to convert image, retrying. Error: %s', e)
# Sync disk caches before the next attempt
utils.execute('sync')
raise
def list_opts():
"""Entry point for oslo-config-generator."""
return [('disk_utils', opts)]
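
For reference, a minimal sketch of the removed qemu_img helpers. This is not part of the original file; the image paths are illustrative, and the calls require a pre-removal ironic_lib install plus the qemu-img binary.

from ironic_lib import qemu_img

info = qemu_img.image_info('/tmp/image.qcow2')
print(info.file_format, info.virtual_size)
if info.file_format != 'raw':
    # Convert under the same address-space limit and retry policy as above.
    qemu_img.convert_image('/tmp/image.qcow2', '/tmp/image.raw', 'raw',
                           sparse_size='0')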


@@ -1,205 +0,0 @@
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from ironic_lib import disk_partitioner
from ironic_lib import exception
from ironic_lib.tests import base
from ironic_lib import utils
CONF = disk_partitioner.CONF
class DiskPartitionerTestCase(base.IronicLibTestCase):
def test_add_partition(self):
dp = disk_partitioner.DiskPartitioner('/dev/fake')
dp.add_partition(1024)
dp.add_partition(512, fs_type='linux-swap')
dp.add_partition(2048, boot_flag='boot')
dp.add_partition(2048, boot_flag='bios_grub')
expected = [(1, {'boot_flag': None,
'extra_flags': None,
'fs_type': '',
'type': 'primary',
'size': 1024}),
(2, {'boot_flag': None,
'extra_flags': None,
'fs_type': 'linux-swap',
'type': 'primary',
'size': 512}),
(3, {'boot_flag': 'boot',
'extra_flags': None,
'fs_type': '',
'type': 'primary',
'size': 2048}),
(4, {'boot_flag': 'bios_grub',
'extra_flags': None,
'fs_type': '',
'type': 'primary',
'size': 2048})]
partitions = [(n, p) for n, p in dp.get_partitions()]
self.assertEqual(4, len(partitions))
self.assertEqual(expected, partitions)
@mock.patch.object(disk_partitioner.DiskPartitioner, '_exec',
autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test_commit(self, mock_utils_exc, mock_disk_partitioner_exec):
dp = disk_partitioner.DiskPartitioner('/dev/fake')
fake_parts = [(1, {'boot_flag': None,
'extra_flags': None,
'fs_type': 'fake-fs-type',
'type': 'fake-type',
'size': 1}),
(2, {'boot_flag': 'boot',
'extra_flags': None,
'fs_type': 'fake-fs-type',
'type': 'fake-type',
'size': 1}),
(3, {'boot_flag': 'bios_grub',
'extra_flags': None,
'fs_type': 'fake-fs-type',
'type': 'fake-type',
'size': 1}),
(4, {'boot_flag': 'boot',
'extra_flags': ['prep', 'fake-flag'],
'fs_type': 'fake-fs-type',
'type': 'fake-type',
'size': 1})]
with mock.patch.object(dp, 'get_partitions', autospec=True) as mock_gp:
mock_gp.return_value = fake_parts
mock_utils_exc.return_value = ('', '')
dp.commit()
mock_disk_partitioner_exec.assert_called_once_with(
mock.ANY, 'mklabel', 'msdos',
'mkpart', 'fake-type', 'fake-fs-type', '1', '2',
'mkpart', 'fake-type', 'fake-fs-type', '2', '3',
'set', '2', 'boot', 'on',
'mkpart', 'fake-type', 'fake-fs-type', '3', '4',
'set', '3', 'bios_grub', 'on',
'mkpart', 'fake-type', 'fake-fs-type', '4', '5',
'set', '4', 'boot', 'on', 'set', '4', 'prep', 'on',
'set', '4', 'fake-flag', 'on')
mock_utils_exc.assert_called_once_with(
'fuser', '/dev/fake', run_as_root=True,
check_exit_code=[0, 1])
@mock.patch.object(disk_partitioner.DiskPartitioner, '_exec',
autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test_commit_with_device_is_busy_once(self, mock_utils_exc,
mock_disk_partitioner_exec):
CONF.set_override('check_device_interval', 0, group='disk_partitioner')
dp = disk_partitioner.DiskPartitioner('/dev/fake')
fake_parts = [(1, {'boot_flag': None,
'extra_flags': None,
'fs_type': 'fake-fs-type',
'type': 'fake-type',
'size': 1}),
(2, {'boot_flag': 'boot',
'extra_flags': None,
'fs_type': 'fake-fs-type',
'type': 'fake-type',
'size': 1})]
        # Test as if using the 'psmisc' version of 'fuser', which produces
        # stderr output
fuser_outputs = iter([(" 10000 10001", '/dev/fake:\n'), ('', '')])
with mock.patch.object(dp, 'get_partitions', autospec=True) as mock_gp:
mock_gp.return_value = fake_parts
mock_utils_exc.side_effect = fuser_outputs
dp.commit()
mock_disk_partitioner_exec.assert_called_once_with(
mock.ANY, 'mklabel', 'msdos',
'mkpart', 'fake-type', 'fake-fs-type', '1', '2',
'mkpart', 'fake-type', 'fake-fs-type', '2', '3',
'set', '2', 'boot', 'on')
mock_utils_exc.assert_called_with(
'fuser', '/dev/fake', run_as_root=True,
check_exit_code=[0, 1])
self.assertEqual(2, mock_utils_exc.call_count)
@mock.patch.object(disk_partitioner.DiskPartitioner, '_exec',
autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test_commit_with_device_is_always_busy(self, mock_utils_exc,
mock_disk_partitioner_exec):
CONF.set_override('check_device_interval', 0, group='disk_partitioner')
dp = disk_partitioner.DiskPartitioner('/dev/fake')
fake_parts = [(1, {'boot_flag': None,
'extra_flags': None,
'fs_type': 'fake-fs-type',
'type': 'fake-type',
'size': 1}),
(2, {'boot_flag': 'boot',
'extra_flags': None,
'fs_type': 'fake-fs-type',
'type': 'fake-type',
'size': 1})]
with mock.patch.object(dp, 'get_partitions', autospec=True) as mock_gp:
mock_gp.return_value = fake_parts
            # Test as if using the 'busybox' version of 'fuser', which does
            # not produce stderr output
mock_utils_exc.return_value = ("10000 10001", '')
self.assertRaises(exception.InstanceDeployFailure, dp.commit)
mock_disk_partitioner_exec.assert_called_once_with(
mock.ANY, 'mklabel', 'msdos',
'mkpart', 'fake-type', 'fake-fs-type', '1', '2',
'mkpart', 'fake-type', 'fake-fs-type', '2', '3',
'set', '2', 'boot', 'on')
mock_utils_exc.assert_called_with(
'fuser', '/dev/fake', run_as_root=True,
check_exit_code=[0, 1])
self.assertEqual(20, mock_utils_exc.call_count)
@mock.patch.object(disk_partitioner.DiskPartitioner, '_exec',
autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test_commit_with_device_disconnected(self, mock_utils_exc,
mock_disk_partitioner_exec):
CONF.set_override('check_device_interval', 0, group='disk_partitioner')
dp = disk_partitioner.DiskPartitioner('/dev/fake')
fake_parts = [(1, {'boot_flag': None,
'extra_flags': None,
'fs_type': 'fake-fs-type',
'type': 'fake-type',
'size': 1}),
(2, {'boot_flag': 'boot',
'extra_flags': None,
'fs_type': 'fake-fs-type',
'type': 'fake-type',
'size': 1})]
with mock.patch.object(dp, 'get_partitions', autospec=True) as mock_gp:
mock_gp.return_value = fake_parts
mock_utils_exc.return_value = ('', "Specified filename /dev/fake"
" does not exist.")
self.assertRaises(exception.InstanceDeployFailure, dp.commit)
mock_disk_partitioner_exec.assert_called_once_with(
mock.ANY, 'mklabel', 'msdos',
'mkpart', 'fake-type', 'fake-fs-type', '1', '2',
'mkpart', 'fake-type', 'fake-fs-type', '2', '3',
'set', '2', 'boot', 'on')
mock_utils_exc.assert_called_with(
'fuser', '/dev/fake', run_as_root=True,
check_exit_code=[0, 1])
self.assertEqual(20, mock_utils_exc.call_count)


@@ -1,962 +0,0 @@
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import stat
from unittest import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from ironic_lib import disk_utils
from ironic_lib import exception
from ironic_lib import qemu_img
from ironic_lib.tests import base
from ironic_lib import utils
CONF = cfg.CONF
@mock.patch.object(utils, 'execute', autospec=True)
class ListPartitionsTestCase(base.IronicLibTestCase):
def test_correct(self, execute_mock):
output = """
BYT;
/dev/sda:500107862016B:scsi:512:4096:msdos:ATA HGST HTS725050A7:;
1:1.00MiB:501MiB:500MiB:ext4::boot;
2:501MiB:476940MiB:476439MiB:::;
"""
expected = [
{'number': 1, 'start': 1, 'end': 501, 'size': 500,
'filesystem': 'ext4', 'partition_name': '', 'flags': 'boot',
'path': '/dev/fake1'},
{'number': 2, 'start': 501, 'end': 476940, 'size': 476439,
'filesystem': '', 'partition_name': '', 'flags': '',
'path': '/dev/fake2'},
]
execute_mock.return_value = (output, '')
result = disk_utils.list_partitions('/dev/fake')
self.assertEqual(expected, result)
execute_mock.assert_called_once_with(
'parted', '-s', '-m', '/dev/fake', 'unit', 'MiB', 'print',
use_standard_locale=True, run_as_root=True)
@mock.patch.object(disk_utils.LOG, 'warning', autospec=True)
def test_incorrect(self, log_mock, execute_mock):
output = """
BYT;
/dev/sda:500107862016B:scsi:512:4096:msdos:ATA HGST HTS725050A7:;
1:XX1076MiB:---:524MiB:ext4::boot;
"""
execute_mock.return_value = (output, '')
self.assertEqual([], disk_utils.list_partitions('/dev/fake'))
self.assertEqual(1, log_mock.call_count)
def test_correct_gpt_nvme(self, execute_mock):
output = """
BYT;
/dev/vda:40960MiB:virtblk:512:512:gpt:Virtio Block Device:;
2:1.00MiB:2.00MiB:1.00MiB::Bios partition:bios_grub;
1:4.00MiB:5407MiB:5403MiB:ext4:Root partition:;
3:5407MiB:5507MiB:100MiB:fat16:Boot partition:boot, esp;
"""
expected = [
{'end': 2, 'number': 2, 'start': 1, 'flags': 'bios_grub',
'filesystem': '', 'partition_name': 'Bios partition', 'size': 1,
'path': '/dev/fake0p2'},
{'end': 5407, 'number': 1, 'start': 4, 'flags': '',
'filesystem': 'ext4', 'partition_name': 'Root partition',
'size': 5403, 'path': '/dev/fake0p1'},
{'end': 5507, 'number': 3, 'start': 5407,
'flags': 'boot, esp', 'filesystem': 'fat16',
'partition_name': 'Boot partition', 'size': 100,
'path': '/dev/fake0p3'},
]
execute_mock.return_value = (output, '')
result = disk_utils.list_partitions('/dev/fake0')
self.assertEqual(expected, result)
execute_mock.assert_called_once_with(
'parted', '-s', '-m', '/dev/fake0', 'unit', 'MiB', 'print',
use_standard_locale=True, run_as_root=True)
@mock.patch.object(disk_utils.LOG, 'warning', autospec=True)
def test_incorrect_gpt(self, log_mock, execute_mock):
output = """
BYT;
/dev/vda:40960MiB:virtblk:512:512:gpt:Virtio Block Device:;
2:XX1.00MiB:---:1.00MiB::primary:bios_grub;
"""
execute_mock.return_value = (output, '')
self.assertEqual([], disk_utils.list_partitions('/dev/fake'))
self.assertEqual(1, log_mock.call_count)
class GetUEFIDiskIdentifierTestCase(base.IronicLibTestCase):
def setUp(self):
super(GetUEFIDiskIdentifierTestCase, self).setUp()
self.dev = '/dev/fake'
@mock.patch.object(utils, 'execute', autospec=True)
def test_get_uefi_disk_identifier_uefi_bootable_image(self, mock_execute):
mock_execute.return_value = ('', '')
fdisk_output = """
Disk /dev/sda: 931.5 GiB, 1000171331584 bytes, 1953459632 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 262144 bytes / 262144 bytes
Disklabel type: gpt
Disk identifier: 73457A6C-3595-4965-8D83-2EA1BD85F327
Device Start End Sectors Size Type
/dev/fake-part1 2048 1050623 1048576 512M EFI System
/dev/fake-part2 1050624 1920172031 1919121408 915.1G Linux filesystem
/dev/fake-part3 1920172032 1953458175 33286144 15.9G Linux swap
"""
partition_id = '/dev/fake-part1'
lsblk_output = 'UUID="ABCD-B05B"\n'
part_result = 'ABCD-B05B'
mock_execute.side_effect = [(fdisk_output, ''), (lsblk_output, '')]
result = disk_utils.get_uefi_disk_identifier(self.dev)
self.assertEqual(part_result, result)
execute_calls = [
mock.call('fdisk', '-l', self.dev, run_as_root=True),
mock.call('lsblk', partition_id, '--pairs', '--bytes', '--ascii',
'--output', 'UUID', use_standard_locale=True,
run_as_root=True)
]
mock_execute.assert_has_calls(execute_calls)
@mock.patch.object(utils, 'execute', autospec=True)
def test_get_uefi_disk_identifier_non_uefi_bootable_image(self,
mock_execute):
mock_execute.return_value = ('', '')
fdisk_output = """
Disk /dev/vda: 50 GiB, 53687091200 bytes, 104857600 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: dos
Disk identifier: 0xb82b9faf
Device Boot Start End Sectors Size Id Type
/dev/fake-part1 * 2048 104857566 104855519 50G 83 Linux
"""
partition_id = None
mock_execute.side_effect = [(fdisk_output, ''),
processutils.ProcessExecutionError()]
self.assertRaises(exception.InstanceDeployFailure,
disk_utils.get_uefi_disk_identifier, self.dev)
execute_calls = [
mock.call('fdisk', '-l', self.dev, run_as_root=True),
mock.call('lsblk', partition_id, '--pairs', '--bytes', '--ascii',
'--output', 'UUID', use_standard_locale=True,
run_as_root=True)
]
mock_execute.assert_has_calls(execute_calls)
@mock.patch.object(utils, 'execute', autospec=True)
class MakePartitionsTestCase(base.IronicLibTestCase):
def setUp(self):
super(MakePartitionsTestCase, self).setUp()
self.dev = 'fake-dev'
self.root_mb = 1024
self.swap_mb = 512
self.ephemeral_mb = 0
self.configdrive_mb = 0
self.node_uuid = "12345678-1234-1234-1234-1234567890abcxyz"
self.efi_size = CONF.disk_utils.efi_system_partition_size
self.bios_size = CONF.disk_utils.bios_boot_partition_size
def _get_parted_cmd(self, dev, label=None):
if label is None:
label = 'msdos'
return ['parted', '-a', 'optimal', '-s', dev,
'--', 'unit', 'MiB', 'mklabel', label]
def _add_efi_sz(self, x):
return str(x + self.efi_size)
def _add_bios_sz(self, x):
return str(x + self.bios_size)
def _test_make_partitions(self, mock_exc, boot_option, boot_mode='bios',
disk_label=None, cpu_arch=""):
mock_exc.return_value = ('', '')
disk_utils.make_partitions(self.dev, self.root_mb, self.swap_mb,
self.ephemeral_mb, self.configdrive_mb,
self.node_uuid, boot_option=boot_option,
boot_mode=boot_mode, disk_label=disk_label,
cpu_arch=cpu_arch)
if boot_option == "local" and boot_mode == "uefi":
expected_mkpart = ['mkpart', 'primary', 'fat32', '1',
self._add_efi_sz(1),
'set', '1', 'boot', 'on',
'mkpart', 'primary', 'linux-swap',
self._add_efi_sz(1), self._add_efi_sz(513),
'mkpart', 'primary', '', self._add_efi_sz(513),
self._add_efi_sz(1537)]
else:
if boot_option == "local":
if disk_label == "gpt":
if cpu_arch.startswith('ppc64'):
expected_mkpart = ['mkpart', 'primary', '', '1', '9',
'set', '1', 'prep', 'on',
'mkpart', 'primary', 'linux-swap',
'9', '521', 'mkpart', 'primary',
'', '521', '1545']
else:
expected_mkpart = ['mkpart', 'primary', '', '1',
self._add_bios_sz(1),
'set', '1', 'bios_grub', 'on',
'mkpart', 'primary', 'linux-swap',
self._add_bios_sz(1),
self._add_bios_sz(513),
'mkpart', 'primary', '',
self._add_bios_sz(513),
self._add_bios_sz(1537)]
elif cpu_arch.startswith('ppc64'):
expected_mkpart = ['mkpart', 'primary', '', '1', '9',
'set', '1', 'boot', 'on',
'set', '1', 'prep', 'on',
'mkpart', 'primary', 'linux-swap',
'9', '521', 'mkpart', 'primary',
'', '521', '1545']
else:
expected_mkpart = ['mkpart', 'primary', 'linux-swap', '1',
'513', 'mkpart', 'primary', '', '513',
'1537', 'set', '2', 'boot', 'on']
else:
expected_mkpart = ['mkpart', 'primary', 'linux-swap', '1',
'513', 'mkpart', 'primary', '', '513',
'1537']
self.dev = 'fake-dev'
parted_cmd = (self._get_parted_cmd(self.dev, disk_label)
+ expected_mkpart)
parted_call = mock.call(*parted_cmd, use_standard_locale=True,
run_as_root=True)
fuser_cmd = ['fuser', 'fake-dev']
fuser_call = mock.call(*fuser_cmd, run_as_root=True,
check_exit_code=[0, 1])
sync_calls = [mock.call('sync'),
mock.call('udevadm', 'settle'),
mock.call('partprobe', self.dev, attempts=10,
run_as_root=True),
mock.call('udevadm', 'settle'),
mock.call('sgdisk', '-v', self.dev, run_as_root=True)]
mock_exc.assert_has_calls([parted_call, fuser_call] + sync_calls)
def test_make_partitions(self, mock_exc):
self._test_make_partitions(mock_exc, boot_option="netboot")
def test_make_partitions_local_boot(self, mock_exc):
self._test_make_partitions(mock_exc, boot_option="local")
def test_make_partitions_local_boot_uefi(self, mock_exc):
self._test_make_partitions(mock_exc, boot_option="local",
boot_mode="uefi", disk_label="gpt")
def test_make_partitions_local_boot_gpt_bios(self, mock_exc):
self._test_make_partitions(mock_exc, boot_option="local",
disk_label="gpt")
def test_make_partitions_disk_label_gpt(self, mock_exc):
self._test_make_partitions(mock_exc, boot_option="netboot",
disk_label="gpt")
def test_make_partitions_mbr_with_prep(self, mock_exc):
self._test_make_partitions(mock_exc, boot_option="local",
disk_label="msdos", cpu_arch="ppc64le")
def test_make_partitions_gpt_with_prep(self, mock_exc):
self._test_make_partitions(mock_exc, boot_option="local",
disk_label="gpt", cpu_arch="ppc64le")
def test_make_partitions_with_ephemeral(self, mock_exc):
self.ephemeral_mb = 2048
expected_mkpart = ['mkpart', 'primary', '', '1', '2049',
'mkpart', 'primary', 'linux-swap', '2049', '2561',
'mkpart', 'primary', '', '2561', '3585']
self.dev = 'fake-dev'
cmd = self._get_parted_cmd(self.dev) + expected_mkpart
mock_exc.return_value = ('', '')
disk_utils.make_partitions(self.dev, self.root_mb, self.swap_mb,
self.ephemeral_mb, self.configdrive_mb,
self.node_uuid)
parted_call = mock.call(*cmd, use_standard_locale=True,
run_as_root=True)
mock_exc.assert_has_calls([parted_call])
def test_make_partitions_with_iscsi_device(self, mock_exc):
self.ephemeral_mb = 2048
expected_mkpart = ['mkpart', 'primary', '', '1', '2049',
'mkpart', 'primary', 'linux-swap', '2049', '2561',
'mkpart', 'primary', '', '2561', '3585']
self.dev = '/dev/iqn.2008-10.org.openstack:%s.fake-9' % self.node_uuid
ep = '/dev/iqn.2008-10.org.openstack:%s.fake-9-part1' % self.node_uuid
swap = ('/dev/iqn.2008-10.org.openstack:%s.fake-9-part2'
% self.node_uuid)
root = ('/dev/iqn.2008-10.org.openstack:%s.fake-9-part3'
% self.node_uuid)
expected_result = {'ephemeral': ep,
'swap': swap,
'root': root}
cmd = self._get_parted_cmd(self.dev) + expected_mkpart
mock_exc.return_value = ('', '')
result = disk_utils.make_partitions(
self.dev, self.root_mb, self.swap_mb, self.ephemeral_mb,
self.configdrive_mb, self.node_uuid)
parted_call = mock.call(*cmd, use_standard_locale=True,
run_as_root=True)
mock_exc.assert_has_calls([parted_call])
self.assertEqual(expected_result, result)
def test_make_partitions_with_nvme_device(self, mock_exc):
self.ephemeral_mb = 2048
expected_mkpart = ['mkpart', 'primary', '', '1', '2049',
'mkpart', 'primary', 'linux-swap', '2049', '2561',
'mkpart', 'primary', '', '2561', '3585']
self.dev = '/dev/nvmefake-9'
ep = '/dev/nvmefake-9p1'
swap = '/dev/nvmefake-9p2'
root = '/dev/nvmefake-9p3'
expected_result = {'ephemeral': ep,
'swap': swap,
'root': root}
cmd = self._get_parted_cmd(self.dev) + expected_mkpart
mock_exc.return_value = ('', '')
result = disk_utils.make_partitions(
self.dev, self.root_mb, self.swap_mb, self.ephemeral_mb,
self.configdrive_mb, self.node_uuid)
parted_call = mock.call(*cmd, use_standard_locale=True,
run_as_root=True)
mock_exc.assert_has_calls([parted_call])
self.assertEqual(expected_result, result)
def test_make_partitions_with_local_device(self, mock_exc):
self.ephemeral_mb = 2048
expected_mkpart = ['mkpart', 'primary', '', '1', '2049',
'mkpart', 'primary', 'linux-swap', '2049', '2561',
'mkpart', 'primary', '', '2561', '3585']
self.dev = 'fake-dev'
expected_result = {'ephemeral': 'fake-dev1',
'swap': 'fake-dev2',
'root': 'fake-dev3'}
cmd = self._get_parted_cmd(self.dev) + expected_mkpart
mock_exc.return_value = ('', '')
result = disk_utils.make_partitions(
self.dev, self.root_mb, self.swap_mb, self.ephemeral_mb,
self.configdrive_mb, self.node_uuid)
parted_call = mock.call(*cmd, use_standard_locale=True,
run_as_root=True)
mock_exc.assert_has_calls([parted_call])
self.assertEqual(expected_result, result)
@mock.patch.object(utils, 'execute', autospec=True)
class DestroyMetaDataTestCase(base.IronicLibTestCase):
def setUp(self):
super(DestroyMetaDataTestCase, self).setUp()
self.dev = 'fake-dev'
self.node_uuid = "12345678-1234-1234-1234-1234567890abcxyz"
def test_destroy_disk_metadata_4096(self, mock_exec):
mock_exec.side_effect = iter([
(None, None),
('4096\n', None),
('524288\n', None),
(None, None),
(None, None),
(None, None),
(None, None)])
expected_calls = [mock.call('wipefs', '--force', '--all', 'fake-dev',
run_as_root=True,
use_standard_locale=True),
mock.call('blockdev', '--getss', 'fake-dev',
run_as_root=True),
mock.call('blockdev', '--getsize64', 'fake-dev',
run_as_root=True),
mock.call('dd', 'bs=4096', 'if=/dev/zero',
'of=fake-dev', 'count=5', 'oflag=direct',
run_as_root=True,
use_standard_locale=True),
mock.call('dd', 'bs=4096', 'if=/dev/zero',
'of=fake-dev', 'count=5', 'oflag=direct',
'seek=123',
run_as_root=True,
use_standard_locale=True),
mock.call('sgdisk', '-Z', 'fake-dev',
run_as_root=True,
use_standard_locale=True),
mock.call('fuser', self.dev, check_exit_code=[0, 1],
run_as_root=True)]
disk_utils.destroy_disk_metadata(self.dev, self.node_uuid)
mock_exec.assert_has_calls(expected_calls)
def test_destroy_disk_metadata(self, mock_exec):
        # Note(TheJulia): This list will get re-used, but only the second
        # execution's returned string is needed for the test, as otherwise
        # the command output is not used.
mock_exec.side_effect = iter([
(None, None),
('512\n', None),
('524288\n', None),
(None, None),
(None, None),
(None, None),
(None, None)])
expected_calls = [mock.call('wipefs', '--force', '--all', 'fake-dev',
run_as_root=True,
use_standard_locale=True),
mock.call('blockdev', '--getss', 'fake-dev',
run_as_root=True),
mock.call('blockdev', '--getsize64', 'fake-dev',
run_as_root=True),
mock.call('dd', 'bs=512', 'if=/dev/zero',
'of=fake-dev', 'count=33', 'oflag=direct',
run_as_root=True,
use_standard_locale=True),
mock.call('dd', 'bs=512', 'if=/dev/zero',
'of=fake-dev', 'count=33', 'oflag=direct',
'seek=991', run_as_root=True,
use_standard_locale=True),
mock.call('sgdisk', '-Z', 'fake-dev',
run_as_root=True,
use_standard_locale=True),
mock.call('fuser', self.dev,
check_exit_code=[0, 1],
run_as_root=True)]
disk_utils.destroy_disk_metadata(self.dev, self.node_uuid)
mock_exec.assert_has_calls(expected_calls)
def test_destroy_disk_metadata_wipefs_fail(self, mock_exec):
mock_exec.side_effect = processutils.ProcessExecutionError
expected_call = [mock.call('wipefs', '--force', '--all', 'fake-dev',
run_as_root=True,
use_standard_locale=True)]
self.assertRaises(processutils.ProcessExecutionError,
disk_utils.destroy_disk_metadata,
self.dev,
self.node_uuid)
mock_exec.assert_has_calls(expected_call)
def test_destroy_disk_metadata_sgdisk_fail(self, mock_exec):
expected_calls = [mock.call('wipefs', '--force', '--all', 'fake-dev',
run_as_root=True,
use_standard_locale=True),
mock.call('blockdev', '--getss', 'fake-dev',
run_as_root=True),
mock.call('blockdev', '--getsize64', 'fake-dev',
run_as_root=True),
mock.call('dd', 'bs=512', 'if=/dev/zero',
'of=fake-dev', 'count=33', 'oflag=direct',
run_as_root=True,
use_standard_locale=True),
mock.call('dd', 'bs=512', 'if=/dev/zero',
'of=fake-dev', 'count=33', 'oflag=direct',
'seek=991', run_as_root=True,
use_standard_locale=True),
mock.call('sgdisk', '-Z', 'fake-dev',
run_as_root=True,
use_standard_locale=True)]
mock_exec.side_effect = iter([
(None, None),
('512\n', None),
('524288\n', None),
(None, None),
(None, None),
processutils.ProcessExecutionError()])
self.assertRaises(processutils.ProcessExecutionError,
disk_utils.destroy_disk_metadata,
self.dev,
self.node_uuid)
mock_exec.assert_has_calls(expected_calls)
def test_destroy_disk_metadata_wipefs_not_support_force(self, mock_exec):
mock_exec.side_effect = iter([
processutils.ProcessExecutionError(description='--force'),
(None, None),
('512\n', None),
('524288\n', None),
(None, None),
(None, None),
(None, None),
(None, None)])
expected_call = [mock.call('wipefs', '--force', '--all', 'fake-dev',
run_as_root=True,
use_standard_locale=True),
mock.call('wipefs', '--all', 'fake-dev',
run_as_root=True,
use_standard_locale=True)]
disk_utils.destroy_disk_metadata(self.dev, self.node_uuid)
mock_exec.assert_has_calls(expected_call)
def test_destroy_disk_metadata_ebr(self, mock_exec):
expected_calls = [mock.call('wipefs', '--force', '--all', 'fake-dev',
run_as_root=True,
use_standard_locale=True),
mock.call('blockdev', '--getss', 'fake-dev',
run_as_root=True),
mock.call('blockdev', '--getsize64', 'fake-dev',
run_as_root=True),
mock.call('dd', 'bs=512', 'if=/dev/zero',
'of=fake-dev', 'count=2', 'oflag=direct',
run_as_root=True,
use_standard_locale=True),
mock.call('sgdisk', '-Z', 'fake-dev',
run_as_root=True,
use_standard_locale=True)]
mock_exec.side_effect = iter([
(None, None),
('512\n', None),
('1024\n', None), # an EBR is 2 sectors
(None, None),
(None, None),
(None, None),
(None, None)])
disk_utils.destroy_disk_metadata(self.dev, self.node_uuid)
mock_exec.assert_has_calls(expected_calls)
def test_destroy_disk_metadata_tiny_partition(self, mock_exec):
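        # 21504 bytes / 512 = 42 sectors, so the trailing GPT wipe is expected
        # to seek to sector 42 - 33 = 9.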
expected_calls = [mock.call('wipefs', '--force', '--all', 'fake-dev',
run_as_root=True,
use_standard_locale=True),
mock.call('blockdev', '--getss', 'fake-dev',
run_as_root=True),
mock.call('blockdev', '--getsize64', 'fake-dev',
run_as_root=True),
mock.call('dd', 'bs=512', 'if=/dev/zero',
'of=fake-dev', 'count=33', 'oflag=direct',
run_as_root=True,
use_standard_locale=True),
mock.call('dd', 'bs=512', 'if=/dev/zero',
'of=fake-dev', 'count=33', 'oflag=direct',
'seek=9', run_as_root=True,
use_standard_locale=True),
mock.call('sgdisk', '-Z', 'fake-dev',
run_as_root=True,
use_standard_locale=True)]
mock_exec.side_effect = iter([
(None, None),
('512\n', None),
('21504\n', None),
(None, None),
(None, None),
(None, None),
(None, None)])
disk_utils.destroy_disk_metadata(self.dev, self.node_uuid)
mock_exec.assert_has_calls(expected_calls)
@mock.patch.object(utils, 'execute', autospec=True)
class GetDeviceBlockSizeTestCase(base.IronicLibTestCase):
def setUp(self):
super(GetDeviceBlockSizeTestCase, self).setUp()
self.dev = 'fake-dev'
self.node_uuid = "12345678-1234-1234-1234-1234567890abcxyz"
def test_get_dev_block_size(self, mock_exec):
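        # 'blockdev --getsz' reports the device size in 512-byte sectors.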
mock_exec.return_value = ("64", "")
expected_call = [mock.call('blockdev', '--getsz', self.dev,
run_as_root=True)]
disk_utils.get_dev_block_size(self.dev)
mock_exec.assert_has_calls(expected_call)
@mock.patch.object(utils, 'execute', autospec=True)
class GetDeviceByteSizeTestCase(base.IronicLibTestCase):
def setUp(self):
super(GetDeviceByteSizeTestCase, self).setUp()
self.dev = 'fake-dev'
self.node_uuid = "12345678-1234-1234-1234-1234567890abcxyz"
def test_get_dev_byte_size(self, mock_exec):
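        # 'blockdev --getsize64' reports the device size in bytes.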
mock_exec.return_value = ("64", "")
expected_call = [mock.call('blockdev', '--getsize64', self.dev,
run_as_root=True)]
disk_utils.get_dev_byte_size(self.dev)
mock_exec.assert_has_calls(expected_call)
@mock.patch.object(disk_utils, 'dd', autospec=True)
@mock.patch.object(qemu_img, 'image_info', autospec=True)
@mock.patch.object(qemu_img, 'convert_image', autospec=True)
class PopulateImageTestCase(base.IronicLibTestCase):
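    # The tests below expect populate_image to dd raw images straight to the
    # target and to convert any other format to raw via
    # qemu_img.convert_image, taking exactly one of the two paths.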
def test_populate_raw_image(self, mock_cg, mock_qinfo, mock_dd):
type(mock_qinfo.return_value).file_format = mock.PropertyMock(
return_value='raw')
disk_utils.populate_image('src', 'dst')
mock_dd.assert_called_once_with('src', 'dst', conv_flags=None)
self.assertFalse(mock_cg.called)
def test_populate_raw_image_with_convert(self, mock_cg, mock_qinfo,
mock_dd):
type(mock_qinfo.return_value).file_format = mock.PropertyMock(
return_value='raw')
disk_utils.populate_image('src', 'dst', conv_flags='sparse')
mock_dd.assert_called_once_with('src', 'dst', conv_flags='sparse')
self.assertFalse(mock_cg.called)
def test_populate_qcow2_image(self, mock_cg, mock_qinfo, mock_dd):
type(mock_qinfo.return_value).file_format = mock.PropertyMock(
return_value='qcow2')
disk_utils.populate_image('src', 'dst')
mock_cg.assert_called_once_with('src', 'dst', 'raw', True,
sparse_size='0')
self.assertFalse(mock_dd.called)
@mock.patch('time.sleep', lambda sec: None)
class OtherFunctionTestCase(base.IronicLibTestCase):
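    # is_block_device stats the device and retries up to
    # [disk_utils]partition_detection_attempts times before raising
    # InstanceDeployFailure: three os.stat calls are expected by default,
    # two after the override in test_is_block_device_attempts.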
@mock.patch.object(os, 'stat', autospec=True)
@mock.patch.object(stat, 'S_ISBLK', autospec=True)
def test_is_block_device_works(self, mock_is_blk, mock_os):
device = '/dev/disk/by-path/ip-1.2.3.4:5678-iscsi-iqn.fake-lun-9'
mock_is_blk.return_value = True
mock_os().st_mode = 10000
self.assertTrue(disk_utils.is_block_device(device))
mock_is_blk.assert_called_once_with(mock_os().st_mode)
@mock.patch.object(os, 'stat', autospec=True)
def test_is_block_device_raises(self, mock_os):
device = '/dev/disk/by-path/ip-1.2.3.4:5678-iscsi-iqn.fake-lun-9'
mock_os.side_effect = OSError
self.assertRaises(exception.InstanceDeployFailure,
disk_utils.is_block_device, device)
mock_os.assert_has_calls([mock.call(device)] * 3)
@mock.patch.object(os, 'stat', autospec=True)
def test_is_block_device_attempts(self, mock_os):
CONF.set_override('partition_detection_attempts', 2,
group='disk_utils')
device = '/dev/disk/by-path/ip-1.2.3.4:5678-iscsi-iqn.fake-lun-9'
mock_os.side_effect = OSError
self.assertRaises(exception.InstanceDeployFailure,
disk_utils.is_block_device, device)
mock_os.assert_has_calls([mock.call(device)] * 2)
@mock.patch.object(os.path, 'getsize', autospec=True)
@mock.patch.object(qemu_img, 'image_info', autospec=True)
def test_get_image_mb(self, mock_qinfo, mock_getsize):
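        # get_image_mb is expected to round the image size up to whole MiB:
        # 1 byte -> 1 MiB, exactly 1 MiB -> 1 MiB, 1 MiB + 1 byte -> 2 MiB.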
mb = 1024 * 1024
mock_getsize.return_value = 0
type(mock_qinfo.return_value).virtual_size = mock.PropertyMock(
return_value=0)
self.assertEqual(0, disk_utils.get_image_mb('x', False))
self.assertEqual(0, disk_utils.get_image_mb('x', True))
mock_getsize.return_value = 1
type(mock_qinfo.return_value).virtual_size = mock.PropertyMock(
return_value=1)
self.assertEqual(1, disk_utils.get_image_mb('x', False))
self.assertEqual(1, disk_utils.get_image_mb('x', True))
mock_getsize.return_value = mb
type(mock_qinfo.return_value).virtual_size = mock.PropertyMock(
return_value=mb)
self.assertEqual(1, disk_utils.get_image_mb('x', False))
self.assertEqual(1, disk_utils.get_image_mb('x', True))
mock_getsize.return_value = mb + 1
type(mock_qinfo.return_value).virtual_size = mock.PropertyMock(
return_value=mb + 1)
self.assertEqual(2, disk_utils.get_image_mb('x', False))
self.assertEqual(2, disk_utils.get_image_mb('x', True))
def _test_count_mbr_partitions(self, output, mock_execute):
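        # 'partprobe -d -s' lists the partition numbers, with logical
        # partitions enclosed in angle brackets; the tests below check that
        # primary and logical partitions are counted separately and that a
        # non-msdos table raises ValueError.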
mock_execute.return_value = (output, '')
out = disk_utils.count_mbr_partitions('/dev/fake')
mock_execute.assert_called_once_with('partprobe', '-d', '-s',
'/dev/fake', run_as_root=True,
use_standard_locale=True)
return out
@mock.patch.object(utils, 'execute', autospec=True)
def test_count_mbr_partitions(self, mock_execute):
output = "/dev/fake: msdos partitions 1 2 3 <5 6>"
pp, lp = self._test_count_mbr_partitions(output, mock_execute)
self.assertEqual(3, pp)
self.assertEqual(2, lp)
@mock.patch.object(utils, 'execute', autospec=True)
def test_count_mbr_partitions_no_logical_partitions(self, mock_execute):
output = "/dev/fake: msdos partitions 1 2"
pp, lp = self._test_count_mbr_partitions(output, mock_execute)
self.assertEqual(2, pp)
self.assertEqual(0, lp)
@mock.patch.object(utils, 'execute', autospec=True)
def test_count_mbr_partitions_wrong_partition_table(self, mock_execute):
output = "/dev/fake: gpt partitions 1 2 3 4 5 6"
mock_execute.return_value = (output, '')
self.assertRaises(ValueError, disk_utils.count_mbr_partitions,
'/dev/fake')
mock_execute.assert_called_once_with('partprobe', '-d', '-s',
'/dev/fake', run_as_root=True,
use_standard_locale=True)
@mock.patch.object(disk_utils, 'get_device_information', autospec=True)
def test_block_uuid(self, mock_get_device_info):
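        # block_uuid prefers the filesystem UUID and only falls back to
        # PARTUUID when lsblk reports no UUID (next test).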
mock_get_device_info.return_value = {'UUID': '123',
'PARTUUID': '123456'}
self.assertEqual('123', disk_utils.block_uuid('/dev/fake'))
mock_get_device_info.assert_called_once_with(
'/dev/fake', fields=['UUID', 'PARTUUID'])
@mock.patch.object(disk_utils, 'get_device_information', autospec=True)
def test_block_uuid_fallback_to_uuid(self, mock_get_device_info):
mock_get_device_info.return_value = {'PARTUUID': '123456'}
self.assertEqual('123456', disk_utils.block_uuid('/dev/fake'))
mock_get_device_info.assert_called_once_with(
'/dev/fake', fields=['UUID', 'PARTUUID'])
@mock.patch.object(utils, 'execute', autospec=True)
class FixGptStructsTestCases(base.IronicLibTestCase):
def setUp(self):
super(FixGptStructsTestCases, self).setUp()
self.dev = "/dev/fake"
self.config_part_label = "config-2"
self.node_uuid = "12345678-1234-1234-1234-1234567890abcxyz"
def test_fix_gpt_structs_fix_required(self, mock_execute):
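        # 'sgdisk -v' reports that the backup GPT header is not at the end of
        # the disk, so _fix_gpt_structs is expected to follow up with
        # 'sgdisk -e' to relocate it.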
sgdisk_v_output = """
Problem: The secondary header's self-pointer indicates that it doesn't reside
at the end of the disk. If you've added a disk to a RAID array, use the 'e'
option on the experts' menu to adjust the secondary header's and partition
table's locations.
Identified 1 problems!
"""
mock_execute.return_value = (sgdisk_v_output, '')
execute_calls = [
mock.call('sgdisk', '-v', '/dev/fake', run_as_root=True),
mock.call('sgdisk', '-e', '/dev/fake', run_as_root=True)
]
disk_utils._fix_gpt_structs('/dev/fake', self.node_uuid)
mock_execute.assert_has_calls(execute_calls)
def test_fix_gpt_structs_fix_not_required(self, mock_execute):
mock_execute.return_value = ('', '')
disk_utils._fix_gpt_structs('/dev/fake', self.node_uuid)
mock_execute.assert_called_once_with('sgdisk', '-v', '/dev/fake',
run_as_root=True)
@mock.patch.object(disk_utils.LOG, 'error', autospec=True)
def test_fix_gpt_structs_exc(self, mock_log, mock_execute):
mock_execute.side_effect = processutils.ProcessExecutionError
self.assertRaisesRegex(exception.InstanceDeployFailure,
'Failed to fix GPT data structures on disk',
disk_utils._fix_gpt_structs,
self.dev, self.node_uuid)
mock_execute.assert_called_once_with('sgdisk', '-v', '/dev/fake',
run_as_root=True)
self.assertEqual(1, mock_log.call_count)
@mock.patch.object(utils, 'execute', autospec=True)
class TriggerDeviceRescanTestCase(base.IronicLibTestCase):
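    # trigger_device_rescan syncs, settles udev, re-reads the partition table
    # with partprobe and verifies it with 'sgdisk -v'; test_fails shows that a
    # failing call makes it return False instead of raising.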
def test_trigger(self, mock_execute):
self.assertTrue(disk_utils.trigger_device_rescan('/dev/fake'))
mock_execute.assert_has_calls([
mock.call('sync'),
mock.call('udevadm', 'settle'),
mock.call('partprobe', '/dev/fake', run_as_root=True, attempts=10),
mock.call('udevadm', 'settle'),
mock.call('sgdisk', '-v', '/dev/fake', run_as_root=True),
])
def test_custom_attempts(self, mock_execute):
self.assertTrue(
disk_utils.trigger_device_rescan('/dev/fake', attempts=1))
mock_execute.assert_has_calls([
mock.call('sync'),
mock.call('udevadm', 'settle'),
mock.call('partprobe', '/dev/fake', run_as_root=True, attempts=1),
mock.call('udevadm', 'settle'),
mock.call('sgdisk', '-v', '/dev/fake', run_as_root=True),
])
def test_fails(self, mock_execute):
mock_execute.side_effect = [('', '')] * 4 + [
processutils.ProcessExecutionError
]
self.assertFalse(disk_utils.trigger_device_rescan('/dev/fake'))
mock_execute.assert_has_calls([
mock.call('sync'),
mock.call('udevadm', 'settle'),
mock.call('partprobe', '/dev/fake', run_as_root=True, attempts=10),
mock.call('udevadm', 'settle'),
mock.call('sgdisk', '-v', '/dev/fake', run_as_root=True),
])
BLKID_PROBE = ("""
/dev/disk/by-path/ip-10.1.0.52:3260-iscsi-iqn.2008-10.org.openstack: """
"""PTUUID="123456" PTTYPE="gpt"
""")
LSBLK_NORMAL = (
'UUID="123" BLOCK_SIZE="512" TYPE="vfat" '
'PARTLABEL="EFI System Partition" PARTUUID="123456"'
)
@mock.patch.object(utils, 'execute', autospec=True)
class GetDeviceInformationTestCase(base.IronicLibTestCase):
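    # get_device_information parses the KEY="value" pairs printed by lsblk
    # (or by 'blkid -p' when probe=True) into a dict; field selection is
    # passed to lsblk via --output, so no filtering happens in Python
    # (see test_fields).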
def test_normal(self, mock_execute):
mock_execute.return_value = LSBLK_NORMAL, ""
result = disk_utils.get_device_information('/dev/fake')
self.assertEqual(
{'UUID': '123', 'BLOCK_SIZE': '512', 'TYPE': 'vfat',
'PARTLABEL': 'EFI System Partition', 'PARTUUID': '123456'},
result
)
mock_execute.assert_called_once_with(
'lsblk', '/dev/fake', '--pairs', '--bytes', '--ascii', '--nodeps',
'--output-all', use_standard_locale=True, run_as_root=True)
def test_probe(self, mock_execute):
mock_execute.return_value = BLKID_PROBE, ""
result = disk_utils.get_device_information('/dev/fake', probe=True)
self.assertEqual({'PTUUID': '123456', 'PTTYPE': 'gpt'}, result)
mock_execute.assert_called_once_with('blkid', '/dev/fake', '-p',
use_standard_locale=True,
run_as_root=True)
def test_fields(self, mock_execute):
mock_execute.return_value = LSBLK_NORMAL, ""
result = disk_utils.get_device_information('/dev/fake',
fields=['UUID', 'LABEL'])
        # No filtering on our side, so all the fake fields are returned
self.assertEqual(
{'UUID': '123', 'BLOCK_SIZE': '512', 'TYPE': 'vfat',
'PARTLABEL': 'EFI System Partition', 'PARTUUID': '123456'},
result
)
mock_execute.assert_called_once_with(
'lsblk', '/dev/fake', '--pairs', '--bytes', '--ascii', '--nodeps',
'--output', 'UUID,LABEL',
use_standard_locale=True, run_as_root=True)
def test_empty(self, mock_execute):
mock_execute.return_value = "\n", ""
result = disk_utils.get_device_information('/dev/fake', probe=True)
self.assertEqual({}, result)
mock_execute.assert_called_once_with('blkid', '/dev/fake',
'-p', use_standard_locale=True,
run_as_root=True)
@mock.patch.object(utils, 'execute', autospec=True)
class GetPartitionTableTypeTestCase(base.IronicLibTestCase):
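    # get_partition_table_type reads the 'Partition Table:' line from
    # 'parted --script <dev> -- print' and maps unrecognised values (here
    # 'whatever') to 'unknown'.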
def test_gpt(self, mocked_execute):
self._test_by_type(mocked_execute, 'gpt', 'gpt')
def test_msdos(self, mocked_execute):
self._test_by_type(mocked_execute, 'msdos', 'msdos')
def test_unknown(self, mocked_execute):
self._test_by_type(mocked_execute, 'whatever', 'unknown')
def _test_by_type(self, mocked_execute, table_type_output,
expected_table_type):
parted_ret = PARTED_OUTPUT_UNFORMATTED.format(table_type_output)
mocked_execute.side_effect = [
(parted_ret, None),
]
ret = disk_utils.get_partition_table_type('hello')
mocked_execute.assert_called_once_with(
'parted', '--script', 'hello', '--', 'print',
run_as_root=True, use_standard_locale=True)
self.assertEqual(expected_table_type, ret)
PARTED_OUTPUT_UNFORMATTED = '''Model: whatever
Disk /dev/sda: 450GB
Sector size (logical/physical): 512B/512B
Partition Table: {}
Disk Flags:
Number Start End Size File system Name Flags
14 1049kB 5243kB 4194kB bios_grub
15 5243kB 116MB 111MB fat32 boot, esp
1 116MB 2361MB 2245MB ext4
'''
@mock.patch.object(disk_utils, 'list_partitions', autospec=True)
@mock.patch.object(disk_utils, 'get_partition_table_type', autospec=True)
class FindEfiPartitionTestCase(base.IronicLibTestCase):
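    # find_efi_partition looks for a partition flagged 'esp'; on GPT a bare
    # 'boot' flag is accepted as the ESP as well, while on msdos it is not.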
def test_find_efi_partition(self, mocked_type, mocked_parts):
mocked_parts.return_value = [
{'number': '1', 'flags': ''},
{'number': '14', 'flags': 'bios_grub'},
{'number': '15', 'flags': 'esp, boot'},
]
ret = disk_utils.find_efi_partition('/dev/sda')
self.assertEqual({'number': '15', 'flags': 'esp, boot'}, ret)
def test_find_efi_partition_only_boot_flag_gpt(self, mocked_type,
mocked_parts):
mocked_type.return_value = 'gpt'
mocked_parts.return_value = [
{'number': '1', 'flags': ''},
{'number': '14', 'flags': 'bios_grub'},
{'number': '15', 'flags': 'boot'},
]
ret = disk_utils.find_efi_partition('/dev/sda')
self.assertEqual({'number': '15', 'flags': 'boot'}, ret)
def test_find_efi_partition_only_boot_flag_mbr(self, mocked_type,
mocked_parts):
mocked_type.return_value = 'msdos'
mocked_parts.return_value = [
{'number': '1', 'flags': ''},
{'number': '14', 'flags': 'bios_grub'},
{'number': '15', 'flags': 'boot'},
]
self.assertIsNone(disk_utils.find_efi_partition('/dev/sda'))
def test_find_efi_partition_not_found(self, mocked_type, mocked_parts):
mocked_parts.return_value = [
{'number': '1', 'flags': ''},
{'number': '14', 'flags': 'bios_grub'},
]
self.assertIsNone(disk_utils.find_efi_partition('/dev/sda'))

View File

@ -1,169 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from unittest import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import imageutils
from ironic_lib import qemu_img
from ironic_lib.tests import base
from ironic_lib import utils
CONF = cfg.CONF
class ImageInfoTestCase(base.IronicLibTestCase):
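    # image_info raises FileNotFoundError for a missing path; otherwise it
    # runs 'qemu-img info --output=json' under a prlimit and parses the
    # output with imageutils.QemuImgInfo.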
@mock.patch.object(os.path, 'exists', return_value=False, autospec=True)
def test_image_info_path_doesnt_exist(self, path_exists_mock):
self.assertRaises(FileNotFoundError, qemu_img.image_info, 'noimg')
path_exists_mock.assert_called_once_with('noimg')
@mock.patch.object(utils, 'execute', return_value=('out', 'err'),
autospec=True)
@mock.patch.object(imageutils, 'QemuImgInfo', autospec=True)
@mock.patch.object(os.path, 'exists', return_value=True, autospec=True)
def test_image_info_path_exists(self, path_exists_mock,
image_info_mock, execute_mock):
qemu_img.image_info('img')
path_exists_mock.assert_called_once_with('img')
execute_mock.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', 'img',
'--output=json',
prlimit=mock.ANY)
image_info_mock.assert_called_once_with('out', format='json')
class ConvertImageTestCase(base.IronicLibTestCase):
@mock.patch.object(utils, 'execute', autospec=True)
def test_convert_image(self, execute_mock):
qemu_img.convert_image('source', 'dest', 'out_format')
execute_mock.assert_called_once_with(
'qemu-img', 'convert', '-O',
'out_format', 'source', 'dest',
run_as_root=False,
prlimit=mock.ANY,
use_standard_locale=True,
env_variables={'MALLOC_ARENA_MAX': '3'})
@mock.patch.object(utils, 'execute', autospec=True)
def test_convert_image_flags(self, execute_mock):
qemu_img.convert_image('source', 'dest', 'out_format',
cache='directsync', out_of_order=True,
sparse_size='0')
execute_mock.assert_called_once_with(
'qemu-img', 'convert', '-O',
'out_format', '-t', 'directsync',
'-S', '0', '-W', 'source', 'dest',
run_as_root=False,
prlimit=mock.ANY,
use_standard_locale=True,
env_variables={'MALLOC_ARENA_MAX': '3'})
@mock.patch.object(utils, 'execute', autospec=True)
def test_convert_image_retries(self, execute_mock):
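        # convert_image retries when qemu-img fails with a resource or memory
        # exhaustion error, running 'sync' between attempts; unrelated errors
        # (see test_convert_image_just_fails) are not retried.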
ret_err = 'qemu: qemu_thread_create: Resource temporarily unavailable'
execute_mock.side_effect = [
processutils.ProcessExecutionError(stderr=ret_err), ('', ''),
processutils.ProcessExecutionError(stderr=ret_err), ('', ''),
('', ''),
]
qemu_img.convert_image('source', 'dest', 'out_format')
convert_call = mock.call('qemu-img', 'convert', '-O',
'out_format', 'source', 'dest',
run_as_root=False,
prlimit=mock.ANY,
use_standard_locale=True,
env_variables={'MALLOC_ARENA_MAX': '3'})
execute_mock.assert_has_calls([
convert_call,
mock.call('sync'),
convert_call,
mock.call('sync'),
convert_call,
])
@mock.patch.object(utils, 'execute', autospec=True)
def test_convert_image_retries_alternate_error(self, execute_mock):
ret_err = 'Failed to allocate memory: Cannot allocate memory\n'
execute_mock.side_effect = [
processutils.ProcessExecutionError(stderr=ret_err), ('', ''),
processutils.ProcessExecutionError(stderr=ret_err), ('', ''),
('', ''),
]
qemu_img.convert_image('source', 'dest', 'out_format')
convert_call = mock.call('qemu-img', 'convert', '-O',
'out_format', 'source', 'dest',
run_as_root=False,
prlimit=mock.ANY,
use_standard_locale=True,
env_variables={'MALLOC_ARENA_MAX': '3'})
execute_mock.assert_has_calls([
convert_call,
mock.call('sync'),
convert_call,
mock.call('sync'),
convert_call,
])
@mock.patch.object(utils, 'execute', autospec=True)
def test_convert_image_retries_and_fails(self, execute_mock):
ret_err = 'qemu: qemu_thread_create: Resource temporarily unavailable'
execute_mock.side_effect = [
processutils.ProcessExecutionError(stderr=ret_err), ('', ''),
processutils.ProcessExecutionError(stderr=ret_err), ('', ''),
processutils.ProcessExecutionError(stderr=ret_err), ('', ''),
processutils.ProcessExecutionError(stderr=ret_err),
]
self.assertRaises(processutils.ProcessExecutionError,
qemu_img.convert_image,
'source', 'dest', 'out_format')
convert_call = mock.call('qemu-img', 'convert', '-O',
'out_format', 'source', 'dest',
run_as_root=False,
prlimit=mock.ANY,
use_standard_locale=True,
env_variables={'MALLOC_ARENA_MAX': '3'})
execute_mock.assert_has_calls([
convert_call,
mock.call('sync'),
convert_call,
mock.call('sync'),
convert_call,
])
@mock.patch.object(utils, 'execute', autospec=True)
def test_convert_image_just_fails(self, execute_mock):
ret_err = 'Aliens'
execute_mock.side_effect = [
processutils.ProcessExecutionError(stderr=ret_err),
]
self.assertRaises(processutils.ProcessExecutionError,
qemu_img.convert_image,
'source', 'dest', 'out_format')
convert_call = mock.call('qemu-img', 'convert', '-O',
'out_format', 'source', 'dest',
run_as_root=False,
prlimit=mock.ANY,
use_standard_locale=True,
env_variables={'MALLOC_ARENA_MAX': '3'})
execute_mock.assert_has_calls([
convert_call,
])

View File

@ -501,148 +501,6 @@ class MatchRootDeviceTestCase(base.IronicLibTestCase):
self.assertEqual([self.devices[0]], devs)
class WaitForDisk(base.IronicLibTestCase):
def setUp(self):
super(WaitForDisk, self).setUp()
CONF.set_override('check_device_interval', .01,
group='disk_partitioner')
CONF.set_override('check_device_max_retries', 2,
group='disk_partitioner')
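    # wait_for_disk_to_become_available polls 'fuser' until it reports no
    # users of the device; with check_device_max_retries overridden to 2,
    # every failing test below expects exactly two fuser calls.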
@mock.patch.object(utils, 'execute', autospec=True)
def test_wait_for_disk_to_become_available(self, mock_exc):
mock_exc.return_value = ('', '')
utils.wait_for_disk_to_become_available('fake-dev')
fuser_cmd = ['fuser', 'fake-dev']
fuser_call = mock.call(*fuser_cmd, run_as_root=True,
check_exit_code=[0, 1])
self.assertEqual(1, mock_exc.call_count)
mock_exc.assert_has_calls([fuser_call])
@mock.patch.object(utils, 'execute', autospec=True,
side_effect=processutils.ProcessExecutionError(
stderr='fake'))
def test_wait_for_disk_to_become_available_no_fuser(self, mock_exc):
self.assertRaises(exception.IronicException,
utils.wait_for_disk_to_become_available,
'fake-dev')
fuser_cmd = ['fuser', 'fake-dev']
fuser_call = mock.call(*fuser_cmd, run_as_root=True,
check_exit_code=[0, 1])
self.assertEqual(2, mock_exc.call_count)
mock_exc.assert_has_calls([fuser_call, fuser_call])
@mock.patch.object(utils, 'execute', autospec=True)
def test_wait_for_disk_to_become_available_device_in_use_psmisc(
self, mock_exc):
        # Test that the device is not available. This variant uses the stdout
        # and stderr produced by the 'psmisc' version of 'fuser'.
# NOTE(TheJulia): Looks like fuser returns the actual list of pids
        # in the stdout output, whereas all other text is returned in
# stderr.
# The 'psmisc' version has a leading space character in stdout. The
# filename is output to stderr
mock_exc.side_effect = [(' 1234 ', 'fake-dev: '),
(' 15503 3919 15510 15511', 'fake-dev:')]
expected_error = ('Processes with the following PIDs are '
'holding device fake-dev: 15503, 3919, 15510, '
'15511. Timed out waiting for completion.')
self.assertRaisesRegex(
exception.IronicException,
expected_error,
utils.wait_for_disk_to_become_available,
'fake-dev')
fuser_cmd = ['fuser', 'fake-dev']
fuser_call = mock.call(*fuser_cmd, run_as_root=True,
check_exit_code=[0, 1])
self.assertEqual(2, mock_exc.call_count)
mock_exc.assert_has_calls([fuser_call, fuser_call])
@mock.patch.object(utils, 'execute', autospec=True)
def test_wait_for_disk_to_become_available_device_in_use_busybox(
self, mock_exc):
        # Test that the device is not available. This variant uses the stdout
        # and stderr produced by the 'busybox' version of 'fuser'.
# NOTE(TheJulia): Looks like fuser returns the actual list of pids
        # in the stdout output, whereas all other text is returned in
# stderr.
# The 'busybox' version does not have a leading space character in
# stdout. Also nothing is output to stderr.
mock_exc.side_effect = [('1234', ''),
('15503 3919 15510 15511', '')]
expected_error = ('Processes with the following PIDs are '
'holding device fake-dev: 15503, 3919, 15510, '
'15511. Timed out waiting for completion.')
self.assertRaisesRegex(
exception.IronicException,
expected_error,
utils.wait_for_disk_to_become_available,
'fake-dev')
fuser_cmd = ['fuser', 'fake-dev']
fuser_call = mock.call(*fuser_cmd, run_as_root=True,
check_exit_code=[0, 1])
self.assertEqual(2, mock_exc.call_count)
mock_exc.assert_has_calls([fuser_call, fuser_call])
@mock.patch.object(utils, 'execute', autospec=True)
def test_wait_for_disk_to_become_available_no_device(self, mock_exc):
# NOTE(TheJulia): Looks like fuser returns the actual list of pids
        # in the stdout output, whereas all other text is returned in
# stderr.
mock_exc.return_value = ('', 'Specified filename /dev/fake '
'does not exist.')
expected_error = ('Fuser exited with "Specified filename '
'/dev/fake does not exist." while checking '
'locks for device fake-dev. Timed out waiting '
'for completion.')
self.assertRaisesRegex(
exception.IronicException,
expected_error,
utils.wait_for_disk_to_become_available,
'fake-dev')
fuser_cmd = ['fuser', 'fake-dev']
fuser_call = mock.call(*fuser_cmd, run_as_root=True,
check_exit_code=[0, 1])
self.assertEqual(2, mock_exc.call_count)
mock_exc.assert_has_calls([fuser_call, fuser_call])
@mock.patch.object(utils, 'execute', autospec=True)
def test_wait_for_disk_to_become_available_dev_becomes_avail_psmisc(
self, mock_exc):
        # Test that the device is initially unavailable but then becomes
        # available. This variant uses the stdout and stderr produced by the
        # 'psmisc' version of 'fuser'.
# The 'psmisc' version has a leading space character in stdout. The
# filename is output to stderr
mock_exc.side_effect = [(' 1234 ', 'fake-dev: '),
('', '')]
utils.wait_for_disk_to_become_available('fake-dev')
fuser_cmd = ['fuser', 'fake-dev']
fuser_call = mock.call(*fuser_cmd, run_as_root=True,
check_exit_code=[0, 1])
self.assertEqual(2, mock_exc.call_count)
mock_exc.assert_has_calls([fuser_call, fuser_call])
@mock.patch.object(utils, 'execute', autospec=True)
def test_wait_for_disk_to_become_available_dev_becomes_avail_busybox(
self, mock_exc):
        # Test that the device is initially unavailable but then becomes
        # available. This variant uses the stdout and stderr produced by the
        # 'busybox' version of 'fuser'.
# The 'busybox' version does not have a leading space character in
# stdout. Also nothing is output to stderr.
mock_exc.side_effect = [('1234 5895', ''),
('', '')]
utils.wait_for_disk_to_become_available('fake-dev')
fuser_cmd = ['fuser', 'fake-dev']
fuser_call = mock.call(*fuser_cmd, run_as_root=True,
check_exit_code=[0, 1])
self.assertEqual(2, mock_exc.call_count)
mock_exc.assert_has_calls([fuser_call, fuser_call])
@mock.patch.object(utils, 'execute', autospec=True)
class GetRouteSourceTestCase(base.IronicLibTestCase):

View File

@ -36,7 +36,6 @@ from oslo_utils import excutils
from oslo_utils import specs_matcher
from oslo_utils import strutils
from oslo_utils import units
import tenacity
from ironic_lib.common.i18n import _
from ironic_lib import exception
@ -501,89 +500,6 @@ def match_root_device_hints(devices, root_device_hints):
return dev
def wait_for_disk_to_become_available(device):
"""Wait for a disk device to become available.
    Waits until all process locks on the device have been released.
Timeout and iteration settings come from the configuration
options used by the in-library disk_partitioner:
``check_device_interval`` and ``check_device_max_retries``.
    :param device: The path to the device.
    :raises: IronicException if the disk fails to become
available.
"""
pids = ['']
stderr = ['']
interval = CONF.disk_partitioner.check_device_interval
max_retries = CONF.disk_partitioner.check_device_max_retries
def _wait_for_disk():
        # A regex is likely overkill here, but variations in fuser output
        # mean we should use one anyway.
fuser_pids_re = re.compile(r'\d+')
# There are 'psmisc' and 'busybox' versions of the 'fuser' program. The
# 'fuser' programs differ in how they output data to stderr. The
# busybox version does not output the filename to stderr, while the
# standard 'psmisc' version does output the filename to stderr. How
# they output to stdout is almost identical in that only the PIDs are
# output to stdout, with the 'psmisc' version adding a leading space
# character to the list of PIDs.
try:
# NOTE(ifarkas): fuser returns a non-zero return code if none of
# the specified files is accessed.
# NOTE(TheJulia): fuser does not report LVM devices as in use
# unless the LVM device-mapper device is the
# device that is directly polled.
# NOTE(TheJulia): The -m flag allows fuser to reveal data about
# mounted filesystems, which should be considered
# busy/locked. That being said, it is not used
# because busybox fuser has a different behavior.
            # NOTE(TheJulia): fuser outputs a list of found PIDs to stdout.
# All other text is returned via stderr, and the
# output to a terminal is merged as a result.
out, err = execute('fuser', device, check_exit_code=[0, 1],
run_as_root=True)
if not out and not err:
return True
stderr[0] = err
# NOTE: findall() returns a list of matches, or an empty list if no
# matches
pids[0] = fuser_pids_re.findall(out)
except processutils.ProcessExecutionError as exc:
LOG.warning('Failed to check the device %(device)s with fuser:'
' %(err)s', {'device': device, 'err': exc})
return False
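    # Keep calling _wait_for_disk() while it returns False, up to
    # check_device_max_retries attempts with a fixed check_device_interval
    # pause between them.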
retry = tenacity.retry(
retry=tenacity.retry_if_result(lambda r: not r),
stop=tenacity.stop_after_attempt(max_retries),
wait=tenacity.wait_fixed(interval),
reraise=True)
try:
retry(_wait_for_disk)()
except tenacity.RetryError:
if pids[0]:
raise exception.IronicException(
_('Processes with the following PIDs are holding '
'device %(device)s: %(pids)s. '
'Timed out waiting for completion.')
% {'device': device, 'pids': ', '.join(pids[0])})
else:
raise exception.IronicException(
_('Fuser exited with "%(fuser_err)s" while checking '
'locks for device %(device)s. Timed out waiting for '
'completion.')
% {'device': device, 'fuser_err': stderr[0]})
def get_route_source(dest, ignore_link_local=True):
"""Get the IP address to send packages to destination."""
try:

View File

@ -6,4 +6,3 @@ oslo.utils>=3.34.0 # Apache-2.0
zeroconf>=0.24.0 # LGPL
bcrypt>=3.1.3 # Apache-2.0
WebOb>=1.7.1 # MIT
tenacity>=6.2.0 # Apache-2.0