Merge "Import disk_{utils,partitioner} from ironic-lib"
This commit is contained in:
commit
cdd0a83448
@ -372,11 +372,55 @@ cli_opts = [
|
||||
'determine if this action is necessary.'),
|
||||
]
|
||||
|
||||
# Options governing partitioning and image-writing behaviour, imported
# from ironic-lib and registered under the [disk_utils] group below.
disk_utils_opts = [
    cfg.IntOpt('efi_system_partition_size',
               default=550,
               help='Size of EFI system partition in MiB when configuring '
                    'UEFI systems for local boot. A common minimum is ~200 '
                    'megabytes, however OS driven firmware updates and '
                    'unikernel usage generally requires more space on the '
                    'efi partition.'),
    cfg.IntOpt('bios_boot_partition_size',
               default=1,
               help='Size of BIOS Boot partition in MiB when configuring '
                    'GPT partitioned systems for local boot in BIOS.'),
    cfg.StrOpt('dd_block_size',
               default='1M',
               help='Block size to use when writing to the nodes disk.'),
    cfg.IntOpt('partition_detection_attempts',
               default=3,
               min=1,
               help='Maximum attempts to detect a newly created partition.'),
    cfg.IntOpt('partprobe_attempts',
               default=10,
               help='Maximum number of attempts to try to read the '
                    'partition.'),
]

# Options controlling how long to wait for a device to become free after
# partitioning; registered under the [disk_partitioner] group below.
disk_part_opts = [
    cfg.IntOpt('check_device_interval',
               default=1,
               help='After Ironic has completed creating the partition table, '
                    'it continues to check for activity on the attached iSCSI '
                    'device status at this interval prior to copying the image'
                    ' to the node, in seconds'),
    cfg.IntOpt('check_device_max_retries',
               default=20,
               help='The maximum number of times to check that the device is '
                    'not accessed by another process. If the device is still '
                    'busy after that, the disk partitioning will be treated as'
                    ' having failed.')
]

# cli_opts is defined earlier in this module (outside this view).
CONF.register_cli_opts(cli_opts)
CONF.register_opts(disk_utils_opts, group='disk_utils')
CONF.register_opts(disk_part_opts, group='disk_partitioner')
|
||||
|
||||
|
||||
def list_opts():
    """Return this module's configuration options for oslo-config-generator.

    :returns: a list of (group name, options list) tuples, one per option
        group registered by this module.
    """
    # BUG FIX: the previous version had two consecutive return statements
    # (merge/diff residue); the first one returned only the DEFAULT group,
    # making the line listing the disk_utils and disk_partitioner groups
    # unreachable dead code.
    return [('DEFAULT', cli_opts),
            ('disk_utils', disk_utils_opts),
            ('disk_partitioner', disk_part_opts)]
|
||||
|
||||
|
||||
def override(params):
|
||||
|
124
ironic_python_agent/disk_partitioner.py
Normal file
124
ironic_python_agent/disk_partitioner.py
Normal file
@ -0,0 +1,124 @@
|
||||
# Copyright 2014 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Code for creating partitions on a disk.
|
||||
|
||||
Imported from ironic-lib's disk_utils as of the following commit:
|
||||
https://opendev.org/openstack/ironic-lib/commit/42fa5d63861ba0f04b9a4f67212173d7013a1332
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
from ironic_lib.common.i18n import _
|
||||
from ironic_lib import exception
|
||||
from ironic_lib import utils
|
||||
from oslo_config import cfg
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DiskPartitioner(object):
    """Thin convenience wrapper around the ``parted`` command-line tool.

    Partitions are accumulated in memory via :meth:`add_partition` and
    only written to the device when :meth:`commit` is called.
    """

    def __init__(self, device, disk_label='msdos', alignment='optimal'):
        """A convenient wrapper around the parted tool.

        :param device: The device path.
        :param disk_label: The type of the partition table. Valid types are:
                           "bsd", "dvh", "gpt", "loop", "mac", "msdos",
                           "pc98", or "sun".
        :param alignment: Set alignment for newly created partitions.
                          Valid types are: none, cylinder, minimal and
                          optimal.
        """
        self._device = device
        self._disk_label = disk_label
        self._alignment = alignment
        self._partitions = []

    def _exec(self, *args):
        # utils.execute() wraps processutils.execute(), raising specific
        # exceptions and logging any failure itself, so no extra logging
        # is needed here.
        utils.execute('parted', '-a', self._alignment, '-s', self._device,
                      '--', 'unit', 'MiB', *args, use_standard_locale=True)

    def add_partition(self, size, part_type='primary', fs_type='',
                      boot_flag=None, extra_flags=None):
        """Record a partition to be created on :meth:`commit`.

        :param size: The size of the partition in MiB.
        :param part_type: The type of the partition. Valid values are:
                          primary, logical, or extended.
        :param fs_type: The filesystem type. Valid types are: ext2, fat32,
                        fat16, HFS, linux-swap, NTFS, reiserfs, ufs.
                        If blank (''), it will create a Linux native
                        partition (83).
        :param boot_flag: Boot flag that needs to be configured on the
                          partition. Ignored if None. It can take values
                          'bios_grub', 'boot'.
        :param extra_flags: List of flags to set on the partition. Ignored
                            if None.
        :returns: The partition number (1-based).
        """
        record = {
            'size': size,
            'type': part_type,
            'fs_type': fs_type,
            'boot_flag': boot_flag,
            'extra_flags': extra_flags,
        }
        self._partitions.append(record)
        return len(self._partitions)

    def get_partitions(self):
        """Get the partitioning layout.

        :returns: An iterator of (partition number, partition layout)
            pairs, numbered from 1.
        """
        return enumerate(self._partitions, 1)

    def commit(self):
        """Write to the disk."""
        LOG.debug("Committing partitions to disk.")
        cmd_args = ['mklabel', self._disk_label]
        # Lead in with 1 MiB to allow room for the partition table itself.
        start = 1
        for num, part in self.get_partitions():
            end = start + part['size']
            cmd_args += ['mkpart', part['type'], part['fs_type'],
                         str(start), str(end)]
            if part['boot_flag']:
                cmd_args += ['set', str(num), part['boot_flag'], 'on']
            for flag in (part['extra_flags'] or ()):
                cmd_args += ['set', str(num), flag, 'on']
            start = end

        self._exec(*cmd_args)

        try:
            from ironic_python_agent import disk_utils  # circular dependency
            disk_utils.wait_for_disk_to_become_available(self._device)
        except exception.IronicException as e:
            raise exception.InstanceDeployFailure(
                _('Disk partitioning failed on device %(device)s. '
                  'Error: %(error)s')
                % {'device': self._device, 'error': e})
|
709
ironic_python_agent/disk_utils.py
Normal file
709
ironic_python_agent/disk_utils.py
Normal file
@ -0,0 +1,709 @@
|
||||
# Copyright 2014 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Various utilities related to disk handling.
|
||||
|
||||
Imported from ironic-lib's disk_utils as of the following commit:
|
||||
https://opendev.org/openstack/ironic-lib/commit/42fa5d63861ba0f04b9a4f67212173d7013a1332
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import stat
|
||||
import time
|
||||
|
||||
from ironic_lib.common.i18n import _
|
||||
from ironic_lib import exception
|
||||
from ironic_lib import qemu_img
|
||||
from ironic_lib import utils
|
||||
from oslo_concurrency import processutils
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import excutils
|
||||
import tenacity
|
||||
|
||||
from ironic_python_agent import disk_partitioner
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
_PARTED_PRINT_RE = re.compile(r"^(\d+):([\d\.]+)MiB:"
|
||||
r"([\d\.]+)MiB:([\d\.]+)MiB:(\w*):(.*):(.*);")
|
||||
_PARTED_TABLE_TYPE_RE = re.compile(r'^.*partition\s+table\s*:\s*(gpt|msdos)',
|
||||
re.IGNORECASE | re.MULTILINE)
|
||||
|
||||
CONFIGDRIVE_LABEL = "config-2"
|
||||
MAX_CONFIG_DRIVE_SIZE_MB = 64
|
||||
|
||||
GPT_SIZE_SECTORS = 33
|
||||
|
||||
# Maximum disk size supported by MBR is 2TB (2 * 1024 * 1024 MB)
|
||||
MAX_DISK_SIZE_MB_SUPPORTED_BY_MBR = 2097152
|
||||
|
||||
|
||||
def list_partitions(device):
    """Get partitions information from given device.

    :param device: The device path.
    :returns: list of dictionaries (one per partition) with keys:
              number, start, end, size (in MiB), filesystem,
              partition_name, flags, path.
    """
    report = utils.execute(
        'parted', '-s', '-m', device, 'unit', 'MiB', 'print',
        use_standard_locale=True)[0]
    if isinstance(report, bytes):
        report = report.decode("utf-8")
    # Skip the two header lines and any blank lines.
    data_lines = [ln for ln in report.split('\n') if ln.strip()][2:]
    # Example of line: 1:1.00MiB:501MiB:500MiB:ext4::boot
    keys = ('number', 'start', 'end', 'size', 'filesystem',
            'partition_name', 'flags')
    partitions = []
    for ln in data_lines:
        match = _PARTED_PRINT_RE.match(ln)
        if match is None:
            LOG.warning("Partition information from parted for device "
                        "%(device)s does not match "
                        "expected format: %(line)s",
                        dict(device=device, line=ln))
            continue
        # The first four fields are numeric; floats are rounded down.
        values = [int(float(v)) if idx < 4 else v
                  for idx, v in enumerate(match.groups())]
        entry = dict(zip(keys, values))
        entry['path'] = partition_index_to_path(device, entry['number'])
        partitions.append(entry)
    return partitions
|
||||
|
||||
|
||||
def count_mbr_partitions(device):
    """Count the number of primary and logical partitions on a MBR

    :param device: The device path.
    :returns: A tuple with the number of primary partitions and logical
              partitions.
    :raise: ValueError if the device does not have a valid MBR partition
            table.
    """
    # -d do not update the kernel table
    # -s print a summary of the partition table
    output, err = utils.execute('partprobe', '-d', '-s', device,
                                use_standard_locale=True)
    if 'msdos' not in output:
        raise ValueError('The device %s does not have a valid MBR '
                         'partition table' % device)
    # Sample output: /dev/vdb: msdos partitions 1 2 3 <5 6 7>
    # Partition numbers above 4 (wrapped in <>) are logical partitions.
    numbers = [int(token)
               for token in output.replace('<', '').replace('>', '').split()
               if token.isdigit()]
    primary = sum(1 for n in numbers if n < 5)
    logical = sum(1 for n in numbers if n > 4)
    return (primary, logical)
|
||||
|
||||
|
||||
def get_disk_identifier(dev):
    """Get the disk identifier from the disk being exposed by the ramdisk.

    This disk identifier is appended to the pxe config which will then be
    used by chain.c32 to detect the correct disk to chainload. This is
    helpful in deployments to nodes with multiple disks.

    http://www.syslinux.org/wiki/index.php/Comboot/chain.c32#mbr:

    :param dev: Path for the already populated disk device.
    :raises OSError: When the hexdump binary is unavailable.
    :returns: The Disk Identifier.
    """
    # Read the 4-byte MBR disk signature at offset 440, formatted as hex.
    stdout, _stderr = utils.execute('hexdump', '-s', '440', '-n', '4',
                                    '-e', '''\"0x%08x\"''',
                                    dev, attempts=5, delay_on_retry=True)
    return stdout
|
||||
|
||||
|
||||
def get_partition_table_type(device):
    """Get partition table type, msdos or gpt.

    :param device: the name of the device
    :return: dos, gpt or None
    """
    report = utils.execute('parted', '--script', device, '--', 'print',
                           use_standard_locale=True)[0]
    match = _PARTED_TABLE_TYPE_RE.search(report)
    if match is not None:
        return match.group(1)

    LOG.warning("Unable to get partition table type for device %s", device)
    return 'unknown'
|
||||
|
||||
|
||||
def _blkid(device, probe=False, fields=None):
    """Run blkid against *device* and return the value portion of its output.

    :param device: device path to inspect.
    :param probe: when True, pass ``-p`` to probe at a low level.
    :param fields: optional list of tag names to restrict the output to.
    :returns: everything after the "<device>: " prefix, or "" when blkid
        printed nothing.
    """
    extra = []
    if probe:
        extra.append('-p')
    for field in (fields or ()):
        extra.extend(['-s', field])

    output, err = utils.execute('blkid', device, *extra,
                                use_standard_locale=True)
    if not output.strip():
        return ""
    # Output looks like "<device>: KEY="value" ..."; keep the value part.
    return output.split(': ', 1)[1]
|
||||
|
||||
|
||||
def _lsblk(device, deps=True, fields=None):
    """Run lsblk for *device* and return its KEY="value" pair output.

    :param device: device path to inspect.
    :param deps: when False, pass ``--nodeps`` so children are excluded.
    :param fields: optional list of output columns; all columns by default.
    :returns: the stripped lsblk output.
    """
    cmd = ['--pairs', '--bytes', '--ascii']
    if not deps:
        cmd.append('--nodeps')
    if fields:
        cmd += ['--output', ','.join(fields)]
    else:
        cmd.append('--output-all')

    stdout, _stderr = utils.execute('lsblk', device, *cmd,
                                    use_standard_locale=True)
    return stdout.strip()
|
||||
|
||||
|
||||
def get_device_information(device, fields=None):
    """Get information about a device using blkid.

    Can be applied to all block devices: disks, RAID, partitions.

    :param device: Device name.
    :param fields: A list of fields to request (all by default).
    :return: A dictionary with requested fields as keys.
    :raises: ProcessExecutionError
    """
    report = _lsblk(device, fields=fields, deps=False)
    if not report:
        return {}
    # Only the first (device-level) line of tags is relevant here.
    return next(utils.parse_device_tags(report))
|
||||
|
||||
|
||||
def find_efi_partition(device):
    """Looks for the EFI partition on a given device.

    A boot partition on a GPT disk is assumed to be an EFI partition as
    well.

    :param device: the name of the device
    :return: the EFI partition record from `list_partitions` or None
    """
    gpt_table = get_partition_table_type(device) == 'gpt'
    for part in list_partitions(device):
        flags = {f.strip() for f in part['flags'].split(',')}
        if 'esp' in flags or ('boot' in flags and gpt_table):
            LOG.debug("Found EFI partition %s on device %s", part, device)
            return part
    # Fell through the loop without returning: nothing matched.
    LOG.debug("No efi partition found on device %s", device)
|
||||
|
||||
|
||||
_ISCSI_PREFIX = "iqn.2008-10.org.openstack:"
|
||||
|
||||
|
||||
def is_last_char_digit(dev):
    """check whether device name ends with a digit"""
    # An empty name trivially does not end with a digit.
    return bool(dev) and dev[-1].isdigit()
|
||||
|
||||
|
||||
def partition_index_to_path(device, index):
    """Guess a partition path based on its device and index.

    :param device: Device path.
    :param index: Partition index.
    """
    # Bare-metal devices are named like /dev/sda with partitions /dev/sda1,
    # while iSCSI devices embed an IQN and use a '-part%d' suffix (this is
    # the convention ironic-conductor sees). Devices whose name already
    # ends in a digit (e.g. /dev/nvme0n1) need a 'p' separator before the
    # partition number.
    if _ISCSI_PREFIX in device:
        template = '%s-part%d'
    elif is_last_char_digit(device):
        template = '%sp%d'
    else:
        template = '%s%d'
    return template % (device, index)
|
||||
|
||||
|
||||
def make_partitions(dev, root_mb, swap_mb, ephemeral_mb,
                    configdrive_mb, node_uuid, commit=True,
                    boot_option="netboot", boot_mode="bios",
                    disk_label=None, cpu_arch=""):
    """Partition the disk device.

    Create partitions for root, swap, ephemeral and configdrive on a
    disk device.

    :param dev: Path for the device to work on.
    :param root_mb: Size of the root partition in mebibytes (MiB).
    :param swap_mb: Size of the swap partition in mebibytes (MiB). If 0,
        no partition will be created.
    :param ephemeral_mb: Size of the ephemeral partition in mebibytes (MiB).
        If 0, no partition will be created.
    :param configdrive_mb: Size of the configdrive partition in
        mebibytes (MiB). If 0, no partition will be created.
    :param commit: True/False. Default for this setting is True. If False
        partitions will not be written to disk.
    :param boot_option: Can be "local" or "netboot". "netboot" by default.
    :param boot_mode: Can be "bios" or "uefi". "bios" by default.
    :param node_uuid: Node's uuid. Used for logging.
    :param disk_label: The disk label to be used when creating the
        partition table. Valid values are: "msdos", "gpt" or None; If None
        Ironic will figure it out according to the boot_mode parameter.
    :param cpu_arch: Architecture of the node the disk device belongs to.
        When using the default value of None, no architecture specific
        steps will be taken. This default should be used for x86_64. When
        set to ppc64*, architecture specific steps are taken for booting a
        partition image locally.
    :returns: A dictionary containing the partition type as Key and partition
        path as Value for the partitions created by this method.

    """
    # NOTE(review): the docstring above says cpu_arch defaults to None, but
    # the actual default is "" — cpu_arch.startswith() below would raise
    # AttributeError on None. Confirm callers never pass None.
    LOG.debug("Starting to partition the disk device: %(dev)s "
              "for node %(node)s",
              {'dev': dev, 'node': node_uuid})
    part_dict = {}

    # gpt is required for UEFI boot; msdos is the traditional BIOS default.
    if disk_label is None:
        disk_label = 'gpt' if boot_mode == 'uefi' else 'msdos'

    dp = disk_partitioner.DiskPartitioner(dev, disk_label=disk_label)

    # For uefi localboot, switch partition table to gpt and create the efi
    # system partition as the first partition.
    if boot_mode == "uefi" and boot_option == "local":
        part_num = dp.add_partition(CONF.disk_utils.efi_system_partition_size,
                                    fs_type='fat32',
                                    boot_flag='boot')
        part_dict['efi system partition'] = partition_index_to_path(
            dev, part_num)

    # GRUB on BIOS+GPT needs a tiny bios_grub partition for its core image.
    if (boot_mode == "bios" and boot_option == "local" and disk_label == "gpt"
            and not cpu_arch.startswith('ppc64')):
        part_num = dp.add_partition(CONF.disk_utils.bios_boot_partition_size,
                                    boot_flag='bios_grub')
        part_dict['BIOS Boot partition'] = partition_index_to_path(
            dev, part_num)

    # NOTE(mjturek): With ppc64* nodes, partition images are expected to have
    # a PrEP partition at the start of the disk. This is an 8 MiB partition
    # with the boot and prep flags set. The bootloader should be installed
    # here.
    if (cpu_arch.startswith("ppc64") and boot_mode == "bios"
            and boot_option == "local"):
        LOG.debug("Add PReP boot partition (8 MB) to device: "
                  "%(dev)s for node %(node)s",
                  {'dev': dev, 'node': node_uuid})
        boot_flag = 'boot' if disk_label == 'msdos' else None
        part_num = dp.add_partition(8, part_type='primary',
                                    boot_flag=boot_flag, extra_flags=['prep'])
        part_dict['PReP Boot partition'] = partition_index_to_path(
            dev, part_num)
    if ephemeral_mb:
        LOG.debug("Add ephemeral partition (%(size)d MB) to device: %(dev)s "
                  "for node %(node)s",
                  {'dev': dev, 'size': ephemeral_mb, 'node': node_uuid})
        part_num = dp.add_partition(ephemeral_mb)
        part_dict['ephemeral'] = partition_index_to_path(dev, part_num)
    if swap_mb:
        LOG.debug("Add Swap partition (%(size)d MB) to device: %(dev)s "
                  "for node %(node)s",
                  {'dev': dev, 'size': swap_mb, 'node': node_uuid})
        part_num = dp.add_partition(swap_mb, fs_type='linux-swap')
        part_dict['swap'] = partition_index_to_path(dev, part_num)
    if configdrive_mb:
        LOG.debug("Add config drive partition (%(size)d MB) to device: "
                  "%(dev)s for node %(node)s",
                  {'dev': dev, 'size': configdrive_mb, 'node': node_uuid})
        part_num = dp.add_partition(configdrive_mb)
        part_dict['configdrive'] = partition_index_to_path(dev, part_num)

    # NOTE(lucasagomes): Make the root partition the last partition. This
    # enables tools like cloud-init's growroot utility to expand the root
    # partition until the end of the disk.
    LOG.debug("Add root partition (%(size)d MB) to device: %(dev)s "
              "for node %(node)s",
              {'dev': dev, 'size': root_mb, 'node': node_uuid})

    # On msdos labels for BIOS local boot the root itself carries the boot
    # flag (there is no separate boot partition in that layout).
    boot_val = 'boot' if (not cpu_arch.startswith("ppc64")
                          and boot_mode == "bios"
                          and boot_option == "local"
                          and disk_label == "msdos") else None

    part_num = dp.add_partition(root_mb, boot_flag=boot_val)

    part_dict['root'] = partition_index_to_path(dev, part_num)

    if commit:
        # write to the disk
        dp.commit()
        trigger_device_rescan(dev)
    return part_dict
|
||||
|
||||
|
||||
def is_block_device(dev):
    """Check whether a device is block or not."""
    total = CONF.disk_utils.partition_detection_attempts
    for i in range(total):
        try:
            info = os.stat(dev)
        except OSError as err:
            # The device node may not exist yet; retry after a short pause.
            LOG.debug("Unable to stat device %(dev)s. Attempt %(attempt)d "
                      "out of %(total)d. Error: %(err)s",
                      {"dev": dev, "attempt": i + 1,
                       "total": total, "err": err})
            time.sleep(1)
        else:
            return stat.S_ISBLK(info.st_mode)
    msg = _("Unable to stat device %(dev)s after attempting to verify "
            "%(attempts)d times.") % {'dev': dev, 'attempts': total}
    LOG.error(msg)
    raise exception.InstanceDeployFailure(msg)
|
||||
|
||||
|
||||
def dd(src, dst, conv_flags=None):
    """Execute dd from src to dst."""
    # oflag=direct bypasses the page cache; the block size comes from config.
    extra_args = ['conv=%s' % conv_flags] if conv_flags else []
    utils.dd(src, dst, 'bs=%s' % CONF.disk_utils.dd_block_size, 'oflag=direct',
             *extra_args)
|
||||
|
||||
|
||||
def populate_image(src, dst, conv_flags=None):
    """Write the image at *src* onto *dst*, converting it when not raw."""
    info = qemu_img.image_info(src)
    if info.file_format == 'raw':
        # Raw images can be streamed straight onto the device with dd.
        dd(src, dst, conv_flags=conv_flags)
    else:
        qemu_img.convert_image(src, dst, 'raw', True, sparse_size='0')
|
||||
|
||||
|
||||
def block_uuid(dev):
    """Get UUID of a block device.

    Try to fetch the UUID, if that fails, try to fetch the PARTUUID.
    """
    info = get_device_information(dev, fields=['UUID', 'PARTUUID'])
    uuid = info.get('UUID')
    if uuid:
        return uuid
    LOG.debug('Falling back to partition UUID as the block device UUID '
              'was not found while examining %(device)s',
              {'device': dev})
    return info.get('PARTUUID', '')
|
||||
|
||||
|
||||
def get_image_mb(image_path, virtual_size=True):
    """Get size of an image in Megabyte.

    :param image_path: path to the image file.
    :param virtual_size: when True (the default), use the virtual size
        reported by qemu-img; otherwise use the on-disk file size.
    :returns: the image size rounded up to whole mebibytes.
    """
    mb = 1024 * 1024
    if not virtual_size:
        image_byte = os.path.getsize(image_path)
    else:
        data = qemu_img.image_info(image_path)
        image_byte = data.virtual_size

    # Round up to the next whole MiB. Use integer ceiling division:
    # the previous float-based int((x + mb - 1) / mb) loses precision
    # for sizes beyond 2**53 bytes.
    return (image_byte + mb - 1) // mb
|
||||
|
||||
|
||||
def get_dev_block_size(dev):
    """Get the device size in 512 byte sectors."""
    stdout, _stderr = utils.execute('blockdev', '--getsz', dev)
    return int(stdout)
|
||||
|
||||
|
||||
def destroy_disk_metadata(dev, node_uuid):
    """Destroy metadata structures on node's disk.

    Ensure that node's disk magic strings are wiped without zeroing the
    entire drive. To do this we use the wipefs tool from util-linux.

    :param dev: Path for the device to work on.
    :param node_uuid: Node's uuid. Used for logging.
    """
    # NOTE(NobodyCam): This is needed to work around bug:
    # https://bugs.launchpad.net/ironic/+bug/1317647
    LOG.debug("Start destroy disk metadata for node %(node)s.",
              {'node': node_uuid})
    try:
        utils.execute('wipefs', '--force', '--all', dev,
                      use_standard_locale=True)
    except processutils.ProcessExecutionError as e:
        with excutils.save_and_reraise_exception() as ctxt:
            # NOTE(zhenguo): Check if --force option is supported for wipefs,
            # if not, we should try without it.
            if '--force' in str(e):
                ctxt.reraise = False
                utils.execute('wipefs', '--all', dev,
                              use_standard_locale=True)
    # NOTE(TheJulia): sgdisk attempts to load and make sense of the
    # partition tables in advance of wiping the partition data.
    # This means when a CRC error is found, sgdisk fails before
    # erasing partition data.
    # This is the same bug as
    # https://bugs.launchpad.net/ironic-python-agent/+bug/1737556

    # Overwrite the Primary GPT, catch very small partitions (like EBRs)
    dd_device = 'of=%s' % dev
    dd_count = 'count=%s' % GPT_SIZE_SECTORS
    dev_size = get_dev_block_size(dev)
    # Devices smaller than a GPT: only zero as many sectors as exist.
    if dev_size < GPT_SIZE_SECTORS:
        dd_count = 'count=%s' % dev_size
    utils.execute('dd', 'bs=512', 'if=/dev/zero', dd_device, dd_count,
                  'oflag=direct', use_standard_locale=True)

    # Overwrite the Secondary GPT, do this only if there could be one
    if dev_size > GPT_SIZE_SECTORS:
        # The backup GPT occupies the last GPT_SIZE_SECTORS sectors.
        gpt_backup = dev_size - GPT_SIZE_SECTORS
        dd_seek = 'seek=%i' % gpt_backup
        dd_count = 'count=%s' % GPT_SIZE_SECTORS
        utils.execute('dd', 'bs=512', 'if=/dev/zero', dd_device, dd_count,
                      'oflag=direct', dd_seek, use_standard_locale=True)

    # Go ahead and let sgdisk run as well.
    utils.execute('sgdisk', '-Z', dev, use_standard_locale=True)

    # Block until no other process holds the device, so later partitioning
    # does not race with lingering users of the old metadata.
    try:
        wait_for_disk_to_become_available(dev)
    except exception.IronicException as e:
        raise exception.InstanceDeployFailure(
            _('Destroying metadata failed on device %(device)s. '
              'Error: %(error)s')
            % {'device': dev, 'error': e})

    LOG.info("Disk metadata on %(dev)s successfully destroyed for node "
             "%(node)s", {'dev': dev, 'node': node_uuid})
|
||||
|
||||
|
||||
def _fix_gpt_structs(device, node_uuid):
    """Checks backup GPT data structures and moves them to end of the device

    :param device: The device path.
    :param node_uuid: UUID of the Node. Used for logging.
    :raises: InstanceDeployFailure, if any disk partitioning related
        commands fail.
    """
    try:
        report, _err = utils.execute('sgdisk', '-v', device)

        # sgdisk -v prints this warning when the backup table is misplaced.
        misplaced = "it doesn't reside\nat the end of the disk" in report
        if misplaced:
            # Relocate the backup GPT to the true end of the device.
            utils.execute('sgdisk', '-e', device)
    except (processutils.UnknownArgumentError,
            processutils.ProcessExecutionError, OSError) as e:
        msg = (_('Failed to fix GPT data structures on disk %(disk)s '
                 'for node %(node)s. Error: %(error)s') %
               {'disk': device, 'node': node_uuid, 'error': e})
        LOG.error(msg)
        raise exception.InstanceDeployFailure(msg)
|
||||
|
||||
|
||||
def fix_gpt_partition(device, node_uuid):
    """Fix GPT partition

    Fix GPT table information when image is written to a disk which
    has a bigger extend (e.g. 30GB image written on a 60Gb physical disk).

    :param device: The device path.
    :param node_uuid: UUID of the Node.
    :raises: InstanceDeployFailure if exception is caught.
    """
    try:
        # Only GPT disks need (or tolerate) the backup-table relocation.
        if get_partition_table_type(device) == 'gpt':
            _fix_gpt_structs(device, node_uuid)
    except Exception as e:
        msg = (_('Failed to fix GPT partition on disk %(disk)s '
                 'for node %(node)s. Error: %(error)s') %
               {'disk': device, 'node': node_uuid, 'error': e})
        LOG.error(msg)
        raise exception.InstanceDeployFailure(msg)
|
||||
|
||||
|
||||
def udev_settle():
    """Wait for the udev event queue to settle.

    Wait for the udev event queue to settle to make sure all devices
    are detected once the machine boots up.

    :return: True on success, False otherwise.
    """
    LOG.debug('Waiting until udev event queue is empty')
    try:
        utils.execute('udevadm', 'settle')
    except processutils.ProcessExecutionError as e:
        LOG.warning('Something went wrong when waiting for udev '
                    'to settle. Error: %s', e)
        return False
    return True
|
||||
|
||||
|
||||
def partprobe(device, attempts=None):
    """Probe partitions on the given device.

    :param device: The block device containing partitions that is attempting
                   to be updated.
    :param attempts: Number of attempts to run partprobe, the default is read
                     from the configuration.
    :return: True on success, False otherwise.
    """
    retries = (CONF.disk_utils.partprobe_attempts
               if attempts is None else attempts)

    try:
        utils.execute('partprobe', device, attempts=retries)
    except (processutils.UnknownArgumentError,
            processutils.ProcessExecutionError, OSError) as e:
        LOG.warning("Unable to probe for partitions on device %(device)s, "
                    "the partitioning table may be broken. Error: %(error)s",
                    {'device': device, 'error': e})
        return False
    return True
|
||||
|
||||
|
||||
def trigger_device_rescan(device, attempts=None):
    """Sync and trigger device rescan.

    Disk partition performed via parted, when performed on a ramdisk
    do not have to honor the fsync mechanism. In essence, fsync is used
    on the file representing the block device, which falls to the kernel
    filesystem layer to trigger a sync event. On a ramdisk using ramfs,
    this is an explicit non-operation.

    As a result of this, we need to trigger a system wide sync operation
    which will trigger cache to flush to disk, after which partition changes
    should be visible upon re-scan.

    When ramdisks are not in use, this also helps ensure that data has
    been safely flushed across the wire, such as on iscsi connections.

    :param device: The block device containing partitions that is attempting
                   to be updated.
    :param attempts: Number of attempts to run partprobe, the default is read
                     from the configuration.
    :return: True on success, False otherwise.
    """
    LOG.debug('Explicitly calling sync to force buffer/cache flush')
    utils.execute('sync')

    # Make sure any additions to the partitioning are reflected in the
    # kernel.
    udev_settle()
    partprobe(device, attempts=attempts)
    udev_settle()

    try:
        # Also verify that the partitioning is correct now.
        utils.execute('sgdisk', '-v', device)
    except processutils.ProcessExecutionError as exc:
        LOG.warning('Failed to verify partition tables on device %(dev)s: '
                    '%(err)s', {'dev': device, 'err': exc})
        return False
    return True
|
||||
|
||||
|
||||
# NOTE(dtantsur): this function was in ironic_lib.utils before migration
# (presumably to avoid a circular dependency with disk_partitioner)
def wait_for_disk_to_become_available(device):
    """Wait for a disk device to become available.

    Waits for a disk device to become available for use by
    waiting until all process locks on the device have been
    released.

    Timeout and iteration settings come from the configuration
    options used by the in-library disk_partitioner:
    ``check_device_interval`` and ``check_device_max_retries``.

    :params device: The path to the device.
    :raises: IronicException If the disk fails to become
        available.
    """
    # Single-element lists so the nested _wait_for_disk closure can
    # mutate these values for use in the error messages below.
    pids = ['']
    stderr = ['']
    interval = CONF.disk_partitioner.check_device_interval
    max_retries = CONF.disk_partitioner.check_device_max_retries

    def _wait_for_disk():
        # Returns True when nothing holds the device, False otherwise
        # (including when fuser itself fails to run).
        # A regex is likely overkill here, but variations in fuser
        # means we should likely use it.
        fuser_pids_re = re.compile(r'\d+')

        # There are 'psmisc' and 'busybox' versions of the 'fuser' program.
        # The 'fuser' programs differ in how they output data to stderr.
        # The busybox version does not output the filename to stderr, while
        # the standard 'psmisc' version does output the filename to stderr.
        # How they output to stdout is almost identical in that only the
        # PIDs are output to stdout, with the 'psmisc' version adding a
        # leading space character to the list of PIDs.
        try:
            # NOTE(ifarkas): fuser returns a non-zero return code if none of
            #                the specified files is accessed.
            # NOTE(TheJulia): fuser does not report LVM devices as in use
            #                 unless the LVM device-mapper device is the
            #                 device that is directly polled.
            # NOTE(TheJulia): The -m flag allows fuser to reveal data about
            #                 mounted filesystems, which should be considered
            #                 busy/locked. That being said, it is not used
            #                 because busybox fuser has a different behavior.
            # NOTE(TheJuia): fuser outputs a list of found PIDs to stdout.
            #                All other text is returned via stderr, and the
            #                output to a terminal is merged as a result.
            out, err = utils.execute('fuser', device, check_exit_code=[0, 1])

            if not out and not err:
                # No PIDs and no diagnostics: the device is free.
                return True

            stderr[0] = err
            # NOTE: findall() returns a list of matches, or an empty list
            # if no matches
            pids[0] = fuser_pids_re.findall(out)

        except processutils.ProcessExecutionError as exc:
            LOG.warning('Failed to check the device %(device)s with fuser:'
                        ' %(err)s', {'device': device, 'err': exc})
        return False

    # Re-run _wait_for_disk while it returns a falsy result, sleeping
    # `interval` seconds between attempts, up to `max_retries` attempts;
    # reraise=True surfaces tenacity.RetryError on exhaustion.
    retry = tenacity.retry(
        retry=tenacity.retry_if_result(lambda r: not r),
        stop=tenacity.stop_after_attempt(max_retries),
        wait=tenacity.wait_fixed(interval),
        reraise=True)
    try:
        retry(_wait_for_disk)()
    except tenacity.RetryError:
        if pids[0]:
            raise exception.IronicException(
                _('Processes with the following PIDs are holding '
                  'device %(device)s: %(pids)s. '
                  'Timed out waiting for completion.')
                % {'device': device, 'pids': ', '.join(pids[0])})
        else:
            raise exception.IronicException(
                _('Fuser exited with "%(fuser_err)s" while checking '
                  'locks for device %(device)s. Timed out waiting for '
                  'completion.')
                % {'device': device, 'fuser_err': stderr[0]})
|
@ -15,10 +15,10 @@ import re
|
||||
import sys
|
||||
import tempfile
|
||||
|
||||
from ironic_lib import disk_utils
|
||||
from oslo_concurrency import processutils
|
||||
from oslo_log import log
|
||||
|
||||
from ironic_python_agent import disk_utils
|
||||
from ironic_python_agent import errors
|
||||
from ironic_python_agent import hardware
|
||||
from ironic_python_agent import partition_utils
|
||||
|
@ -19,13 +19,14 @@ import tempfile
|
||||
import time
|
||||
from urllib import parse as urlparse
|
||||
|
||||
from ironic_lib import disk_utils
|
||||
from ironic_lib import exception
|
||||
from ironic_lib import qemu_img
|
||||
from oslo_concurrency import processutils
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
import requests
|
||||
|
||||
from ironic_python_agent import disk_utils
|
||||
from ironic_python_agent import errors
|
||||
from ironic_python_agent.extensions import base
|
||||
from ironic_python_agent import hardware
|
||||
@ -349,9 +350,9 @@ def _write_whole_disk_image(image, image_info, device):
|
||||
image, device]
|
||||
LOG.info('Writing image with command: %s', ' '.join(command))
|
||||
try:
|
||||
disk_utils.convert_image(image, device, out_format='host_device',
|
||||
cache='directsync', out_of_order=True,
|
||||
sparse_size='0')
|
||||
qemu_img.convert_image(image, device, out_format='host_device',
|
||||
cache='directsync', out_of_order=True,
|
||||
sparse_size='0')
|
||||
except processutils.ProcessExecutionError as e:
|
||||
raise errors.ImageWriteError(device, e.exit_code, e.stdout, e.stderr)
|
||||
|
||||
@ -750,17 +751,7 @@ def _validate_partitioning(device):
|
||||
Check if after writing the image to disk we have a valid partition
|
||||
table by trying to read it. This will fail if the disk is junk.
|
||||
"""
|
||||
try:
|
||||
# Ensure we re-read the partition table before we try to list
|
||||
# partitions
|
||||
utils.execute('partprobe', device,
|
||||
attempts=CONF.disk_utils.partprobe_attempts)
|
||||
except (processutils.UnknownArgumentError,
|
||||
processutils.ProcessExecutionError, OSError) as e:
|
||||
LOG.warning("Unable to probe for partitions on device %(device)s "
|
||||
"after writing the image, the partitioning table may "
|
||||
"be broken. Error: %(error)s",
|
||||
{'device': device, 'error': e})
|
||||
disk_utils.partprobe(device)
|
||||
|
||||
try:
|
||||
nparts = len(disk_utils.list_partitions(device))
|
||||
|
@ -29,7 +29,6 @@ import stat
|
||||
import string
|
||||
import time
|
||||
|
||||
from ironic_lib import disk_utils
|
||||
from ironic_lib import utils as il_utils
|
||||
from oslo_concurrency import processutils
|
||||
from oslo_config import cfg
|
||||
@ -41,6 +40,7 @@ import stevedore
|
||||
import yaml
|
||||
|
||||
from ironic_python_agent import burnin
|
||||
from ironic_python_agent import disk_utils
|
||||
from ironic_python_agent import encoding
|
||||
from ironic_python_agent import errors
|
||||
from ironic_python_agent.extensions import base as ext_base
|
||||
@ -102,21 +102,6 @@ def _get_device_info(dev, devclass, field):
|
||||
{'field': field, 'dev': dev, 'class': devclass})
|
||||
|
||||
|
||||
def _udev_settle():
|
||||
"""Wait for the udev event queue to settle.
|
||||
|
||||
Wait for the udev event queue to settle to make sure all devices
|
||||
are detected once the machine boots up.
|
||||
|
||||
"""
|
||||
try:
|
||||
il_utils.execute('udevadm', 'settle')
|
||||
except processutils.ProcessExecutionError as e:
|
||||
LOG.warning('Something went wrong when waiting for udev '
|
||||
'to settle. Error: %s', e)
|
||||
return
|
||||
|
||||
|
||||
def _load_ipmi_modules():
|
||||
"""Load kernel modules required for IPMI interaction.
|
||||
|
||||
@ -508,7 +493,7 @@ def list_all_block_devices(block_type='disk',
|
||||
|
||||
check_multipath = not ignore_multipath and get_multipath_status()
|
||||
|
||||
_udev_settle()
|
||||
disk_utils.udev_settle()
|
||||
|
||||
# map device names to /dev/disk/by-path symbolic links that points to it
|
||||
|
||||
|
@ -16,12 +16,12 @@ import base64
|
||||
import contextlib
|
||||
import os
|
||||
|
||||
from ironic_lib import disk_utils
|
||||
from ironic_lib import utils as ironic_utils
|
||||
from oslo_concurrency import processutils
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log
|
||||
|
||||
from ironic_python_agent import disk_utils
|
||||
from ironic_python_agent import errors
|
||||
from ironic_python_agent import hardware
|
||||
from ironic_python_agent import utils
|
||||
|
@ -26,7 +26,6 @@ import shutil
|
||||
import stat
|
||||
import tempfile
|
||||
|
||||
from ironic_lib import disk_utils
|
||||
from ironic_lib import exception
|
||||
from ironic_lib import utils
|
||||
from oslo_concurrency import processutils
|
||||
@ -37,6 +36,7 @@ from oslo_utils import units
|
||||
from oslo_utils import uuidutils
|
||||
import requests
|
||||
|
||||
from ironic_python_agent import disk_utils
|
||||
from ironic_python_agent import errors
|
||||
from ironic_python_agent import hardware
|
||||
from ironic_python_agent import utils as ipa_utils
|
||||
|
@ -14,11 +14,11 @@ import copy
|
||||
import re
|
||||
import shlex
|
||||
|
||||
from ironic_lib import disk_utils
|
||||
from ironic_lib import utils as il_utils
|
||||
from oslo_concurrency import processutils
|
||||
from oslo_log import log as logging
|
||||
|
||||
from ironic_python_agent import disk_utils
|
||||
from ironic_python_agent import errors
|
||||
from ironic_python_agent import utils
|
||||
|
||||
|
@ -18,10 +18,10 @@ import shutil
|
||||
import tempfile
|
||||
from unittest import mock
|
||||
|
||||
from ironic_lib import disk_utils
|
||||
from ironic_lib import utils as ilib_utils
|
||||
from oslo_concurrency import processutils
|
||||
|
||||
from ironic_python_agent import disk_utils
|
||||
from ironic_python_agent import efi_utils
|
||||
from ironic_python_agent import errors
|
||||
from ironic_python_agent.extensions import image
|
||||
|
@ -279,11 +279,14 @@ class TestStandbyExtension(base.IronicAgentTest):
|
||||
None,
|
||||
image_info['id'])
|
||||
|
||||
@mock.patch('ironic_lib.disk_utils.fix_gpt_partition', autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.trigger_device_rescan', autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.convert_image', autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.udev_settle', autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.destroy_disk_metadata', autospec=True)
|
||||
@mock.patch('ironic_python_agent.disk_utils.fix_gpt_partition',
|
||||
autospec=True)
|
||||
@mock.patch('ironic_python_agent.disk_utils.trigger_device_rescan',
|
||||
autospec=True)
|
||||
@mock.patch('ironic_lib.qemu_img.convert_image', autospec=True)
|
||||
@mock.patch('ironic_python_agent.disk_utils.udev_settle', autospec=True)
|
||||
@mock.patch('ironic_python_agent.disk_utils.destroy_disk_metadata',
|
||||
autospec=True)
|
||||
def test_write_image(self, wipe_mock, udev_mock, convert_mock,
|
||||
rescan_mock, fix_gpt_mock):
|
||||
image_info = _build_fake_image_info()
|
||||
@ -302,11 +305,14 @@ class TestStandbyExtension(base.IronicAgentTest):
|
||||
rescan_mock.assert_called_once_with(device)
|
||||
fix_gpt_mock.assert_called_once_with(device, node_uuid=None)
|
||||
|
||||
@mock.patch('ironic_lib.disk_utils.fix_gpt_partition', autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.trigger_device_rescan', autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.convert_image', autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.udev_settle', autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.destroy_disk_metadata', autospec=True)
|
||||
@mock.patch('ironic_python_agent.disk_utils.fix_gpt_partition',
|
||||
autospec=True)
|
||||
@mock.patch('ironic_python_agent.disk_utils.trigger_device_rescan',
|
||||
autospec=True)
|
||||
@mock.patch('ironic_lib.qemu_img.convert_image', autospec=True)
|
||||
@mock.patch('ironic_python_agent.disk_utils.udev_settle', autospec=True)
|
||||
@mock.patch('ironic_python_agent.disk_utils.destroy_disk_metadata',
|
||||
autospec=True)
|
||||
def test_write_image_gpt_fails(self, wipe_mock, udev_mock, convert_mock,
|
||||
rescan_mock, fix_gpt_mock):
|
||||
image_info = _build_fake_image_info()
|
||||
@ -315,9 +321,10 @@ class TestStandbyExtension(base.IronicAgentTest):
|
||||
fix_gpt_mock.side_effect = exception.InstanceDeployFailure
|
||||
standby._write_image(image_info, device)
|
||||
|
||||
@mock.patch('ironic_lib.disk_utils.convert_image', autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.udev_settle', autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.destroy_disk_metadata', autospec=True)
|
||||
@mock.patch('ironic_lib.qemu_img.convert_image', autospec=True)
|
||||
@mock.patch('ironic_python_agent.disk_utils.udev_settle', autospec=True)
|
||||
@mock.patch('ironic_python_agent.disk_utils.destroy_disk_metadata',
|
||||
autospec=True)
|
||||
def test_write_image_fails(self, wipe_mock, udev_mock, convert_mock):
|
||||
image_info = _build_fake_image_info()
|
||||
device = '/dev/sda'
|
||||
@ -332,7 +339,7 @@ class TestStandbyExtension(base.IronicAgentTest):
|
||||
@mock.patch.object(hardware, 'dispatch_to_managers', autospec=True)
|
||||
@mock.patch('builtins.open', autospec=True)
|
||||
@mock.patch('ironic_python_agent.utils.execute', autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.get_image_mb', autospec=True)
|
||||
@mock.patch('ironic_python_agent.disk_utils.get_image_mb', autospec=True)
|
||||
@mock.patch.object(partition_utils, 'work_on_disk', autospec=True)
|
||||
def test_write_partition_image_exception(self, work_on_disk_mock,
|
||||
image_mb_mock,
|
||||
@ -376,7 +383,7 @@ class TestStandbyExtension(base.IronicAgentTest):
|
||||
@mock.patch.object(hardware, 'dispatch_to_managers', autospec=True)
|
||||
@mock.patch('builtins.open', autospec=True)
|
||||
@mock.patch('ironic_python_agent.utils.execute', autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.get_image_mb', autospec=True)
|
||||
@mock.patch('ironic_python_agent.disk_utils.get_image_mb', autospec=True)
|
||||
@mock.patch.object(partition_utils, 'work_on_disk', autospec=True)
|
||||
def test_write_partition_image_no_node_uuid(self, work_on_disk_mock,
|
||||
image_mb_mock,
|
||||
@ -423,7 +430,7 @@ class TestStandbyExtension(base.IronicAgentTest):
|
||||
@mock.patch.object(hardware, 'dispatch_to_managers', autospec=True)
|
||||
@mock.patch('builtins.open', autospec=True)
|
||||
@mock.patch('ironic_python_agent.utils.execute', autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.get_image_mb', autospec=True)
|
||||
@mock.patch('ironic_python_agent.disk_utils.get_image_mb', autospec=True)
|
||||
@mock.patch.object(partition_utils, 'work_on_disk', autospec=True)
|
||||
def test_write_partition_image_exception_image_mb(self,
|
||||
work_on_disk_mock,
|
||||
@ -450,7 +457,7 @@ class TestStandbyExtension(base.IronicAgentTest):
|
||||
@mock.patch('builtins.open', autospec=True)
|
||||
@mock.patch('ironic_python_agent.utils.execute', autospec=True)
|
||||
@mock.patch.object(partition_utils, 'work_on_disk', autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.get_image_mb', autospec=True)
|
||||
@mock.patch('ironic_python_agent.disk_utils.get_image_mb', autospec=True)
|
||||
def test_write_partition_image(self, image_mb_mock, work_on_disk_mock,
|
||||
execute_mock, open_mock, dispatch_mock):
|
||||
image_info = _build_fake_partition_image_info()
|
||||
@ -837,11 +844,10 @@ class TestStandbyExtension(base.IronicAgentTest):
|
||||
standby.ImageDownload,
|
||||
image_info)
|
||||
|
||||
@mock.patch('ironic_lib.disk_utils.get_disk_identifier',
|
||||
@mock.patch('ironic_python_agent.disk_utils.get_disk_identifier',
|
||||
lambda dev: 'ROOT')
|
||||
@mock.patch('ironic_python_agent.utils.execute',
|
||||
autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.list_partitions',
|
||||
@mock.patch('ironic_lib.utils.execute', autospec=True)
|
||||
@mock.patch('ironic_python_agent.disk_utils.list_partitions',
|
||||
autospec=True)
|
||||
@mock.patch.object(partition_utils, 'create_config_drive_partition',
|
||||
autospec=True)
|
||||
@ -891,8 +897,8 @@ class TestStandbyExtension(base.IronicAgentTest):
|
||||
self.assertEqual({'root uuid': 'ROOT'},
|
||||
self.agent_extension.partition_uuids)
|
||||
|
||||
@mock.patch('ironic_python_agent.utils.execute', autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.list_partitions',
|
||||
@mock.patch('ironic_lib.utils.execute', autospec=True)
|
||||
@mock.patch('ironic_python_agent.disk_utils.list_partitions',
|
||||
autospec=True)
|
||||
@mock.patch.object(partition_utils, 'create_config_drive_partition',
|
||||
autospec=True)
|
||||
@ -962,12 +968,12 @@ class TestStandbyExtension(base.IronicAgentTest):
|
||||
self.assertEqual({'root uuid': 'root_uuid'},
|
||||
self.agent_extension.partition_uuids)
|
||||
|
||||
@mock.patch('ironic_lib.disk_utils.get_disk_identifier',
|
||||
@mock.patch('ironic_python_agent.disk_utils.get_disk_identifier',
|
||||
lambda dev: 'ROOT')
|
||||
@mock.patch('ironic_python_agent.utils.execute', autospec=True)
|
||||
@mock.patch('ironic_lib.utils.execute', autospec=True)
|
||||
@mock.patch.object(partition_utils, 'create_config_drive_partition',
|
||||
autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.list_partitions',
|
||||
@mock.patch('ironic_python_agent.disk_utils.list_partitions',
|
||||
autospec=True)
|
||||
@mock.patch('ironic_python_agent.hardware.dispatch_to_managers',
|
||||
autospec=True)
|
||||
@ -1007,12 +1013,12 @@ class TestStandbyExtension(base.IronicAgentTest):
|
||||
'root_uuid=ROOT').format(image_info['id'], 'manager')
|
||||
self.assertEqual(cmd_result, async_result.command_result['result'])
|
||||
|
||||
@mock.patch('ironic_lib.disk_utils.get_disk_identifier',
|
||||
@mock.patch('ironic_python_agent.disk_utils.get_disk_identifier',
|
||||
lambda dev: 'ROOT')
|
||||
@mock.patch.object(partition_utils, 'work_on_disk', autospec=True)
|
||||
@mock.patch.object(partition_utils, 'create_config_drive_partition',
|
||||
autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.list_partitions',
|
||||
@mock.patch('ironic_python_agent.disk_utils.list_partitions',
|
||||
autospec=True)
|
||||
@mock.patch('ironic_python_agent.hardware.dispatch_to_managers',
|
||||
autospec=True)
|
||||
@ -1054,11 +1060,11 @@ class TestStandbyExtension(base.IronicAgentTest):
|
||||
self.assertFalse(configdrive_copy_mock.called)
|
||||
self.assertEqual('FAILED', async_result.command_status)
|
||||
|
||||
@mock.patch('ironic_lib.disk_utils.get_disk_identifier',
|
||||
@mock.patch('ironic_python_agent.disk_utils.get_disk_identifier',
|
||||
side_effect=OSError, autospec=True)
|
||||
@mock.patch('ironic_python_agent.utils.execute',
|
||||
@mock.patch('ironic_lib.utils.execute',
|
||||
autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.list_partitions',
|
||||
@mock.patch('ironic_python_agent.disk_utils.list_partitions',
|
||||
autospec=True)
|
||||
@mock.patch.object(partition_utils, 'create_config_drive_partition',
|
||||
autospec=True)
|
||||
@ -1108,10 +1114,10 @@ class TestStandbyExtension(base.IronicAgentTest):
|
||||
attempts=mock.ANY)
|
||||
self.assertEqual({}, self.agent_extension.partition_uuids)
|
||||
|
||||
@mock.patch('ironic_python_agent.utils.execute', mock.Mock())
|
||||
@mock.patch('ironic_lib.disk_utils.list_partitions',
|
||||
@mock.patch('ironic_lib.utils.execute', mock.Mock())
|
||||
@mock.patch('ironic_python_agent.disk_utils.list_partitions',
|
||||
lambda _dev: [mock.Mock()])
|
||||
@mock.patch('ironic_lib.disk_utils.get_disk_identifier',
|
||||
@mock.patch('ironic_python_agent.disk_utils.get_disk_identifier',
|
||||
lambda dev: 'ROOT')
|
||||
@mock.patch.object(partition_utils, 'work_on_disk', autospec=True)
|
||||
@mock.patch.object(partition_utils, 'create_config_drive_partition',
|
||||
@ -1346,8 +1352,9 @@ class TestStandbyExtension(base.IronicAgentTest):
|
||||
'configdrive_data')
|
||||
|
||||
@mock.patch('ironic_python_agent.extensions.standby.LOG', autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.block_uuid', autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.fix_gpt_partition', autospec=True)
|
||||
@mock.patch('ironic_python_agent.disk_utils.block_uuid', autospec=True)
|
||||
@mock.patch('ironic_python_agent.disk_utils.fix_gpt_partition',
|
||||
autospec=True)
|
||||
@mock.patch('hashlib.new', autospec=True)
|
||||
@mock.patch('builtins.open', autospec=True)
|
||||
@mock.patch('requests.get', autospec=True)
|
||||
@ -1444,7 +1451,8 @@ class TestStandbyExtension(base.IronicAgentTest):
|
||||
mock.call(b'some')]
|
||||
file_mock.write.assert_has_calls(write_calls)
|
||||
|
||||
@mock.patch('ironic_lib.disk_utils.fix_gpt_partition', autospec=True)
|
||||
@mock.patch('ironic_python_agent.disk_utils.fix_gpt_partition',
|
||||
autospec=True)
|
||||
@mock.patch('hashlib.new', autospec=True)
|
||||
@mock.patch('builtins.open', autospec=True)
|
||||
@mock.patch('requests.get', autospec=True)
|
||||
@ -1570,7 +1578,7 @@ class TestStandbyExtension(base.IronicAgentTest):
|
||||
@mock.patch.object(hardware, 'dispatch_to_managers', autospec=True)
|
||||
@mock.patch('builtins.open', autospec=True)
|
||||
@mock.patch('ironic_python_agent.utils.execute', autospec=True)
|
||||
@mock.patch('ironic_lib.disk_utils.get_image_mb', autospec=True)
|
||||
@mock.patch('ironic_python_agent.disk_utils.get_image_mb', autospec=True)
|
||||
@mock.patch.object(partition_utils, 'work_on_disk', autospec=True)
|
||||
def test_write_partition_image_no_node_uuid_uefi(
|
||||
self, work_on_disk_mock,
|
||||
|
202
ironic_python_agent/tests/unit/test_disk_partitioner.py
Normal file
202
ironic_python_agent/tests/unit/test_disk_partitioner.py
Normal file
@ -0,0 +1,202 @@
|
||||
# Copyright 2014 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from unittest import mock
|
||||
|
||||
from ironic_lib import exception
|
||||
from ironic_lib.tests import base
|
||||
from ironic_lib import utils
|
||||
|
||||
from ironic_python_agent import disk_partitioner
|
||||
|
||||
|
||||
CONF = disk_partitioner.CONF
|
||||
|
||||
|
||||
class DiskPartitionerTestCase(base.IronicLibTestCase):
|
||||
|
||||
def test_add_partition(self):
|
||||
dp = disk_partitioner.DiskPartitioner('/dev/fake')
|
||||
dp.add_partition(1024)
|
||||
dp.add_partition(512, fs_type='linux-swap')
|
||||
dp.add_partition(2048, boot_flag='boot')
|
||||
dp.add_partition(2048, boot_flag='bios_grub')
|
||||
expected = [(1, {'boot_flag': None,
|
||||
'extra_flags': None,
|
||||
'fs_type': '',
|
||||
'type': 'primary',
|
||||
'size': 1024}),
|
||||
(2, {'boot_flag': None,
|
||||
'extra_flags': None,
|
||||
'fs_type': 'linux-swap',
|
||||
'type': 'primary',
|
||||
'size': 512}),
|
||||
(3, {'boot_flag': 'boot',
|
||||
|