# Copyright 2020 Red Hat, Inc
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
This is a Python implementation of virtual disk format inspection routines
gathered from various public specification documents, as well as qemu disk
driver code. It attempts to store and parse the minimum amount of data
required, in a streaming-friendly manner, to collect metadata about
complex-format images.
"""

import abc
import logging
import struct

from oslo_utils._i18n import _
from oslo_utils import units

LOG = logging.getLogger(__name__)


def _chunked_reader(fileobj, chunk_size=512):
    while True:
        chunk = fileobj.read(chunk_size)
        if not chunk:
            break
        yield chunk


class CaptureRegion(object):
    """Represents a region of a file we want to capture.

    A region of a file we want to capture requires a byte offset into
    the file and a length. This is expected to be used by a data
    processing loop, calling capture() with the most recently-read
    chunk. This class handles the task of grabbing the desired region
    of data across potentially multiple fractional and unaligned reads.

    :param offset: Byte offset into the file starting the region
    :param length: The length of the region
    :param min_length: Consider this region complete if it has captured
                       at least this much data. This should generally
                       NOT be used but may be required for certain
                       formats with highly variable data structures.
    """

    def __init__(self, offset, length, min_length=None):
        self.offset = offset
        self.length = length
        self.data = b''
        self.min_length = min_length

    @property
    def complete(self):
        """Returns True when we have captured the desired data."""
        if self.min_length is not None:
            return self.min_length <= len(self.data)
        else:
            return self.length == len(self.data)

    def capture(self, chunk, current_position):
        """Process a chunk of data.

        This should be called for each chunk in the read loop, at least
        until complete returns True.

        :param chunk: A chunk of bytes in the file
        :param current_position: The position of the file processed by the
                                 read loop so far. Note that this will be
                                 the position in the file *after* the chunk
                                 being presented.
        """
        read_start = current_position - len(chunk)
        if (read_start <= self.offset <= current_position or
                self.offset <= read_start <= (self.offset + self.length)):
            if read_start < self.offset:
                lead_gap = self.offset - read_start
            else:
                lead_gap = 0
            self.data += chunk[lead_gap:]
            self.data = self.data[:self.length]
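
# A minimal sketch of how a CaptureRegion is driven by a read loop (for
# illustration only; FileInspector below does exactly this internally):
#
#   region = CaptureRegion(offset=512, length=4)
#   position = 0
#   with open('disk.img', 'rb') as f:
#       for chunk in _chunked_reader(f):
#           position += len(chunk)
#           region.capture(chunk, position)
#           if region.complete:
#               break
#   # region.data now holds bytes 512-515 of the file
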
class EndCaptureRegion(CaptureRegion):
    """Represents a region that captures the last N bytes of a stream.

    This can only capture the last N bytes of a stream and not an
    arbitrary region referenced from the end of the file, since in most
    cases we do not know how much data we will read.

    :param offset: Byte offset from the end of the stream to capture
                   (which will also be the region length)
    """

    def __init__(self, offset):
        super().__init__(offset, offset)
        # We don't want to indicate completeness until we have the data we
        # want *and* have reached EOF
        self._complete = False

    def capture(self, chunk, current_position):
        self.data += chunk
        self.data = self.data[0 - self.length:]
        self.offset = current_position - len(self.data)

    @property
    def complete(self):
        return super().complete and self._complete

    def finish(self):
        """Indicate that the entire stream has been read."""
        self._complete = True


class SafetyCheck:
    """Represents a named safety check on an inspector."""

    def __init__(self, name, target_fn, description=None):
        """A safety check and its meta info.

        @name should be a short name of the check (ideally no spaces)
        @target_fn is the implementation we run (no args), which returns
                   None if the check passes and raises SafetyViolation
                   if it does not
        @description is an optional longer-format human-readable string
                     that describes the check
        """
        self.name = name
        self.target_fn = target_fn
        self.description = description

    def __call__(self):
        """Execute the target check function.

        Returns None if the check passes; raises SafetyViolation if it
        fails. Any unexpected error is wrapped in a SafetyViolation as
        well.
        """
        try:
            return self.target_fn()
        except SafetyViolation:
            raise
        except Exception as e:
            LOG.error('Failed to run safety check %s on %s inspector: %s',
                      self.name, self, e)
            raise SafetyViolation(_('Unexpected error'))

    @classmethod
    def null(cls):
        """The "null" safety check always passes.

        This should only be used if there are no meaningful checks that
        can be done for a given format.
        """
        return cls('null', lambda: None,
                   _('This file format has no meaningful safety check'))

    @classmethod
    def banned(cls):
        """The "banned" safety check always fails.

        This should be used for formats we want to identify but never
        allow, generally because they are unsupported by any of our users
        and/or we are unable to check for safety.
        """
        def fail():
            raise SafetyViolation(_('This file format is not allowed'))

        return cls('banned', fail, _('This file format is not allowed'))


class ImageFormatError(Exception):
    """An unrecoverable image format error that aborts the process."""
    pass


class SafetyViolation(Exception):
    """Indicates the failure of a single safety check."""
    pass


class SafetyCheckFailed(Exception):
    """Indicates that one or more of a series of safety checks failed."""

    def __init__(self, failures):
        super().__init__(_('Safety checks failed: %s') % ','.join(
            failures.keys()))
        self.failures = failures
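
# For illustration, a hypothetical minimal inspector subclass (of the
# FileInspector base class defined below) wires these pieces together,
# and a consumer maps check failures back to their names:
#
#   class MyInspector(FileInspector):
#       NAME = 'my'
#
#       def _initialize(self):
#           self.new_region('header', CaptureRegion(0, 512))
#           self.add_safety_check(SafetyCheck.null())
#
#       @property
#       def format_match(self):
#           return self.region('header').data.startswith(b'MAGC')
#
#   try:
#       inspector.safety_check()
#   except SafetyCheckFailed as e:
#       for name, violation in e.failures.items():
#           LOG.error('Check %s failed: %s', name, violation)
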
class FileInspector(abc.ABC):
    """A stream-based disk image inspector.

    This base class works on raw images and is subclassed for more
    complex types. It is presented with the file to be examined one
    chunk at a time during read processing, and will only store as much
    data as necessary to determine required attributes of the file.
    """

    # This should match what qemu-img thinks this format is
    NAME = ''

    def __init__(self, tracing=False):
        self._total_count = 0

        # NOTE(danms): The logging in here is extremely verbose for a reason,
        # but should never really be enabled at that level at runtime. To
        # retain all that work and assist in future debug, we have a separate
        # debug flag that can be passed from a manual tool to turn it on.
        self._tracing = tracing
        self._capture_regions = {}
        self._safety_checks = {}
        self._finished = False
        self._initialize()
        if not self._safety_checks:
            # Make sure we actively declare some safety check, even if it
            # is a no-op.
            raise RuntimeError(
                'All inspectors must define at least one safety check')

    def _trace(self, *args, **kwargs):
        if self._tracing:
            LOG.debug(*args, **kwargs)

    @abc.abstractmethod
    def _initialize(self):
        """Set up the inspector before we start processing data.

        This should add the initial set of capture regions and safety
        checks.
        """

    def finish(self):
        """Indicate that the entire stream has been read.

        This should be called when the entire stream has been completely
        read, which will mark any EndCaptureRegion objects as complete.
        """
        self._finished = True
        for region in self._capture_regions.values():
            if isinstance(region, EndCaptureRegion):
                region.finish()

    def _capture(self, chunk, only=None):
        if self._finished:
            raise RuntimeError('Inspector has been marked finished, '
                               'no more data processing allowed')
        for name, region in self._capture_regions.items():
            if only and name not in only:
                continue
            if isinstance(region, EndCaptureRegion) or not region.complete:
                region.capture(chunk, self._total_count)

    def eat_chunk(self, chunk):
        """Call this to present chunks of the file to the inspector."""
        pre_regions = set(self._capture_regions.values())
        pre_complete = {region for region in self._capture_regions.values()
                        if region.complete}

        # Increment our position-in-file counter
        self._total_count += len(chunk)

        # Run through the regions we know of to see if they want this
        # data
        self._capture(chunk)

        # Let the format do some post-read processing of the stream
        self.post_process()

        # Check to see if the post-read processing added new regions
        # which may require the current chunk.
        new_regions = set(self._capture_regions.values()) - pre_regions
        if new_regions:
            self._capture(chunk,
                          only=[self.region_name(r) for r in new_regions])

        post_complete = {region for region in self._capture_regions.values()
                         if region.complete}
        # Call the handler for any regions that are newly complete
        for region in post_complete - pre_complete:
            self.region_complete(self.region_name(region))

    def post_process(self):
        """Post-read hook to process what has been read so far.

        This will be called after each chunk is read and potentially
        captured by the defined regions. If any regions are defined by
        this call, those regions will be presented with the current
        chunk in case it is within one of the new regions.
        """
        pass

    def region(self, name):
        """Get a CaptureRegion by name."""
        return self._capture_regions[name]

    def region_name(self, region):
        """Return the region name for a region object."""
        for name in self._capture_regions:
            if self._capture_regions[name] is region:
                return name
        raise ValueError('No such region')

    def new_region(self, name, region):
        """Add a new CaptureRegion by name."""
        if self.has_region(name):
            # This is a bug, we tried to add the same region twice
            raise ImageFormatError('Inspector re-added region %s' % name)
        self._capture_regions[name] = region

    def has_region(self, name):
        """Returns True if the named region has been defined."""
        return name in self._capture_regions

    def delete_region(self, name):
        """Remove a capture region by name.

        This will raise KeyError if the region does not exist.
        """
        del self._capture_regions[name]

    def region_complete(self, region_name):
        """Called when a region becomes complete.

        Subclasses may implement this if they need to do one-time
        processing of a region's data.
        """
        pass

    def add_safety_check(self, check):
        if not isinstance(check, SafetyCheck):
            raise RuntimeError(_('Unable to add safety check of type %s') % (
                type(check).__name__))
        if check.name in self._safety_checks:
            raise RuntimeError(_('Duplicate check of name %s') % check.name)
        self._safety_checks[check.name] = check

    @property
    @abc.abstractmethod
    def format_match(self):
        """Returns True if the file appears to be the expected format."""

    @property
    def virtual_size(self):
        """Returns the virtual size of the disk image, or zero if unknown."""
        return self._total_count

    @property
    def actual_size(self):
        """Returns the total size of the file, usually smaller than
        virtual_size.

        NOTE: this will only be accurate if the entire file is read and
        processed.
        """
        return self._total_count

    @property
    def complete(self):
        """Returns True if we have all the information needed."""
        return all(r.complete for r in self._capture_regions.values())

    def __str__(self):
        """The string name of this file format."""
        return self.NAME

    @property
    def context_info(self):
        """Return info on the amount of data held in memory for auditing.

        This is a dict of region:sizeinbytes items that the inspector
        uses to examine the file.
        """
        return {name: len(region.data) for name, region in
                self._capture_regions.items()}

    @classmethod
    def from_file(cls, filename):
        """Read as much of a file as necessary to complete inspection.

        NOTE: Because we only read as much of the file as necessary, the
        actual_size property will not reflect the size of the file, but
        the amount of data we read before we satisfied the inspector.

        Raises ImageFormatError if we cannot parse the file.
        """
        inspector = cls()
        with open(filename, 'rb') as f:
            for chunk in _chunked_reader(f):
                inspector.eat_chunk(chunk)
                if inspector.complete:
                    # No need to eat any more data
                    break
            inspector.finish()
        if not inspector.complete or not inspector.format_match:
            raise ImageFormatError('File is not in requested format')
        return inspector

    def safety_check(self):
        """Perform all checks to determine if this file is safe.

        Returns if safe, raises otherwise. It may raise ImageFormatError
        if safety cannot be guaranteed because of parsing or other
        errors. It will raise SafetyCheckFailed if one or more checks
        fails.
        """
        if not self.complete:
            raise ImageFormatError(
                _('Incomplete file cannot be safety checked'))

        if not self.format_match:
            raise ImageFormatError(
                _('Unable to safety check format %s '
                  'because content does not match') % self)

        failures = {}
        for check in self._safety_checks.values():
            try:
                result = check()
                if result is not None:
                    raise RuntimeError('check returned result')
            except SafetyViolation as exc:
                exc.check = check
                failures[check.name] = exc
                LOG.warning('Safety check %s on %s failed because %s',
                            check.name, self, exc)
        if failures:
            raise SafetyCheckFailed(failures)


class RawFileInspector(FileInspector):
    NAME = 'raw'

    def _initialize(self):
        """Raw files have nothing to capture and no safety checks."""
        self.add_safety_check(SafetyCheck.null())

    @property
    def format_match(self):
        # By definition, raw files are unformatted and thus we always match
        return True
# The qcow2 format consists of a big-endian 72-byte header, of which
# only a small portion has information we care about:
#
# Dec  Hex   Name
#   0  0x00  Magic 4-bytes 'QFI\xfb'
#   4  0x04  Version (uint32_t, 2 or 3 for modern files)
#  . . .
#   8  0x08  Backing file offset (uint64_t)
#  24  0x18  Size in bytes (uint64_t)
#  . . .
#  72  0x48  Incompatible features bitfield (6 bytes)
#
# https://gitlab.com/qemu-project/qemu/-/blob/master/docs/interop/qcow2.txt
class QcowInspector(FileInspector):
    """QEMU QCOW Format

    This should only require about 32 bytes of the beginning of the file
    to determine the virtual size, and 104 bytes to perform the safety
    check.

    This recognizes the (very) old v1 format but will raise a
    SafetyViolation for it, as it should definitely not be in production
    use at this point.
    """

    NAME = 'qcow2'

    BF_OFFSET = 0x08
    BF_OFFSET_LEN = 8
    I_FEATURES = 0x48
    I_FEATURES_LEN = 8
    I_FEATURES_DATAFILE_BIT = 3
    I_FEATURES_MAX_BIT = 4

    def _initialize(self):
        self.qemu_header_info = {}
        self.new_region('header', CaptureRegion(0, 512))
        self.add_safety_check(
            SafetyCheck('backing_file', self.check_backing_file))
        self.add_safety_check(
            SafetyCheck('data_file', self.check_data_file))
        self.add_safety_check(
            SafetyCheck('unknown_features', self.check_unknown_features))

    def region_complete(self, region):
        self.qemu_header_info = dict(zip(
            ('magic', 'version', 'bf_offset', 'bf_sz', 'cluster_bits',
             'size'),
            struct.unpack('>4sIQIIQ', self.region('header').data[:32])))
        if not self.format_match:
            self.qemu_header_info = {}

    @property
    def virtual_size(self):
        return self.qemu_header_info.get('size', 0)

    @property
    def format_match(self):
        if not self.region('header').complete:
            return False
        return self.qemu_header_info.get('magic') == b'QFI\xFB'

    def check_backing_file(self):
        bf_offset_bytes = self.region('header').data[
            self.BF_OFFSET:self.BF_OFFSET + self.BF_OFFSET_LEN]
        # nonzero means "has a backing file"
        bf_offset, = struct.unpack('>Q', bf_offset_bytes)
        if bf_offset != 0:
            raise SafetyViolation('Image has a backing file')
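
    # A worked example (illustrative): for a 1 GiB qcow2 v3 image with
    # the default 64KiB cluster size and no backing file, the first 32
    # header bytes unpack as:
    #
    #   magic=b'QFI\xfb', version=3, bf_offset=0, bf_sz=0,
    #   cluster_bits=16, size=1073741824
    #
    # so check_backing_file() passes because bf_offset is zero.
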
    def check_unknown_features(self):
        ver = self.qemu_header_info.get('version')
        if ver == 2:
            # Version 2 did not have the feature flag array, so no need to
            # check it here.
            return
        elif ver != 3:
            raise SafetyViolation('Unsupported qcow2 version')

        i_features = self.region('header').data[
            self.I_FEATURES:self.I_FEATURES + self.I_FEATURES_LEN]

        # This is the maximum byte number we should expect any bits to be set
        max_byte = self.I_FEATURES_MAX_BIT // 8

        # The flag bytes are in big-endian ordering, so if we process
        # them in index-order, they're reversed
        for i, byte_num in enumerate(reversed(range(self.I_FEATURES_LEN))):
            if byte_num == max_byte:
                # If we're in the max-allowed byte, allow any bits less than
                # the maximum-known feature flag bit to be set
                allow_mask = ((1 << (self.I_FEATURES_MAX_BIT % 8)) - 1)
            elif byte_num > max_byte:
                # If we're above the byte with the maximum known feature flag
                # bit, then we expect all zeroes
                allow_mask = 0x0
            else:
                # Any earlier-than-the-maximum byte can have any of the flag
                # bits set
                allow_mask = 0xFF

            if i_features[i] & ~allow_mask:
                LOG.warning('Found unknown feature bit in byte %i: %s/%s',
                            byte_num, bin(i_features[i] & ~allow_mask),
                            bin(allow_mask))
                raise SafetyViolation('Unknown QCOW2 features found')

    def check_data_file(self):
        i_features = self.region('header').data[
            self.I_FEATURES:self.I_FEATURES + self.I_FEATURES_LEN]

        # First byte of the bitfield, which is i_features[7]
        byte = self.I_FEATURES_LEN - 1 - self.I_FEATURES_DATAFILE_BIT // 8
        # Third bit of the bitfield, which is 0x04
        bit = 1 << ((self.I_FEATURES_DATAFILE_BIT - 1) % 8)
        if bool(i_features[byte] & bit):
            raise SafetyViolation('Image has data_file set')


class QEDInspector(FileInspector):
    NAME = 'qed'

    def _initialize(self):
        self.new_region('header', CaptureRegion(0, 512))
        # QED format is not supported by anyone, but we want to detect it
        # and mark it as just always unsafe.
        self.add_safety_check(SafetyCheck.banned())

    @property
    def format_match(self):
        if not self.region('header').complete:
            return False
        return self.region('header').data.startswith(b'QED\x00')


# The VHD (or VPC as QEMU calls it) format consists of a big-endian
# 512-byte "footer" at the beginning of the file with various
# information, most of which does not matter to us:
#
# Dec  Hex   Name
#   0  0x00  Magic string (8-bytes, always 'conectix')
#  40  0x28  Disk size (uint64_t)
#
# https://github.com/qemu/qemu/blob/master/block/vpc.c
class VHDInspector(FileInspector):
    """Connectix/MS VPC VHD Format

    This should only require about 512 bytes of the beginning of the file
    to determine the virtual size.
    """

    NAME = 'vhd'

    def _initialize(self):
        self.new_region('header', CaptureRegion(0, 512))
        self.add_safety_check(SafetyCheck.null())

    @property
    def format_match(self):
        return self.region('header').data.startswith(b'conectix')

    @property
    def virtual_size(self):
        if not self.region('header').complete:
            return 0

        if not self.format_match:
            return 0

        return struct.unpack('>Q', self.region('header').data[40:48])[0]
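
# For example (illustrative): the footer of a 10 GiB VHD begins with
# b'conectix' and stores 10737418240 (10 * 1024**3) as a big-endian
# uint64 at byte offset 40, which is exactly what virtual_size above
# unpacks.
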
# The VHDX format consists of a complex dynamic little-endian
# structure with multiple regions of metadata and data, linked by
# offsets within the file (and within regions), identified by MSFT
# GUID strings. The header is a 320KiB structure, only a few pieces of
# which we actually need to capture and interpret:
#
#    Dec      Hex  Name
#      0  0x00000  Identity (Technically 9-bytes, padded to 64KiB, the
#                  first 8 bytes of which are 'vhdxfile')
# 196608  0x30000  The Region table (64KiB of a 32-byte header, followed
#                  by up to 2047 36-byte region table entry structures)
#
# The region table header includes two items we need to read and parse,
# which are:
#
# 196608  0x30000  4-byte signature ('regi')
# 196616  0x30008  Entry count (uint32_t)
#
# The region table entries follow the region table header immediately
# and are identified by a 16-byte GUID, and provide an offset of the
# start of that region. We care about the "metadata region", identified
# by the METAREGION class variable. The region table entry is (offsets
# from the beginning of the entry, since it could be in multiple places):
#
#      0  0x00000  16-byte MSFT GUID
#     16  0x00010  Offset of the actual metadata region (uint64_t)
#
# When we find the METAREGION table entry, we need to grab that offset
# and start examining the region structure at that point. That
# consists of a metadata table of structures, which point to places in
# the data in an unstructured space that follows. The header is
# (offsets relative to the region start):
#
#      0  0x00000  8-byte signature ('metadata')
#   . . .
#     10  0x0000A  2-byte entry count (up to 2047 entries max)
#
# This header is followed by the specified number of metadata entry
# structures, identified by GUID:
#
#      0  0x00000  16-byte MSFT GUID
#     16  0x00010  4-byte offset (uint32_t, relative to the beginning of
#                  the metadata region)
#
# We need to find the "Virtual Disk Size" metadata item, identified by
# the GUID in the VIRTUAL_DISK_SIZE class variable, grab the offset,
# add it to the offset of the metadata region, and examine that 8-byte
# chunk of data that follows.
#
# The "Virtual Disk Size" is a naked uint64_t which contains the size
# of the virtual disk, and is our ultimate target here.
#
# https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-vhdx/83e061f8-f6e2-4de1-91bd-5d518a43d477
class VHDXInspector(FileInspector):
    """MS VHDX Format

    This requires some complex parsing of the stream. The first 256KiB
    of the image is stored to get the header and region information, and
    then we capture the first metadata region to read those records,
    find the location of the virtual size data and parse it.

    This needs to store the metadata table entries up until the VDS
    record, which may consist of up to 2047 32-byte entries. Finally,
    it must store a chunk of data at the offset of the actual VDS
    uint64.
    """

    NAME = 'vhdx'

    METAREGION = '8B7CA206-4790-4B9A-B8FE-575F050F886E'
    VIRTUAL_DISK_SIZE = '2FA54224-CD1B-4876-B211-5DBED83BF4B8'
    VHDX_METADATA_TABLE_MAX_SIZE = 32 * 2048  # From qemu

    def _initialize(self):
        self.new_region('ident', CaptureRegion(0, 32))
        self.new_region('header', CaptureRegion(192 * 1024, 64 * 1024))
        self.add_safety_check(SafetyCheck.null())

    def post_process(self):
        # After reading a chunk, we may have the following conditions:
        #
        # 1. We may have just completed the header region, and if so,
        #    we need to immediately read and calculate the location of
        #    the metadata region, as it may be starting in the same
        #    read we just did.
        # 2. We may have just completed the metadata region, and if so,
        #    we need to immediately calculate the location of the
        #    "virtual disk size" record, as it may be starting in the
        #    same read we just did.
        if self.region('header').complete and not self.has_region('metadata'):
            region = self._find_meta_region()
            if region:
                self.new_region('metadata', region)
        elif self.has_region('metadata') and not self.has_region('vds'):
            region = self._find_meta_entry(self.VIRTUAL_DISK_SIZE)
            if region:
                self.new_region('vds', region)
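
    # The chain of discovery above is thus (an illustrative summary):
    #
    #   'header' complete    -> _find_meta_region()        -> 'metadata'
    #   'metadata' captured  -> _find_meta_entry(VDS GUID) -> 'vds'
    #   'vds' complete       -> virtual_size unpacks the final uint64
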
    @property
    def format_match(self):
        return self.region('ident').data.startswith(b'vhdxfile')

    @staticmethod
    def _guid(buf):
        """Format a MSFT GUID from the 16-byte input buffer."""
        guid_format = '<IHHBBBBBBBB'
        return '%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X' % (
            struct.unpack(guid_format, buf))

    def _find_meta_region(self):
        # The region table entries start after a 16-byte table header
        region_entry_first = 16

        # Parse the region table header to find the number of regions
        regi, cksum, count, reserved = struct.unpack(
            '<IIII', self.region('header').data[:16])
        if regi != 0x69676572:
            raise ImageFormatError('Region signature not found at %x' % (
                self.region('header').offset))

        if count >= 2048:
            raise ImageFormatError('Region count is %i (limit 2047)' % count)

        # Process the regions until we find the metadata one; grab the
        # offset and return
        self._trace('Region entry first is %x', region_entry_first)
        self._trace('Region entries %i', count)
        meta_offset = 0
        for i in range(0, count):
            entry_start = region_entry_first + (i * 32)
            entry_end = entry_start + 32
            entry = self.region('header').data[entry_start:entry_end]
            self._trace('Entry offset is %x', entry_start)

            # GUID is the first 16 bytes
            guid = self._guid(entry[:16])
            if guid == self.METAREGION:
                # This entry is the metadata region entry
                meta_offset, meta_len, meta_req = struct.unpack(
                    '<QII', entry[16:32])

        if meta_offset == 0:
            # The metadata region was not found, so we do not know where
            # it is
            return None

        return CaptureRegion(meta_offset, self.VHDX_METADATA_TABLE_MAX_SIZE)

    def _find_meta_entry(self, desired_guid):
        meta_buffer = self.region('metadata').data
        if len(meta_buffer) < 32:
            # Not enough data yet for the full header, wait for more
            return None

        # Make sure we found the metadata region by checking the signature
        sig, reserved, count = struct.unpack('<8sHH', meta_buffer[:12])
        if sig != b'metadata':
            raise ImageFormatError(
                'Invalid signature for metadata region: %r' % sig)

        entries_size = 32 + (count * 32)
        if len(meta_buffer) < entries_size:
            # Not enough data yet for all metadata entries, wait for more
            return None

        if count >= 2048:
            raise ImageFormatError(
                'Metadata item count is %i (limit 2047)' % count)

        for i in range(0, count):
            entry_offset = 32 + (i * 32)
            guid = self._guid(meta_buffer[entry_offset:entry_offset + 16])
            if guid == desired_guid:
                # Found the item we are looking for by id.
                # Stop our region from capturing
                item_offset, item_length, _reserved = struct.unpack(
                    '<III',
                    meta_buffer[entry_offset + 16:entry_offset + 28])
                item_length = min(item_length,
                                  self.VHDX_METADATA_TABLE_MAX_SIZE)
                self.region('metadata').length = len(meta_buffer)
                self._trace('Found entry at offset %x', entry_offset)
                # Metadata item offset is from the beginning of the metadata
                # region, not the file
                return CaptureRegion(
                    self.region('metadata').offset + item_offset,
                    item_length)

        self._trace('Did not find guid %s', desired_guid)
        return None

    @property
    def virtual_size(self):
        # Until we have found the offset and have enough metadata buffered
        # to read the size of the virtual disk, we are unable to determine
        # the virtual size
        if not self.has_region('vds') or not self.region('vds').complete:
            return 0

        size, = struct.unpack('<Q', self.region('vds').data)
        return size


# The VMDK format comes in a large number of variations, but the
# single-file sparse variants are what we care about. The file begins
# with a little-endian 512-byte header:
#
# Dec  Hex   Name
#   0  0x00  4-byte magic string 'KDMV'
#   4  0x04  Version (uint32_t)
#   8  0x08  Flags (uint32_t)
#  12  0x0C  Number of 512 byte sectors in the disk (uint64_t)
#  20  0x14  Granularity (uint64_t)
#  28  0x1C  Descriptor offset in 512-byte sectors (uint64_t)
#  36  0x24  Descriptor size in 512-byte sectors (uint64_t)
#
# After the header comes the text "descriptor" region, which describes
# the disk layout in key=value form and (critically for safety) names
# the extent files and the createType of the image.
#
# https://github.com/qemu/qemu/blob/master/block/vmdk.c
class VMDKInspector(FileInspector):
    """VMware VMDK format (monolithicSparse and streamOptimized only)

    This needs to store the 512-byte header and the descriptor region,
    which should be just after that. The descriptor region is some
    variable number of 512-byte sectors, but is just text defining the
    layout of the disk.
    """

    NAME = 'vmdk'

    # The beginning and max size of the descriptor is also hardcoded in
    # qemu at 0x200 and 1MB - 1
    DESC_OFFSET = 0x200
    DESC_MAX_SIZE = (1 << 20) - 1

    def _initialize(self):
        self.new_region('header', CaptureRegion(0, 512))
        self.add_safety_check(
            SafetyCheck('descriptor', self.check_descriptor))

    def post_process(self):
        # If we have just completed the header region, we need to calculate
        # the location and length of the descriptor, which should
        # immediately follow it and may have been partially read already.
        if not self.region('header').complete:
            return

        if not self.has_region('descriptor'):
            (sig, ver, _flags, _sectors, _grain_size,
             desc_sec, desc_num) = struct.unpack(
                '<4sIIQQQQ', self.region('header').data[:44])

            if sig != b'KDMV':
                raise ImageFormatError('Signature KDMV not found: %r' % sig)

            if ver not in (1, 2, 3):
                raise ImageFormatError('Unsupported format version %i' % ver)

            # Since we parse both desc_sec and desc_num (the location and
            # length of the descriptor, expressed in 512-byte sectors)
            # from the file itself, we must validate them against hard
            # limits to bound how much data we will buffer.
            desc_offset = desc_sec * 512
            desc_size = min(desc_num * 512, self.DESC_MAX_SIZE)
            if desc_offset != self.DESC_OFFSET:
                raise ImageFormatError('Wrong descriptor location')

            self.new_region('descriptor',
                            CaptureRegion(desc_offset, desc_size))

    @property
    def format_match(self):
        return self.region('header').data.startswith(b'KDMV')

    @property
    def virtual_size(self):
        if not self.region('header').complete or not self.format_match:
            return 0

        sectors, = struct.unpack('<Q', self.region('header').data[12:20])
        return sectors * 512

    def check_descriptor(self):
        if not self.has_region('descriptor'):
            raise SafetyViolation('No descriptor found')

        # The descriptor is null-padded ASCII text; any createType other
        # than the single-file sparse variants implies extents in other
        # files, which we cannot inspect.
        desc_text = self.region('descriptor').data.rstrip(b'\x00').decode(
            'ascii', errors='replace').lower()

        if ('createtype="monolithicsparse"' not in desc_text and
                'createtype="streamoptimized"' not in desc_text):
            raise SafetyViolation('Unsupported VMDK create-type')

        if 'parentfilenamehint' in desc_text:
            raise SafetyViolation('Image has a backing file')


# The VDI format consists of a little-endian 512-byte header, of which
# we only care about:
#
# Dec  Hex   Name
#  64  0x40  4-byte magic (0xbeda107f, uint32_t)
# 368 0x170  Size in bytes (uint64_t)
#
# https://github.com/qemu/qemu/blob/master/block/vdi.c
class VDIInspector(FileInspector):
    """VirtualBox VDI format

    This only needs to store the first 512 bytes of the image.
    """

    NAME = 'vdi'

    def _initialize(self):
        self.new_region('header', CaptureRegion(0, 512))
        self.add_safety_check(SafetyCheck.null())

    @property
    def format_match(self):
        if not self.region('header').complete:
            return False

        signature, = struct.unpack(
            '<I', self.region('header').data[0x40:0x44])
        return signature == 0xbeda107f

    @property
    def virtual_size(self):
        if not self.region('header').complete or not self.format_match:
            return 0

        size, = struct.unpack(
            '<Q', self.region('header').data[0x170:0x178])
        return size


# The ISO 9660 format starts with a 32KiB "system area" (which may
# contain arbitrary data, including a valid header for another format),
# followed by a series of 2KiB volume descriptors, each of which carries
# a type byte and a standard identifier:
#
#   Dec     Hex   Name
# 32768  0x8000   Descriptor type (uint8_t, 1 for the primary descriptor)
# 32769  0x8001   Standard identifier ('CD001', or 'NSR02'/'NSR03' for UDF)
#
# https://wiki.osdev.org/ISO_9660
class ISOInspector(FileInspector):
    """ISO 9660 and UDF format

    We need to capture the first 2KiB volume descriptor, which follows
    the 32KiB system area, to look for the ISO 9660 or UDF signature.
    """

    NAME = 'iso'

    def _initialize(self):
        self.new_region('system_area', CaptureRegion(0, 32 * units.Ki))
        self.new_region('header', CaptureRegion(32 * units.Ki, 2 * units.Ki))
        self.add_safety_check(SafetyCheck.null())

    @property
    def format_match(self):
        if not self.complete:
            return False
        signature = self.region('header').data[1:6]
        # This should be 'CD001' for ISO 9660, or one of the ECMA-167
        # (UDF) identifiers
        return signature in (b'CD001', b'NSR02', b'NSR03')

    @property
    def virtual_size(self):
        if not self.complete or not self.format_match:
            return 0

        descriptor_type = self.region('header').data[0]
        if descriptor_type != 1:
            # Not a primary volume descriptor, so we cannot compute a size
            return 0

        # The primary volume descriptor stores the volume space size (in
        # logical blocks) and the logical block size as both-endian
        # fields; we read the little-endian halves.
        volume_space_size, = struct.unpack(
            '<I', self.region('header').data[80:84])
        logical_block_size, = struct.unpack(
            '<H', self.region('header').data[128:130])
        return volume_space_size * logical_block_size


# A GPT-partitioned disk image starts with a 512-byte protective MBR
# (ending in the 0x55AA boot signature), followed at LBA 1 by the GPT
# header, which begins with the 8-byte signature 'EFI PART':
#
# Dec  Hex    Name
# 510  0x1FE  MBR boot signature (0x55AA)
# 512  0x200  GPT header signature ('EFI PART')
#
# https://en.wikipedia.org/wiki/GUID_Partition_Table
class GPTInspector(FileInspector):
    """GPT-partitioned disk image

    This only needs the first 1KiB of the image: the protective MBR and
    the GPT header at LBA 1.
    """

    NAME = 'gpt'

    def _initialize(self):
        self.new_region('mbr', CaptureRegion(0, 512))
        self.new_region('gpt_header', CaptureRegion(512, 512))
        self.add_safety_check(SafetyCheck.null())

    @property
    def format_match(self):
        if not self.complete:
            return False
        if self.region('mbr').data[510:512] != b'\x55\xaa':
            return False
        return self.region('gpt_header').data.startswith(b'EFI PART')


class InspectWrapper:
    """A file-like object that wraps a stream and runs inspectors on it.

    This wraps a file-like object and feeds each chunk read through it
    to one inspector per known (or allowed) format, in parallel. Once
    enough data has been read to make a determination (or EOF has been
    reached and close() has been called), the format property reports
    the result.

    :param source: The file-like object to wrap
    :param allowed_formats: An optional list of format names to which
                            detection should be constrained (default:
                            all known formats)
    """

    def __init__(self, source, allowed_formats=None):
        self._source = source
        self._errored_inspectors = set()
        self._inspectors = [v() for k, v in ALL_FORMATS.items()
                            if not allowed_formats or k in allowed_formats]
        self._finished = False

    def _process_chunk(self, chunk):
        for inspector in self._inspectors:
            if inspector in self._errored_inspectors:
                continue
            try:
                inspector.eat_chunk(chunk)
            except Exception as e:
                # An inspector that cannot parse the content is clearly
                # not a match, so stop feeding it
                LOG.debug('Format inspector %s failed, aborting: %s',
                          inspector, e)
                self._errored_inspectors.add(inspector)

    def read(self, size=-1):
        chunk = self._source.read(size)
        if chunk:
            self._process_chunk(chunk)
        return chunk

    def close(self):
        """Indicate that the entire stream has been read.

        This marks all the inspectors as finished, which allows formats
        with regions anchored to the end of the stream to finalize. It
        does not close the wrapped source.
        """
        for inspector in self._inspectors:
            if inspector not in self._errored_inspectors:
                inspector.finish()
        self._finished = True

    @property
    def format(self):
        """The format inspector that matches the content, if any.

        :returns: None if no decision can be made yet, the matching
                  inspector if exactly one specific format matched, or
                  the raw inspector if nothing specific matched.
        :raises: ImageFormatError if multiple formats match.
        """
        if not self._finished and not all(
                i.complete for i in self._inspectors
                if i not in self._errored_inspectors):
            # Mid-stream with incomplete inspectors means we do not have
            # enough information to decide yet
            return None
        matches = [i for i in self._inspectors
                   if i not in self._errored_inspectors and
                   str(i) != 'raw' and i.complete and i.format_match]
        if len(matches) > 1:
            # Multiple format matches mean that not only can we not return a
            # decision here, but also means that there may be something
            # nefarious going on (i.e. hiding one header in another).
            raise ImageFormatError('Multiple formats detected: %s' % ','.join(
                str(i) for i in matches))

        if not matches:
            try:
                # If nothing *specific* matched, we return the raw format to
                # indicate that we do not recognize this content at all.
                return [x for x in self._inspectors if str(x) == 'raw'][0]
            except IndexError:
                raise ImageFormatError(
                    'Content does not match any allowed format')

        # The expected outcome of this is a single match of something specific
        return matches[0]


ALL_FORMATS = {
    'raw': RawFileInspector,
    'qcow2': QcowInspector,
    'vhd': VHDInspector,
    'vhdx': VHDXInspector,
    'vmdk': VMDKInspector,
    'vdi': VDIInspector,
    'qed': QEDInspector,
    'iso': ISOInspector,
    'gpt': GPTInspector,
}


def get_inspector(format_name):
    """Returns a FormatInspector class based on the given name.

    :param format_name: The name of the disk_format (raw, qcow2, etc).
    :returns: A FormatInspector or None if unsupported.
    """
    return ALL_FORMATS.get(format_name)


def detect_file_format(filename):
    """Attempts to detect the format of a file.

    This runs through a file one time, running all the known inspectors
    in parallel. It stops reading the file as soon as one of them
    matches, or all of them are sure they don't match.

    :param filename: The path to the file to inspect.
    :returns: A FormatInspector instance matching the file.
    :raises: ImageFormatError if multiple formats are detected.
    """
    with open(filename, 'rb') as f:
        wrapper = InspectWrapper(f)
        try:
            for _chunk in _chunked_reader(wrapper, 4096):
                if wrapper.format:
                    return wrapper.format
        finally:
            wrapper.close()
    return wrapper.format
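
# Example use of the module-level helpers (an illustrative sketch,
# assuming '/tmp/image' exists on disk):
#
#   inspector = detect_file_format('/tmp/image')
#   print('%s, %i bytes virtual' % (inspector, inspector.virtual_size))
#   try:
#       inspector.safety_check()
#   except SafetyCheckFailed as e:
#       print('Image failed checks: %s' % e)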