Merge "Add decompression import plugin"
commit 2d80135f9b
@@ -592,6 +592,80 @@ You will need to configure 'glance-image-import.conf' file as shown below:
[image_conversion]
output_format = raw

The Image Decompression
-----------------------

.. list-table::

   * - release introduced
     - Ussuri (Glance 20.0.0)
   * - configuration file
     - ``glance-image-import.conf``

This plugin implements automated image decompression for Interoperable Image
Import. One use case for this plugin would be environments where a user or
operator wants to use the 'web-download' method and the image provider
supplies only compressed images.
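
For example, an import request like the following would pull a compressed
image from the provider and have the plugin decompress it before the image
data is stored (the endpoint, token, image ID, and URI are illustrative
placeholders):

.. code-block:: console

   $ curl -X POST "http://glance.example.com:9292/v2/images/$IMAGE_ID/import" \
       -H "X-Auth-Token: $TOKEN" \
       -H "Content-Type: application/json" \
       -d '{"method": {"name": "web-download",
                       "uri": "https://example.com/images/cirros.img.gz"}}'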

.. note::
   This plugin may only be used as part of the interoperable image import
   workflow (``POST v2/images/{image_id}/import``). *It has no effect on the
   image data upload call* (``PUT v2/images/{image_id}/file``).

You can guarantee that your end users must use interoperable image import by
restricting the ``upload_image`` policy appropriately in the Glance
``policy.json`` file. By default, this policy is unrestricted (that is, any
authorized user may make the image upload call).

For example, to allow only admin or service users to make the image upload
call, the policy could be restricted as follows:

.. code-block:: text

   "upload_image": "role:admin or (service_user_id:<uuid of nova user>) or
                    (service_roles:<service user role>)"

where ``service_role`` is the role created for the service user and assigned
to trusted services.

To use the Image Decompression Plugin, the following configuration is
required.

You will need to add ``image_decompression`` to the
``glance-image-import.conf`` file as shown below:

.. code-block:: ini

   [image_import_opts]
   image_import_plugins = ['image_decompression']

.. note::
   The supported archive types for Image Decompression are zip, lha/lzh, and
   gzip. Currently the plugin does not support multi-layered archives (like
   tar.gz). Lha/lzh is supported only if the Python ``lhafile`` dependency
   library is installed; in its absence, the import job will fail when an lha
   file is provided. (In this case we know the image would not be bootable,
   as it is compressed and we have no means to decompress it.)
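
As an illustration, the following hypothetical commands produce a single-layer
gzip archive the plugin can handle, while a ``tar.gz`` would be rejected as a
multi-layered archive:

.. code-block:: console

   $ qemu-img create -f qcow2 disk.qcow2 1G
   $ gzip --keep disk.qcow2            # disk.qcow2.gz: plain gzip, supported
   $ tar czf disk.tar.gz disk.qcow2    # tar wrapped in gzip: not supported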

.. note::
   The ``image_import_plugins`` configuration option is a list, and multiple
   plugins can be enabled for the import flow. The plugins are run
   sequentially, not in parallel. One can enable multiple plugins by
   configuring them in ``glance-image-import.conf``, for example as follows:

.. code-block:: ini

   [image_import_opts]
   image_import_plugins = ['image_decompression', 'image_conversion']

   [image_conversion]
   output_format = raw

If Image Conversion is used together with Image Decompression, decompression
must happen first; this is ensured by ordering the plugins accordingly.
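
The ordering guarantee comes from taskflow's linear flow, which runs tasks
strictly in the order they were added. A minimal standalone sketch (not
Glance code; the task names are illustrative):

.. code-block:: python

   from taskflow import engines, task
   from taskflow.patterns import linear_flow as lf


   class Step(task.Task):
       def execute(self):
           print("running %s" % self.name)


   # Tasks in a linear flow run sequentially, in insertion order.
   flow = lf.Flow('import-plugins').add(
       Step(name='image_decompression'),
       Step(name='image_conversion'))
   engines.run(flow)  # image_decompression runs first, then image_conversion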

.. _glance-api.conf: https://opendev.org/openstack/glance/src/branch/master/etc/glance-api.conf
.. _glance-image-import.conf.sample: https://opendev.org/openstack/glance/src/branch/master/etc/glance-image-import.conf.sample
.. _`Image Import Refactor`: https://specs.openstack.org/openstack/glance-specs/specs/mitaka/approved/image-import/image-import-refactor.html

glance/async_/flows/plugins/image_decompression.py (new file, 166 lines)
@@ -0,0 +1,166 @@
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import gzip
import os
import shutil
import zipfile

from oslo_log import log as logging
from oslo_utils import encodeutils
from taskflow.patterns import linear_flow as lf
from taskflow import task

LOG = logging.getLogger(__name__)

# Note(jokke): The number before '_' is the offset for the magic number
# in the header.
MAGIC_NUMBERS = {
    '0_zipfile': bytes([0x50, 0x4B, 0x03, 0x04]),
    '2_lhafile': bytes([0x2D, 0x6C, 0x68]),
    '0_gzipfile': bytes([0x1F, 0x8B, 0x08])}

NO_LHA = False

try:
    import lhafile
except ImportError:
    LOG.debug("No lhafile available.")
    NO_LHA = True


def header_lengths():
    headers = []
    for key, val in MAGIC_NUMBERS.items():
        offset, key = key.split("_")
        headers.append(int(offset) + len(val))
    return headers


MAX_HEADER = max(header_lengths())
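# For the table above: zipfile needs header bytes 0..3 (0 + 4 = 4), lhafile
# bytes 2..4 (2 + 3 = 5) and gzipfile bytes 0..2 (0 + 3 = 3), so MAX_HEADER
# is 5 and reading MAX_HEADER bytes covers every signature checked below.

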
def _zipfile(src_path, dest_path, image_id):
    try:
        with zipfile.ZipFile(src_path, 'r') as zfd:
            content = zfd.namelist()
            if len(content) != 1:
                raise Exception("Archive contains more than one file.")
            else:
                # Extract the single member; ZipFile.extract() writes it
                # under dest_path using the member's own name.
                zfd.extract(content[0], dest_path)
    except Exception as e:
        LOG.debug("ZIP: Error decompressing image %(iid)s: %(msg)s", {
            "iid": image_id,
            "msg": encodeutils.exception_to_unicode(e)})
        raise


def _lhafile(src_path, dest_path, image_id):
    if NO_LHA:
        raise Exception("No lhafile available.")
    try:
        with lhafile.LhaFile(src_path, 'r') as lfd:
            content = lfd.namelist()
            if len(content) != 1:
                raise Exception("Archive contains more than one file.")
            else:
                lfd.extract(content[0], dest_path)
    except Exception as e:
        LOG.debug("LHA: Error decompressing image %(iid)s: %(msg)s", {
            "iid": image_id,
            "msg": encodeutils.exception_to_unicode(e)})
        raise


def _gzipfile(src_path, dest_path, image_id):
    try:
        with gzip.open(src_path, 'r') as gzfd:
            with open(dest_path, 'wb') as fd:
                shutil.copyfileobj(gzfd, fd)
    except gzip.BadGzipFile as e:
        LOG.debug("GZIP: Error decompressing image %(iid)s: Bad GZip file: "
                  "%(msg)s", {"iid": image_id,
                              "msg": encodeutils.exception_to_unicode(e)})
        raise
    except Exception as e:
        LOG.debug("GZIP: Error decompressing image %(iid)s: %(msg)s", {
            "iid": image_id,
            "msg": encodeutils.exception_to_unicode(e)})
        raise


class _DecompressImage(task.Task):

    default_provides = 'file_path'

    def __init__(self, context, task_id, task_type,
                 image_repo, image_id):
        self.context = context
        self.task_id = task_id
        self.task_type = task_type
        self.image_repo = image_repo
        self.image_id = image_id
        self.dest_path = ""
        super(_DecompressImage, self).__init__(
            name='%s-Decompress_Image-%s' % (task_type, task_id))

    def execute(self, file_path, **kwargs):

        # TODO(jokke): Once we support other schemas we need to take them into
        # account and handle the paths here.
        src_path = file_path.split('file://')[-1]
        self.dest_path = "%(path)s.uc" % {'path': src_path}
        head = None
        with open(src_path, 'rb') as fd:
            head = fd.read(MAX_HEADER)
        for key, val in MAGIC_NUMBERS.items():
            offset, key = key.split("_")
            offset = int(offset)
            key = "_" + key
            if head.startswith(val, offset):
                # Dispatch to the matching module-level helper
                # (_zipfile, _lhafile or _gzipfile) and swap the
                # decompressed result in place of the source file.
                globals()[key](src_path, self.dest_path, self.image_id)
                os.replace(self.dest_path, src_path)

        return "file://%s" % src_path

    def revert(self, result=None, **kwargs):
        # NOTE(flaper87, jokke): If result is None, it probably
        # means this task failed. Otherwise, we would have
        # a result from its execution. This includes the case
        # that nothing was to be decompressed.
        if result is not None:
            LOG.debug("Image decompression failed.")
            if os.path.exists(self.dest_path):
                os.remove(self.dest_path)


def get_flow(**kwargs):
    """Return task flow for image decompression.

    :param context: request context
    :param task_id: Task ID.
    :param task_type: Type of the task.
    :param image_repo: Image repository used.
    :param image_id: Image ID
    """
    context = kwargs.get('context')
    task_id = kwargs.get('task_id')
    task_type = kwargs.get('task_type')
    image_repo = kwargs.get('image_repo')
    image_id = kwargs.get('image_id')

    return lf.Flow(task_type).add(
        _DecompressImage(context, task_id, task_type,
                         image_repo, image_id),
    )

@@ -0,0 +1,7 @@
---
features:
  - |
    A new Interoperable Image Import plugin has been introduced to
    address the use case of compressed images being provided either
    through 'web-download' or uploaded compressed to optimize network
    utilization between the client and Glance.

@@ -81,6 +81,7 @@ glance.image_import.plugins =
    no_op = glance.async_.flows.plugins.no_op:get_flow
    inject_image_metadata=glance.async_.flows.plugins.inject_image_metadata:get_flow
    image_conversion=glance.async_.flows.plugins.image_conversion:get_flow
    image_decompression=glance.async_.flows.plugins.image_decompression:get_flow

glance.image_import.internal_plugins =
    web_download = glance.async_.flows._internal_plugins.web_download:get_flow
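
The plugin name used in ``image_import_plugins`` resolves to its ``get_flow``
factory through these entry points, presumably via stevedore (an assumption
for illustration; the plugin name and namespace below come from the diff):

.. code-block:: python

   from stevedore import driver

   # Load the 'image_decompression' plugin from the entry-point group
   # registered above and obtain its get_flow callable.
   mgr = driver.DriverManager(
       namespace='glance.image_import.plugins',
       name='image_decompression',
       invoke_on_load=False)
   get_flow = mgr.driver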