Export images to be served by httpd

This change detects whether a registry supports the push API, and if
it does not then exports the images to /var/lib/image-serve to
be served by Apache.

Blueprint: podman-support
Change-Id: I48e23c8078847c9d69eeda64fad3f76f9d02dc9d
This commit is contained in:
Steve Baker
2018-12-10 10:10:09 +13:00
parent 14b7513747
commit 0a745d596a
3 changed files with 419 additions and 2 deletions

View File

@@ -0,0 +1,163 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import collections
import hashlib
import json
import os
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
IMAGE_EXPORT_DIR = '/var/lib/image-serve'
def make_dir(path):
    """Create *path* and any missing parents with mode 0775.

    Succeeds silently when the directory already exists, including the
    case where another process creates it concurrently.
    """
    try:
        os.makedirs(path, 0o775)
    except os.error:
        # The path already existed (possibly created by a concurrent
        # caller) or could not be created; treat both as best-effort.
        pass
def image_tag_from_url(image_url):
    """Split a parsed image URL into an (image, tag) pair.

    The tag is everything after the final ':' in the URL path; when the
    path has no ':' the tag is None. A single leading '/' is removed
    from the image name.

    :param image_url: urlparse result whose path names the image
    :returns: tuple of (image, tag)
    """
    image, sep, tag = image_url.path.rpartition(':')
    if not sep:
        # No ':' present - the whole path is the image, there is no tag.
        image, tag = tag, None
    if image.startswith('/'):
        image = image[1:]
    return image, tag
def export_stream(target_url, layer, calc_digest, layer_stream):
    """Stream a compressed layer into the export blob directory.

    The stream is written to
    ``<IMAGE_EXPORT_DIR>/v2/<image>/blobs/<digest>.gz`` while the digest
    and byte length of the written data are accumulated, then
    ``layer['digest']`` and ``layer['size']`` are updated to the
    calculated values.

    :param target_url: urlparse result for the target image URL
    :param layer: manifest layer dict; its 'digest' entry names the blob
        file and is replaced with the calculated digest
    :param calc_digest: hashlib digest object updated with every chunk
    :param layer_stream: iterable of byte chunks; iteration stops at the
        first falsy chunk
    :returns: the calculated 'sha256:...' digest string
    """
    image, _ = image_tag_from_url(target_url)
    blob_dir = os.path.join(IMAGE_EXPORT_DIR, 'v2', image, 'blobs')
    make_dir(blob_dir)
    blob_path = os.path.join(blob_dir, '%s.gz' % layer['digest'])
    LOG.debug('export layer to %s' % blob_path)
    length = 0
    with open(blob_path, 'w+b') as blob_file:
        for chunk in layer_stream:
            if not chunk:
                break
            blob_file.write(chunk)
            calc_digest.update(chunk)
            length += len(chunk)
    layer_digest = 'sha256:%s' % calc_digest.hexdigest()
    LOG.debug('Calculated layer digest: %s' % layer_digest)
    layer['digest'] = layer_digest
    layer['size'] = length
    return layer_digest
def cross_repo_mount(target_image_url, image_layers, source_layers):
    """Hard-link already-exported layer blobs into the target image repo.

    For each entry in *source_layers* that is known in *image_layers* and
    whose blob file exists on disk, a hard link is created in the target
    image's blob directory. Layers that are unknown, missing on disk, or
    already linked are skipped.

    :param target_image_url: urlparse result for the target image URL
    :param image_layers: dict mapping layer digest -> image URL that
        already holds that layer
    :param source_layers: iterable of layer digests to link
    """
    for digest in source_layers:
        if digest not in image_layers:
            continue
        source_image, _ = image_tag_from_url(image_layers[digest])
        source_blob = os.path.join(
            IMAGE_EXPORT_DIR, 'v2', source_image, 'blobs', '%s.gz' % digest)
        if not os.path.exists(source_blob):
            LOG.debug('Layer not found: %s' % source_blob)
            continue
        target_image, _ = image_tag_from_url(target_image_url)
        target_blob_dir = os.path.join(
            IMAGE_EXPORT_DIR, 'v2', target_image, 'blobs')
        make_dir(target_blob_dir)
        target_blob = os.path.join(target_blob_dir, '%s.gz' % digest)
        if os.path.exists(target_blob):
            continue
        LOG.debug('Linking layers: %s -> %s' % (source_blob, target_blob))
        # A hard link lets each repository keep its own reference, so the
        # layers can have independent lifecycles.
        os.link(source_blob, target_blob)
def export_manifest_config(target_url,
                           manifest_str,
                           manifest_type,
                           config_str):
    """Write the manifest, config blob and tag data for an exported image.

    Lays out files under IMAGE_EXPORT_DIR in the registry v2 URL shape so
    a static web server can answer manifest/blob requests, and maintains a
    per-tag symlink plus a tags/list document.

    :param target_url: urlparse result for the target image URL; supplies
        the image name and tag
    :param manifest_str: manifest JSON string, written verbatim so the
        digest computed here matches the stored bytes
    :param manifest_type: manifest media type, emitted as the
        Content-Type header in the generated .htaccess
    :param config_str: config JSON string, or None to skip writing a
        config blob
    """
    image, tag = image_tag_from_url(target_url)
    manifest = json.loads(manifest_str)
    if config_str is not None:
        blob_dir_path = os.path.join(
            IMAGE_EXPORT_DIR, 'v2', image, 'blobs')
        make_dir(blob_dir_path)
        # The config blob file is named after the digest the manifest
        # records for it.
        config_digest = manifest['config']['digest']
        config_path = os.path.join(blob_dir_path, config_digest)
        with open(config_path, 'w+') as f:
            f.write(config_str)
    # Digest of the manifest bytes; used as the manifest directory name
    # and echoed in the Docker-Content-Digest / ETag headers below.
    calc_digest = hashlib.sha256()
    calc_digest.update(manifest_str.encode('utf-8'))
    manifest_digest = 'sha256:%s' % calc_digest.hexdigest()
    manifests_path = os.path.join(
        IMAGE_EXPORT_DIR, 'v2', image, 'manifests')
    manifest_dir_path = os.path.join(manifests_path, manifest_digest)
    manifest_symlink_path = os.path.join(manifests_path, tag)
    manifest_path = os.path.join(manifest_dir_path, 'index.json')
    htaccess_path = os.path.join(manifest_dir_path, '.htaccess')
    tags_dir_path = os.path.join(IMAGE_EXPORT_DIR, 'v2', image, 'tags')
    tags_list_path = os.path.join(tags_dir_path, 'list')
    make_dir(manifest_dir_path)
    make_dir(tags_dir_path)
    # OrderedDict keeps the .htaccess header lines in a stable order.
    headers = collections.OrderedDict()
    headers['Content-Type'] = manifest_type
    headers['Docker-Content-Digest'] = manifest_digest
    headers['ETag'] = manifest_digest
    with open(htaccess_path, 'w+') as f:
        for header in headers.items():
            f.write('Header set %s "%s"\n' % header)
    with open(manifest_path, 'w+') as f:
        f.write(manifest_str)
    # Re-point the tag symlink at this manifest directory, replacing any
    # previous target for the same tag.
    if os.path.exists(manifest_symlink_path):
        os.remove(manifest_symlink_path)
    os.symlink(manifest_dir_path, manifest_symlink_path)
    # Rebuild the tag list from the symlinks present in the manifests
    # directory (each symlink is one tag).
    tags = []
    for f in os.listdir(manifests_path):
        f_path = os.path.join(manifests_path, f)
        if os.path.islink(f_path):
            tags.append(f)
    tags_data = {
        "name": image,
        "tags": tags
    }
    with open(tags_list_path, 'w+') as f:
        json.dump(tags_data, f)

View File

@@ -41,6 +41,7 @@ from tripleo_common.actions import ansible
from tripleo_common.image.base import BaseImageManager
from tripleo_common.image.exception import ImageNotFoundException
from tripleo_common.image.exception import ImageUploaderException
from tripleo_common.image import image_export
LOG = logging.getLogger(__name__)
@@ -193,6 +194,8 @@ class BaseImageUploader(object):
mirrors = {}
insecure_registries = set()
secure_registries = set(SECURE_REGISTRIES)
export_registries = set()
push_registries = set()
def __init__(self):
    """Initialize per-instance uploader state."""
    # Pending upload work items; starts empty (presumably appended to by
    # the upload workflow elsewhere in this class - confirm in full file).
    self.upload_tasks = []
@@ -206,6 +209,8 @@ class BaseImageUploader(object):
cls.secure_registries.clear()
cls.secure_registries.update(SECURE_REGISTRIES)
cls.mirrors.clear()
cls.export_registries.clear()
cls.push_registries.clear()
def cleanup(self):
    """Post-upload cleanup hook; this base implementation does nothing."""
    pass
@@ -396,7 +401,7 @@ class BaseImageUploader(object):
manifest_r = manifest_f.result()
tags_r = tags_f.result()
if manifest_r.status_code == 404:
if manifest_r.status_code in (403, 404):
raise ImageNotFoundException('Not found image: %s' %
image_url.geturl())
manifest_r.raise_for_status()
@@ -584,6 +589,12 @@ class BaseImageUploader(object):
source_layers, session):
netloc = target_image_url.netloc
name = target_image_url.path.split(':')[0][1:]
export = netloc in cls.export_registries
if export:
image_export.cross_repo_mount(
target_image_url, image_layers, source_layers)
return
if netloc in cls.insecure_registries:
scheme = 'http'
else:
@@ -904,6 +915,8 @@ class PythonImageUploader(BaseImageUploader):
target_session = self.authenticate(
t.target_image_url)
self._detect_target_export(t.target_image_url, target_session)
if t.modify_role:
if self._image_exists(
t.target_image, target_session):
@@ -977,6 +990,34 @@ class PythonImageUploader(BaseImageUploader):
self.image_layers.setdefault(layer, t.target_image_url)
return to_cleanup
@classmethod
@tenacity.retry(  # Retry up to 5 times with jittered exponential backoff
    reraise=True,
    retry=tenacity.retry_if_exception_type(
        requests.exceptions.RequestException
    ),
    wait=tenacity.wait_random_exponential(multiplier=1, max=10),
    stop=tenacity.stop_after_attempt(5)
)
def _detect_target_export(cls, image_url, session):
    """Return True when the target registry cannot accept pushes.

    The answer is cached per registry host in cls.export_registries and
    cls.push_registries, so the probe request is made at most once per
    netloc.

    :param image_url: urlparse result for the target image URL
    :param session: requests session used to contact the registry
    :returns: True if images must be exported to the local filesystem,
        False if the registry accepts the push (blob upload) API
    :raises: requests.HTTPError for unexpected probe responses
    """
    if image_url.netloc in cls.export_registries:
        return True
    if image_url.netloc in cls.push_registries:
        return False
    # detect if the registry is push-capable by requesting an upload URL.
    image, tag = cls._image_tag_from_url(image_url)
    upload_req_url = cls._build_url(
        image_url,
        path=CALL_UPLOAD % {'image': image})
    r = session.post(upload_req_url, timeout=30)
    # Registries that reject the upload request outright are treated as
    # export-only; any other failure status is raised (and retried by the
    # tenacity decorator on connection errors).
    if r.status_code in (501, 403, 404, 405):
        cls.export_registries.add(image_url.netloc)
        return True
    r.raise_for_status()
    cls.push_registries.add(image_url.netloc)
    return False
@classmethod
@tenacity.retry( # Retry up to 5 times with jittered exponential backoff
reraise=True,
@@ -997,7 +1038,7 @@ class PythonImageUploader(BaseImageUploader):
)
manifest_headers = {'Accept': MEDIA_MANIFEST_V2}
r = session.get(url, headers=manifest_headers, timeout=30)
if r.status_code == 404:
if r.status_code in (403, 404):
raise ImageNotFoundException('Not found image: %s' %
url.geturl())
r.raise_for_status()
@@ -1147,6 +1188,7 @@ class PythonImageUploader(BaseImageUploader):
manifest_str,
config_str,
target_session=None):
manifest = json.loads(manifest_str)
if config_str is not None:
manifest['config']['size'] = len(config_str)
@@ -1160,6 +1202,16 @@ class PythonImageUploader(BaseImageUploader):
else:
manifest_type = MEDIA_MANIFEST_V1
export = target_url.netloc in cls.export_registries
if export:
image_export.export_manifest_config(
target_url,
manifest_str,
manifest_type,
config_str
)
return
if config_str is not None:
config_digest = manifest['config']['digest']
# Upload the config json as a blob
@@ -1340,6 +1392,11 @@ class PythonImageUploader(BaseImageUploader):
length = 0
upload_resp = None
export = target_url.netloc in cls.export_registries
if export:
return image_export.export_stream(
target_url, layer, calc_digest, layer_stream)
for chunk in layer_stream:
if not chunk:
break

View File

@@ -0,0 +1,197 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import hashlib
import io
import json
import os
import shutil
import six
from six.moves.urllib.parse import urlparse
import tempfile
import zlib
from tripleo_common.image import image_export
from tripleo_common.image import image_uploader
from tripleo_common.tests import base
class TestImageExport(base.TestCase):
    """Tests for tripleo_common.image.image_export."""

    def setUp(self):
        super(TestImageExport, self).setUp()
        export_dir = image_export.IMAGE_EXPORT_DIR
        # mkdtemp creates the directory atomically. Re-using the name of
        # a deleted NamedTemporaryFile (as before) leaves a window where
        # another process could claim the same path.
        temp_export_dir = tempfile.mkdtemp()

        def restore_export_dir():
            shutil.rmtree(temp_export_dir)
            image_export.IMAGE_EXPORT_DIR = export_dir

        image_export.IMAGE_EXPORT_DIR = temp_export_dir
        self.addCleanup(restore_export_dir)

    def test_make_dir(self):
        """make_dir creates nested dirs and tolerates existing ones."""
        path = os.path.join(image_export.IMAGE_EXPORT_DIR, 'foo/bar')
        self.assertFalse(os.path.exists(path))
        self.addCleanup(os.rmdir, path)
        image_export.make_dir(path)
        self.assertTrue(os.path.isdir(path))
        # Call again to assert no error is raised
        image_export.make_dir(path)

    def test_image_tag_from_url(self):
        """Image/tag splitting for tagged, host-qualified and untagged URLs."""
        url = urlparse('docker://docker.io/t/nova-api:latest')
        self.assertEqual(
            ('t/nova-api', 'latest'),
            image_export.image_tag_from_url(url)
        )
        url = urlparse('containers-storage:localhost:8787/t/nova-api:latest')
        self.assertEqual(
            ('localhost:8787/t/nova-api', 'latest'),
            image_export.image_tag_from_url(url)
        )
        url = urlparse('docker://docker.io/t/nova-api')
        self.assertEqual(
            ('t/nova-api', None),
            image_export.image_tag_from_url(url)
        )

    def test_export_stream(self):
        """export_stream writes the blob file and fixes up digest/size."""
        blob_data = six.b('The Blob')
        blob_compressed = zlib.compress(blob_data)
        calc_digest = hashlib.sha256()
        calc_digest.update(blob_compressed)
        compressed_digest = 'sha256:' + calc_digest.hexdigest()
        target_url = urlparse('docker://localhost:8787/t/nova-api:latest')
        layer = {
            'digest': compressed_digest
        }
        calc_digest = hashlib.sha256()
        layer_stream = io.BytesIO(blob_compressed)
        layer_digest = image_export.export_stream(
            target_url, layer, calc_digest, layer_stream
        )
        self.assertEqual(compressed_digest, layer_digest)
        self.assertEqual(compressed_digest, layer['digest'])
        self.assertEqual(len(blob_compressed), layer['size'])
        blob_dir = os.path.join(image_export.IMAGE_EXPORT_DIR,
                                'v2/t/nova-api/blobs')
        blob_path = os.path.join(blob_dir, '%s.gz' % compressed_digest)
        self.assertTrue(os.path.isdir(blob_dir))
        self.assertTrue(os.path.isfile(blob_path))
        with open(blob_path, 'rb') as f:
            self.assertEqual(blob_compressed, f.read())

    def test_cross_repo_mount(self):
        """cross_repo_mount hard-links only blobs that exist on disk."""
        target_url = urlparse('docker://localhost:8787/t/nova-api:latest')
        other_url = urlparse('docker://localhost:8787/t/nova-compute:latest')
        image_layers = {
            'sha256:1234': other_url
        }
        source_layers = [
            'sha256:1234', 'sha256:6789'
        ]
        source_blob_dir = os.path.join(image_export.IMAGE_EXPORT_DIR,
                                       'v2/t/nova-compute/blobs')
        source_blob_path = os.path.join(source_blob_dir, 'sha256:1234.gz')
        target_blob_dir = os.path.join(image_export.IMAGE_EXPORT_DIR,
                                       'v2/t/nova-api/blobs')
        target_blob_path = os.path.join(target_blob_dir, 'sha256:1234.gz')
        # call with missing source, no change
        image_export.cross_repo_mount(target_url, image_layers, source_layers)
        self.assertFalse(os.path.exists(source_blob_path))
        self.assertFalse(os.path.exists(target_blob_path))
        image_export.make_dir(source_blob_dir)
        with open(source_blob_path, 'w') as f:
            f.write('blob')
        self.assertTrue(os.path.exists(source_blob_path))
        # call with existing source
        image_export.cross_repo_mount(target_url, image_layers, source_layers)
        self.assertTrue(os.path.exists(target_blob_path))
        with open(target_blob_path, 'r') as f:
            self.assertEqual('blob', f.read())

    def test_export_manifest_config(self):
        """export_manifest_config writes config, manifest and .htaccess."""
        target_url = urlparse('docker://localhost:8787/t/nova-api:latest')
        config_str = '{"config": {}}'
        config_digest = 'sha256:1234'
        manifest = {
            'config': {
                'digest': config_digest,
                'size': 2,
                'mediaType': 'application/vnd.docker.container.image.v1+json'
            },
            'layers': [
                {'digest': 'sha256:aeb786'},
                {'digest': 'sha256:4dc536'},
            ],
            'mediaType': 'application/vnd.docker.'
                         'distribution.manifest.v2+json',
        }
        manifest_str = json.dumps(manifest)
        calc_digest = hashlib.sha256()
        calc_digest.update(manifest_str.encode('utf-8'))
        manifest_digest = 'sha256:%s' % calc_digest.hexdigest()
        image_export.export_manifest_config(
            target_url, manifest_str,
            image_uploader.MEDIA_MANIFEST_V2, config_str
        )
        config_path = os.path.join(
            image_export.IMAGE_EXPORT_DIR,
            'v2/t/nova-api/blobs/sha256:1234'
        )
        manifest_path = os.path.join(
            image_export.IMAGE_EXPORT_DIR,
            'v2/t/nova-api/manifests',
            manifest_digest,
            'index.json'
        )
        manifest_htaccess_path = os.path.join(
            image_export.IMAGE_EXPORT_DIR,
            'v2/t/nova-api/manifests',
            manifest_digest,
            '.htaccess'
        )
        expected_htaccess = '''Header set Content-Type "%s"
Header set Docker-Content-Digest "%s"
Header set ETag "%s"
''' % (
            'application/vnd.docker.distribution.manifest.v2+json',
            manifest_digest,
            manifest_digest
        )
        with open(config_path, 'r') as f:
            self.assertEqual(config_str, f.read())
        with open(manifest_path, 'r') as f:
            self.assertEqual(manifest_str, f.read())
        with open(manifest_htaccess_path, 'r') as f:
            self.assertEqual(expected_htaccess, f.read())