Browse Source

Update container build to better rein in threads

This restructures the buildah build process to ensure we're
managing our thread pool correctly and not spawning more
threads than needed or expected. By updating the threading
process we'll ensure we're building each container only
once and that we're maintaining build order.

Tests have been added to ensure the new methods are called,
and that the data processing is consistent.

Closes-Bug: #1894856
Change-Id: I647212442c378aabbd1887a0b499c19bbc74b5f7
Signed-off-by: Kevin Carter <kecarter@redhat.com>
tags/12.6.0^0
Kevin Carter 3 weeks ago
committed by Emilien Macchi
parent
commit
16ff908a2d
2 changed files with 221 additions and 61 deletions
  1. +142
    -61
      tripleo_common/image/builder/buildah.py
  2. +79
    -0
      tripleo_common/tests/image/builder/test_buildah.py

+ 142
- 61
tripleo_common/image/builder/buildah.py View File

@@ -185,76 +185,157 @@ class BuildahBuilder(base.BaseBuilder):
process.execute(*args, run_as_root=False, use_standard_locale=True)

def build_all(self, deps=None):
"""Function that browse containers dependencies and build them.
"""Build all containers.

This function will thread the build process allowing it to complete
in the shortest possible time.

:params deps: Dictionary defining the container images
dependencies.
dependencies.
"""

if deps is None:
deps = self.deps

if isinstance(deps, (list,)):
# Only a list of images can be multi-processed because they
# are the last layer to build. Otherwise we could have issues
# to build multiple times the same layer.
# Number of workers will be based on CPU count with a min 2,
# max 8. Concurrency in Buildah isn't that great so it's not
# useful to go above 8.
workers = min(8, max(2, processutils.get_worker_count()))
with futures.ThreadPoolExecutor(max_workers=workers) as executor:
future_to_build = {executor.submit(self.build_all,
container): container for container in
deps}
done, not_done = futures.wait(
future_to_build,
timeout=self.build_timeout,
return_when=futures.FIRST_EXCEPTION
container_deps = self._generate_deps(deps=deps, containers=list())
LOG.debug("All container deps: {}".format(container_deps))
for containers in container_deps:
LOG.info("Processing containers: {}".format(containers))
if isinstance(deps, (list,)):
self._multi_build(containers=containers)
else:
self._multi_build(containers=[containers])

def _generate_deps(self, deps, containers, prio_list=None):
"""Browse container dependencies and return an array.

When the dependencies are generated they're captured in an array,
which contains additional arrays. This data structure is later
used in a futures queue.

:params deps: Dictionary defining the container images
dependencies.
:params containers: List used to keep track of dependent containers.
:params prio_list: List used to keep track of nested dependencies.
:returns: list
"""

LOG.debug("Process deps: {}".format(deps))
if isinstance(deps, (six.string_types,)):
if prio_list:
prio_list.append(deps)
else:
containers.append([deps])

elif isinstance(deps, (dict,)):
parents = list(deps.keys())
if prio_list:
prio_list.extend(parents)
else:
containers.append(parents)
for value in deps.values():
LOG.debug("Recursing with: {}".format(value))
self._generate_deps(
deps=value,
containers=containers
)

elif isinstance(deps, (list,)):
dep_list = list()
dep_rehash_list = list()
for item in deps:
if isinstance(item, (six.string_types,)):
dep_list.append(item)
else:
dep_rehash_list.append(item)

if dep_list:
containers.append(dep_list)

for item in dep_rehash_list:
LOG.debug("Recursing with: {}".format(item))
self._generate_deps(
deps=item,
containers=containers,
prio_list=dep_list
)
# NOTE(cloudnull): Once the job has been completed all completed
# jobs are checked for exceptions. If any jobs
# failed a SystemError will be raised using the
# exception information. If any job was loaded
# but not executed a SystemError will be raised.
exceptions = list()
for job in done:
if job._exception:
exceptions.append(
"\nException information: {exception}".format(
exception=job._exception
)

LOG.debug("Constructed containers: {}".format(containers))
return containers

def _multi_build(self, containers):
"""Build multiple containers.

Multi-thread the build process for all containers defined within
the containers list.

:params containers: List defining the container images.
"""

# Workers will use the processor core count with a max of 8. If
# the containers array has a length less-than the expected processor
# count, the workers will be adjusted to meet the expectations of the
# work being processed.
workers = min(
min(
8,
max(
2,
processutils.get_worker_count()
)
),
len(containers)
)
with futures.ThreadPoolExecutor(max_workers=workers) as executor:
future_to_build = {
executor.submit(
self._generate_container, container_name
): container_name for container_name in containers
}
done, not_done = futures.wait(
future_to_build,
timeout=self.build_timeout,
return_when=futures.FIRST_EXCEPTION
)

# NOTE(cloudnull): Once the job has been completed all completed
# jobs are checked for exceptions. If any jobs
# failed a SystemError will be raised using the
# exception information. If any job was loaded
# but not executed a SystemError will be raised.
exceptions = list()
for job in done:
if job._exception:
exceptions.append(
"\nException information: {exception}".format(
exception=job._exception
)
else:
if exceptions:
raise RuntimeError(
'\nThe following errors were detected during '
'container build(s):\n{exceptions}'.format(
exceptions='\n'.join(exceptions)
)
)
else:
if exceptions:
raise RuntimeError(
'\nThe following errors were detected during '
'container build(s):\n{exceptions}'.format(
exceptions='\n'.join(exceptions)
)
)

if not_done:
error_msg = ('The following jobs were '
'incomplete: {}'.format(
[future_to_build[job] for job
in not_done]))

jobs_with_exceptions = [{
'container': future_to_build[job],
'exception': job._exception}
for job in not_done if job._exception]
if jobs_with_exceptions:
for job_with_exception in jobs_with_exceptions:
error_msg = error_msg + os.linesep + (
"%(container)s raised the following "
"exception: %(exception)s" %
job_with_exception)

raise SystemError(error_msg)
if not_done:
error_msg = (
'The following jobs were incomplete: {}'.format(
[future_to_build[job] for job in not_done]
)
)

elif isinstance(deps, (dict,)):
for container in deps:
self._generate_container(container)
self.build_all(deps.get(container))
elif isinstance(deps, six.string_types):
self._generate_container(deps)
jobs_with_exceptions = [{
'container': future_to_build[job],
'exception': job._exception}
for job in not_done if job._exception]
if jobs_with_exceptions:
for job_with_exception in jobs_with_exceptions:
error_msg = error_msg + os.linesep + (
"%(container)s raised the following "
"exception: %(exception)s" %
job_with_exception)

raise SystemError(error_msg)

+ 79
- 0
tripleo_common/tests/image/builder/test_buildah.py View File

@@ -37,6 +37,39 @@ BUILD_ALL_DICT_CONTAINERS = {
}
BUILD_ALL_STR_CONTAINER = 'container1'

PREPROCESSED_CONTAINER_DEPS = [
{
"image0": [
"image1",
{
"image2": [
{
"image3": [
"image4",
"image5"
]
},
"image8",
{
"image6": [
"image7"
]
},
"image9"
]
},
{
"image10": [
"image11",
"image12"
]
},
"image13",
"image14"
]
}
]


class ThreadPoolExecutorReturn(object):
_exception = None
@@ -223,3 +256,49 @@ class TestBuildahBuilder(base.TestCase):
def test_build_all_str_ok(self, mock_touch,
mock_build, mock_wait, mock_submit):
bb(WORK_DIR, DEPS).build_all(deps=BUILD_ALL_STR_CONTAINER)

def test_dep_processing(self):
containers = list()
self.assertEqual(
bb(WORK_DIR, DEPS)._generate_deps(
deps=PREPROCESSED_CONTAINER_DEPS,
containers=containers
),
[
[
'image0'
],
[
'image1',
'image13',
'image14',
'image2',
'image10'
],
[
'image8',
'image9',
'image3',
'image6'
],
[
'image4',
'image5'
],
[
'image7'
],
[
'image11',
'image12'
]
]
)

@mock.patch(
'tripleo_common.image.builder.buildah.BuildahBuilder._multi_build',
autospec=True
)
def test_build_all_multi_build(self, mock_multi_build):
bb(WORK_DIR, DEPS).build_all(deps=BUILD_ALL_LIST_CONTAINERS)
self.assertTrue(mock_multi_build.called)

Loading…
Cancel
Save