From 2ba6fda3a25ce8af6c6eb9169222140ebdb872dd Mon Sep 17 00:00:00 2001
From: Joshua Harlow
Date: Fri, 11 Oct 2013 17:00:25 -0700
Subject: [PATCH] Add more comments that explain example & usage

Change-Id: I4665cda50c2bbc8e0e323efb1baff653ace12579
---
 taskflow/examples/create_parallel_volume.py | 53 +++++++++++++++++----
 1 file changed, 43 insertions(+), 10 deletions(-)

diff --git a/taskflow/examples/create_parallel_volume.py b/taskflow/examples/create_parallel_volume.py
index 402ba5ad..c271af71 100644
--- a/taskflow/examples/create_parallel_volume.py
+++ b/taskflow/examples/create_parallel_volume.py
@@ -36,22 +36,59 @@ from taskflow.patterns import unordered_flow as uf
 from taskflow import task
 from taskflow.utils import reflection
 
+# INTRO: This example shows how an unordered flow can be used to create a
+# large number of fake volumes in parallel (or serially, depending on a
+# constant that can be easily changed).
+
 
 @contextlib.contextmanager
-def show_time(name=''):
+def show_time(name):
     start = time.time()
     yield
     end = time.time()
     print(" -- %s took %0.3f seconds" % (name, end - start))
 
 
+# This affects how many volumes to create and how much time to *simulate*
+# passing for each volume to be created.
 MAX_CREATE_TIME = 3
 VOLUME_COUNT = 5
+
+# This will be used to determine if all the volumes are created in parallel
+# or whether the volumes are created serially (in an undefined order, since
+# an unordered flow is used). Note that there is a distinction between
+# ordering and parallelism, since unordered items can still be run in a
+# serial order. A typical reason for offering both is to allow debugging
+# with the serial approach, while the parallel approach is likely preferred
+# when running at a larger scale.
+#
+# If you switch this flag you can see the overall time difference that it
+# causes.
 SERIAL = False
+if SERIAL:
+    engine_conf = {
+        'engine': 'serial',
+    }
+else:
+    engine_conf = {
+        'engine': 'parallel',
+    }
 
 
 class VolumeCreator(task.Task):
     def __init__(self, volume_id):
+        # Note here that the volume name is composed of the name of the class
+        # along with the volume id that is being created. Since the name of a
+        # task uniquely identifies that task in storage, it is important that
+        # the name be relevant and identifiable if the task is recreated for
+        # subsequent resumption (if applicable).
+        #
+        # UUIDs are *not* used as they cannot be tied back to a previous
+        # task's state on resumption (since they are unique and will vary for
+        # each task that is created). A name based on the volume id that is to
+        # be created is more easily tied back to the original task, so that
+        # the volume creation can be resumed/reverted, and is much easier to
+        # use for audit and tracking purposes.
         base_name = reflection.get_callable_name(self)
         super(VolumeCreator, self).__init__(name="%s-%s" % (base_name,
                                                             volume_id))
@@ -68,17 +105,13 @@
 flow = uf.Flow("volume-maker")
 for i in range(0, VOLUME_COUNT):
     flow.add(VolumeCreator(volume_id="vol-%s" % (i)))
 
-if SERIAL:
-    engine_conf = {
-        'engine': 'serial',
-    }
-else:
-    engine_conf = {
-        'engine': 'parallel',
-    }
-
+# Show how much time the overall engine loading and running takes.
 with show_time(name=flow.name.title()):
     eng = engines.load(flow, engine_conf=engine_conf)
+    # This context manager automatically adds (and automatically removes) a
+    # helpful set of state transition notification printing helper utilities
+    # that show you exactly what transitions the engine is going through
+    # while running the various volume create tasks.
     with printing.PrintingListener(eng):
         eng.run()
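
For reference, a minimal standalone sketch of the pattern the new comments describe (an unordered flow of independent tasks, run with either a serial or a parallel engine) might look like the code below. The NoopTask class, the run() helper and the "demo-flow"/"noop-%s" names are purely illustrative; only the taskflow calls already shown in the patch (uf.Flow, task.Task with a name, engines.load with engine_conf) are assumed.

import time

from taskflow import engines
from taskflow.patterns import unordered_flow as uf
from taskflow import task


class NoopTask(task.Task):
    """Illustrative task that sleeps briefly to simulate doing some work."""

    def execute(self):
        time.sleep(0.25)


def run(engine_type):
    # Unordered flows impose no ordering between their tasks, so a parallel
    # engine may run them concurrently, while a serial engine runs them one
    # after another (in an undefined order).
    flow = uf.Flow("demo-flow")
    for i in range(5):
        flow.add(NoopTask(name="noop-%s" % i))
    start = time.time()
    engines.load(flow, engine_conf={'engine': engine_type}).run()
    print("%s engine took %0.3f seconds" % (engine_type, time.time() - start))


run('serial')
run('parallel')

Running both variants makes the timing difference that the SERIAL flag controls easy to see: the serial engine takes roughly the sum of the sleeps, while the parallel engine takes roughly the longest single sleep.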