From 1a70f8cb44e51c2ed47f787a93d2c5b648442273 Mon Sep 17 00:00:00 2001 From: Stanislav Kudriashev Date: Tue, 10 Dec 2013 13:13:30 +0200 Subject: [PATCH] Fix misspellings Change-Id: I6f332f01d197c6ba1b02de1145714718f8aea6fb --- taskflow/engines/action_engine/graph_action.py | 2 +- taskflow/examples/build_a_car.py | 4 ++-- taskflow/examples/calculate_in_parallel.py | 4 ++-- taskflow/examples/calculate_linear.py | 7 ++++--- taskflow/examples/fake_billing.py | 4 ++-- taskflow/examples/resume_many_flows.py | 2 +- taskflow/examples/resume_vm_boot.py | 6 +++--- taskflow/examples/resume_volume_create.py | 2 +- taskflow/examples/simple_linear.py | 2 +- taskflow/examples/simple_linear_listening.py | 4 ++-- taskflow/exceptions.py | 2 +- taskflow/persistence/backends/impl_dir.py | 4 ++-- taskflow/persistence/logbook.py | 8 ++++---- taskflow/storage.py | 2 +- taskflow/utils/lock_utils.py | 2 +- taskflow/utils/misc.py | 6 +++--- 16 files changed, 31 insertions(+), 30 deletions(-) diff --git a/taskflow/engines/action_engine/graph_action.py b/taskflow/engines/action_engine/graph_action.py index 77f61682..40aa9528 100644 --- a/taskflow/engines/action_engine/graph_action.py +++ b/taskflow/engines/action_engine/graph_action.py @@ -109,7 +109,7 @@ class ParallelGraphAction(SequentialGraphAction): nodes which can run (those which have there dependencies satisfied or those with no dependencies) and submitting them to the executor to be ran, and then after running this process will be repeated until - no more nodes can be ran (or a failure has a occured and all nodes + no more nodes can be ran (or a failure has occurred and all nodes were stopped from further running). 
""" # A deque is a thread safe push/pop/popleft/append implementation diff --git a/taskflow/examples/build_a_car.py b/taskflow/examples/build_a_car.py index ca292846..4385797f 100644 --- a/taskflow/examples/build_a_car.py +++ b/taskflow/examples/build_a_car.py @@ -104,7 +104,7 @@ def verify(spec, **kwargs): # These two functions connect into the state transition notification emission # points that the engine outputs, they can be used to log state transitions -# that are occuring, or they can be used to suspend the engine (or perform +# that are occurring, or they can be used to suspend the engine (or perform # other useful activities). def flow_watch(state, details): print('Flow => %s' % state) @@ -124,7 +124,7 @@ flow = lf.Flow("make-auto").add( # These *_installed outputs allow for other tasks to depend on certain # actions being performed (aka the components were installed), another # way to do this is to link() the tasks manually instead of creating - # an 'artifical' data dependency that accomplishes the same goal the + # an 'artificial' data dependency that accomplishes the same goal the # manual linking would result in. 
task.FunctorTask(install_engine, provides='engine_installed'), task.FunctorTask(install_doors, provides='doors_installed'), diff --git a/taskflow/examples/calculate_in_parallel.py b/taskflow/examples/calculate_in_parallel.py index e653b50c..cd119221 100644 --- a/taskflow/examples/calculate_in_parallel.py +++ b/taskflow/examples/calculate_in_parallel.py @@ -80,7 +80,7 @@ flow = lf.Flow('root').add( # Calculate 'z1 = x1+y1 = 5' # # Rebind here means that the execute() function x argument will be - # satisified from a previous output named 'x1', and the y argument + # satisfied from a previous output named 'x1', and the y argument # of execute() will be populated from the previous output named 'y1' # # The output (result of adding) will be mapped into a variable named @@ -95,6 +95,6 @@ flow = lf.Flow('root').add( # The result here will be all results (from all tasks) which is stored in an # in-memory storage location that backs this engine since it is not configured -# with persistance storage. +# with persistence storage. result = taskflow.engines.run(flow, engine_conf='parallel') print(result) diff --git a/taskflow/examples/calculate_linear.py b/taskflow/examples/calculate_linear.py index 4dd481d3..04dfda01 100644 --- a/taskflow/examples/calculate_linear.py +++ b/taskflow/examples/calculate_linear.py @@ -39,7 +39,7 @@ from taskflow import task # are bound with ('z', 'd') keys from the engines storage mechanism. # # A multiplier task uses a binding that another task also provides, but this -# example explicitly shows that 'z' parameter is binded with 'a' key +# example explicitly shows that 'z' parameter is bound with 'a' key # This shows that if a task depends on a key named the same as a key provided # from another task the name can be remapped to take the desired key from a # different origin. @@ -73,7 +73,8 @@ class Adder(task.Task): return x + y -# This task multiplies an input variable by a multipler and returns the result. 
+# This task multiplies an input variable by a multiplier and returns the +# result. # # Note that since this task does not have a revert() function (since # multiplication is a stateless operation) and there are no side-effects that @@ -112,6 +113,6 @@ flow = lf.Flow('root').add( # The result here will be all results (from all tasks) which is stored in an # in-memory storage location that backs this engine since it is not configured -# with persistance storage. +# with persistence storage. results = taskflow.engines.run(flow) print(results) diff --git a/taskflow/examples/fake_billing.py b/taskflow/examples/fake_billing.py index cee51a64..63e95769 100644 --- a/taskflow/examples/fake_billing.py +++ b/taskflow/examples/fake_billing.py @@ -70,7 +70,7 @@ class UrlCaller(object): status_cb(float(i) / len(data)) -# Since engines save the output of tasks to a optional persistant storage +# Since engines save the output of tasks to a optional persistent storage # backend resources have to be dealt with in a slightly different manner since # resources are transient and can not be persisted (or serialized). For tasks # that require access to a set of resources it is a common pattern to provide @@ -158,7 +158,7 @@ class DeclareSuccess(task.Task): resources = ResourceFetcher() flow = lf.Flow("initialize-me") -# 1. First we extract the api request into a useable format. +# 1. First we extract the api request into a usable format. # 2. Then we go ahead and make a database entry for our request. flow.add(ExtractInputRequest(resources), MakeDBEntry(resources)) diff --git a/taskflow/examples/resume_many_flows.py b/taskflow/examples/resume_many_flows.py index a3aa395f..e3f08bca 100644 --- a/taskflow/examples/resume_many_flows.py +++ b/taskflow/examples/resume_many_flows.py @@ -28,7 +28,7 @@ import tempfile # script which doesn't 'crash' and it will resume all the given engines flows # that did not complete and run them to completion (instead of crashing). 
# -# This shows how a set of tasks can be finished even after repeatingly being +# This shows how a set of tasks can be finished even after repeatedly being # crashed, *crash resistance* if you may call it, due to the engine concept as # well as the persistence layer which keeps track of the state a flow # transitions through and persists the intermediary inputs and outputs and diff --git a/taskflow/examples/resume_vm_boot.py b/taskflow/examples/resume_vm_boot.py index 0a16f37f..e4ecc157 100644 --- a/taskflow/examples/resume_vm_boot.py +++ b/taskflow/examples/resume_vm_boot.py @@ -45,9 +45,9 @@ from taskflow.utils import eventlet_utils as e_utils from taskflow.utils import persistence_utils as p_utils -# INTRO: This examples shows how a hierachy of flows can be used to create a vm -# in a reliable & resumable manner using taskflow + a miniature version of what -# nova does while booting a vm. +# INTRO: This examples shows how a hierarchy of flows can be used to create a +# vm in a reliable & resumable manner using taskflow + a miniature version of +# what nova does while booting a vm. @contextlib.contextmanager diff --git a/taskflow/examples/resume_volume_create.py b/taskflow/examples/resume_volume_create.py index 1c35c735..35edbac6 100644 --- a/taskflow/examples/resume_volume_create.py +++ b/taskflow/examples/resume_volume_create.py @@ -41,7 +41,7 @@ from taskflow.persistence import backends from taskflow.utils import persistence_utils as p_utils -# INTRO: This examples shows how a hierachy of flows can be used to create a +# INTRO: This examples shows how a hierarchy of flows can be used to create a # pseudo-volume in a reliable & resumable manner using taskflow + a miniature # version of what cinder does while creating a volume (very miniature). 
diff --git a/taskflow/examples/simple_linear.py b/taskflow/examples/simple_linear.py index e285e686..e478d40e 100644 --- a/taskflow/examples/simple_linear.py +++ b/taskflow/examples/simple_linear.py @@ -33,7 +33,7 @@ from taskflow import task # INTRO: In this example we create two tasks, each of which ~calls~ a given # ~phone~ number (provided as a function input) in a linear fashion (one after -# the other). For a workflow which is serial this shows a extremly simple way +# the other). For a workflow which is serial this shows an extremely simple way # of structuring your tasks (the code that does the work) into a linear # sequence (the flow) and then passing the work off to an engine, with some # initial data to be ran in a reliable manner. diff --git a/taskflow/examples/simple_linear_listening.py b/taskflow/examples/simple_linear_listening.py index ca38fce1..b1bc7c91 100644 --- a/taskflow/examples/simple_linear_listening.py +++ b/taskflow/examples/simple_linear_listening.py @@ -37,7 +37,7 @@ from taskflow import task # a given ~phone~ number (provided as a function input) in a linear fashion # (one after the other). # -# For a workflow which is serial this shows a extremly simple way +# For a workflow which is serial this shows an extremely simple way # of structuring your tasks (the code that does the work) into a linear # sequence (the flow) and then passing the work off to an engine, with some # initial data to be ran in a reliable manner. @@ -78,7 +78,7 @@ def task_watch(state, details): # as tasks. There was previous work done to just allow a function to be # directly passed, but in python 3.0 there is no easy way to capture an # instance method, so this wrapping approach was decided upon instead which -# can attach to instance methods (if thats desired). +# can attach to instance methods (if that's desired). 
flow = lf.Flow("Call-them") flow.add(task.FunctorTask(execute=call_jim)) flow.add(task.FunctorTask(execute=call_joe)) diff --git a/taskflow/exceptions.py b/taskflow/exceptions.py index 7708f80e..29ae711e 100644 --- a/taskflow/exceptions.py +++ b/taskflow/exceptions.py @@ -73,7 +73,7 @@ class JobNotFound(TaskFlowException): class MissingDependencies(InvalidStateException): - """Raised when a entity has dependencies that can not be satisified.""" + """Raised when a entity has dependencies that can not be satisfied.""" message = ("%(who)s requires %(requirements)s but no other entity produces" " said requirements") diff --git a/taskflow/persistence/backends/impl_dir.py b/taskflow/persistence/backends/impl_dir.py index 0c5d74d9..3127f59d 100644 --- a/taskflow/persistence/backends/impl_dir.py +++ b/taskflow/persistence/backends/impl_dir.py @@ -333,7 +333,7 @@ class Connection(base.Connection): def _step_book(): self._run_with_process_lock("book", _step_flow) - # Acquire all locks by going through this little hiearchy. + # Acquire all locks by going through this little hierarchy. self._run_with_process_lock("init", _step_book) @lock_utils.locked @@ -368,7 +368,7 @@ class Connection(base.Connection): if e.errno != errno.ENOENT: raise - # Acquire all locks by going through this little hiearchy. + # Acquire all locks by going through this little hierarchy. self._run_with_process_lock("book", _destroy_book) def _get_logbook(self, book_uuid): diff --git a/taskflow/persistence/logbook.py b/taskflow/persistence/logbook.py index 18a0a44d..3744c8e5 100644 --- a/taskflow/persistence/logbook.py +++ b/taskflow/persistence/logbook.py @@ -55,7 +55,7 @@ class LogBook(object): def add(self, flow_detail): """Adds a new entry to the underlying logbook. - Does not *guarantee* that the details will be immediatly saved. + Does not *guarantee* that the details will be immediately saved. 
""" self._flowdetails.append(flow_detail) @@ -139,11 +139,11 @@ class FlowDetail(object): class TaskDetail(object): - """This class contains an entry that contains the persistance of a task + """This class contains an entry that contains the persistence of a task after or before (or during) it is running including any results it may have produced, any state that it may be in (failed for example), any exception - that occured when running and any associated stacktrace that may have - occuring during that exception being thrown and any other metadata that + that occurred when running and any associated stacktrace that may have + occurring during that exception being thrown and any other metadata that should be stored along-side the details about this task. The data contained within this class need *not* backed by the backend diff --git a/taskflow/storage.py b/taskflow/storage.py index 6c6ec6af..413bbb13 100644 --- a/taskflow/storage.py +++ b/taskflow/storage.py @@ -301,7 +301,7 @@ class Storage(object): """Add values into storage This method should be used to put flow parameters (requirements that - are not satisified by any task in the flow) into storage. + are not satisfied by any task in the flow) into storage. """ injector_td = self._flowdetail.find_by_name(self.injector_name) if injector_td is None: diff --git a/taskflow/utils/lock_utils.py b/taskflow/utils/lock_utils.py index 36217afc..8ae7dcd8 100644 --- a/taskflow/utils/lock_utils.py +++ b/taskflow/utils/lock_utils.py @@ -80,7 +80,7 @@ class MultiLock(object): def is_locked(lock): # NOTE(harlowja): the threading2 lock doesn't seem to have this - # attribute, so thats why we are checking it existing first. + # attribute, so that's why we are checking it existing first. 
if hasattr(lock, 'locked'): return lock.locked() return False diff --git a/taskflow/utils/misc.py b/taskflow/utils/misc.py index ab1e74d4..9d5988c8 100644 --- a/taskflow/utils/misc.py +++ b/taskflow/utils/misc.py @@ -167,7 +167,7 @@ def as_bool(val): def as_int(obj, quiet=False): - """Converts an arbitary value into a integer.""" + """Converts an arbitrary value into an integer.""" # Try "2" -> 2 try: return int(obj) @@ -204,7 +204,7 @@ def ensure_tree(path): class TransitionNotifier(object): """A utility helper class that can be used to subscribe to - notifications of events occuring as well as allow a entity to post said + notifications of events occurring as well as allow an entity to post said notifications to subscribers. """ @@ -412,7 +412,7 @@ class Failure(object): """Check if any of exc_classes caused the failure Arguments of this method can be exception types or type - names (stings). If captured excption is instance of + names (strings). If captured exception is instance of exception of given type, the corresponding argument is returned. Else, None is returned. """