diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 84848e7f..988f2856 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -1,8 +1,7 @@ If you would like to contribute to the development of OpenStack, -you must follow the steps in the "If you're a developer, start here" -section of this page: +you must follow the steps documented at: - http://wiki.openstack.org/HowToContribute + http://wiki.openstack.org/HowToContribute#If_you.27re_a_developer Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following diff --git a/README.rst b/README.rst index f16b9688..24c6e5d9 100644 --- a/README.rst +++ b/README.rst @@ -20,13 +20,14 @@ Requirements Because TaskFlow has many optional (pluggable) parts like persistence backends and engines, we decided to split our requirements into two -parts: - things that are absolutely required by TaskFlow (you can’t use -TaskFlow without them) are put to ``requirements.txt``; - things that -are required by some optional part of TaskFlow (you can use TaskFlow -without them) are put to ``optional-requirements.txt``; if you want to -use the feature in question, you should add that requirements to your -project or environment; - as usual, things that required only for -running tests are put to ``test-requirements.txt``. +parts: - things that are absolutely required by TaskFlow (you can't use +TaskFlow without them) are put into ``requirements-pyN.txt`` (``N`` being the +Python *major* version number used to install the package); - things that are +required by some optional part of TaskFlow (you can use TaskFlow without +them) are put into ``optional-requirements.txt``; if you want to use the +feature in question, you should add that requirements to your project or +environment; - as usual, things that required only for running tests are +put into ``test-requirements.txt``. 
Tox.ini ~~~~~~~ diff --git a/doc/diagrams/core.graffle b/doc/diagrams/core.graffle new file mode 100644 index 00000000..a570fe59 --- /dev/null +++ b/doc/diagrams/core.graffle @@ -0,0 +1,8023 @@ + + + + + ActiveLayerIndex + 0 + ApplicationVersion + + com.omnigroup.OmniGrafflePro + 139.18.0.187838 + + AutoAdjust + + BackgroundGraphic + + Bounds + {{0, 0}, {1152, 2199}} + Class + SolidGraphic + ID + 2 + Style + + shadow + + Draws + NO + + stroke + + Draws + NO + + + + BaseZoom + 0 + CanvasOrigin + {0, 0} + ColumnAlign + 1 + ColumnSpacing + 36 + CreationDate + 2014-07-08 20:47:01 +0000 + Creator + Joshua Harlow + DisplayScale + 1 0/72 in = 1.0000 in + ExportShapes + + + InspectorGroup + 255 + ShapeImageRect + {{2, 2}, {22, 22}} + ShapeName + 33C70F48-B008-4466-BD81-E84D73C055CA-438-0000056AF6035FFB + ShouldExport + YES + StrokePath + + elements + + + element + MOVETO + point + {0.40652500000000003, 0.088786000000000004} + + + control1 + {0.39769700000000002, -0.059801} + control2 + {0.312282, -0.20657200000000001} + element + CURVETO + point + {0.15027599999999999, -0.32002000000000003} + + + control1 + {-0.028644599999999999, -0.44531500000000002} + control2 + {-0.26560600000000001, -0.50519099999999995} + element + CURVETO + point + {-0.5, -0.49964799999999998} + + + element + LINETO + point + {-0.5, -0.25638699999999998} + + + control1 + {-0.358902, -0.262291} + control2 + {-0.21507999999999999, -0.22622900000000001} + element + CURVETO + point + {-0.10728, -0.148201} + + + control1 + {-0.0160971, -0.082201999999999997} + control2 + {0.033605599999999999, 0.0024510600000000001} + element + CURVETO + point + {0.041826200000000001, 0.088786000000000004} + + + element + LINETO + point + {-0.043046000000000001, 0.088786000000000004} + + + element + LINETO + point + {0.22847700000000001, 0.5} + + + element + LINETO + point + {0.5, 0.088786000000000004} + + + element + LINETO + point + {0.40652500000000003, 0.088786000000000004} + + + element + CLOSE + + + element + 
MOVETO + point + {0.40652500000000003, 0.088786000000000004} + + + + TextBounds + {{0, 0}, {1, 1}} + + + GraphDocumentVersion + 8 + GraphicsList + + + Class + LineGraphic + ID + 1169 + Points + + {148.34850886899716, 1297.778564453125} + {148.34850886899716, 1565.8355233257191} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{108.29570600619962, 1459.9910998882619}, {30, 14}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 1167 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Emits} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 1166 + Points + + {172.01007495190493, 1463.7899284362793} + {108.29570625246291, 1489.3205401648188} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{28, 1489.3205331673671}, {108.00000616531918, 60.376010894775391}} + Class + ShapedGraphic + ID + 1165 + Shape + Cloud + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Board\ +Notifications} + VerticalPad + 0 + + + + Class + LineGraphic + ID + 1161 + Points + + {16.938813712387287, 1214.6957778930664} + {550.61227271339396, 1214.6957778930664} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Class + Group + 
Graphics + + + Bounds + {{177.05329513549805, 1254.1663719071589}, {82, 22}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 1163 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\b\fs36 \cf0 (optional)} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{56.053289698640896, 1231.7786193741999}, {116, 66}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica-BoldOblique + Size + 18 + + ID + 1164 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\b\fs36 \cf0 Posting & \ +Consumption\ +Phase} + VerticalPad + 0 + + Wrap + NO + + + ID + 1162 + + + Bounds + {{560.82414838901218, 1484.9621440334549}, {71, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 1159 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Consumption\ +Loop} + VerticalPad + 0 + + Wrap + NO + + + Class + Group + Graphics + + + Bounds + 
{{521.16725664085266, 1494.1828820625792}, {27.016406012875592, 38.542124503311257}} + Class + ShapedGraphic + ID + 1155 + Magnets + + {0.15027599999999999, -0.32002000000000003} + {-0.5, -0.49964799999999998} + {-0.5, -0.25638699999999998} + {-0.10728, -0.148201} + {0.041826500000000003, 0.088786000000000004} + {-0.043045800000000002, 0.088786000000000004} + {0.22847700000000001, 0.5} + {0.5, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + + Rotation + 90 + Shape + Bezier + ShapeData + + UnitPoints + + {0.406219, 0.101163} + {0.39736100000000002, -0.042958700000000002} + {0.31166700000000003, -0.185311} + {0.149117, -0.29534500000000002} + {-0.030395499999999999, -0.41686299999999998} + {-0.261517, -0.50514099999999995} + {-0.49668800000000002, -0.49976700000000002} + {-0.496693, -0.49976500000000001} + {-0.062913899999999995, -0.36058899999999999} + {-0.062913899999999995, -0.36058899999999999} + {-0.062918699999999994, -0.36058899999999999} + {-0.5, -0.21609700000000001} + {-0.5, -0.21609600000000001} + {-0.35843000000000003, -0.22182399999999999} + {-0.217449, -0.204378} + {-0.10928400000000001, -0.12870000000000001} + {-0.017806099999999998, -0.064687300000000003} + {0.032062500000000001, 0.0174179} + {0.040309900000000003, 0.101163} + {0.040309900000000003, 0.101163} + {-0.044847499999999998, 0.101163} + {-0.044847499999999998, 0.101163} + {-0.044847499999999998, 0.101163} + {0.22758200000000001, 0.5} + {0.22758200000000001, 0.5} + {0.22758200000000001, 0.5} + {0.5, 0.101163} + {0.5, 0.101163} + {0.5, 0.101163} + {0.406219, 0.101163} + + + + + Bounds + {{501.82414838901218, 1486.2594805049087}, {26.999999999999996, 38.288223134554855}} + Class + ShapedGraphic + ID + 1156 + Magnets + + {0.15027599999999999, -0.32002000000000003} + {-0.5, -0.49964799999999998} + {-0.5, -0.25638699999999998} + {-0.10728, -0.148201} + {0.041826500000000003, 
0.088786000000000004} + {-0.043045800000000002, 0.088786000000000004} + {0.22847700000000001, 0.5} + {0.5, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + + Rotation + 180 + Shape + 33C70F48-B008-4466-BD81-E84D73C055CA-438-0000056AF6035FFB + + + Bounds + {{509.94240895873349, 1465.3378642343948}, {27.016406012875589, 38.264972185430459}} + Class + ShapedGraphic + ID + 1157 + Magnets + + {0.15027599999999999, -0.32002000000000003} + {-0.5, -0.49964799999999998} + {-0.5, -0.25638699999999998} + {-0.10728, -0.148201} + {0.041826500000000003, 0.088786000000000004} + {-0.043045800000000002, 0.088786000000000004} + {0.22847700000000001, 0.5} + {0.5, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + + Rotation + 270 + Shape + 33C70F48-B008-4466-BD81-E84D73C055CA-438-0000056AF6035FFB + + + Bounds + {{528.82414838901218, 1473.3774855848621}, {27.000000000000004, 38.288223134554862}} + Class + ShapedGraphic + ID + 1158 + Magnets + + {0.15027599999999999, -0.32002000000000003} + {-0.5, -0.49964799999999998} + {-0.5, -0.25638699999999998} + {-0.10728, -0.148201} + {0.041826500000000003, 0.088786000000000004} + {-0.043045800000000002, 0.088786000000000004} + {0.22847700000000001, 0.5} + {0.5, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + + Shape + 33C70F48-B008-4466-BD81-E84D73C055CA-438-0000056AF6035FFB + + + ID + 1154 + + + Class + Group + Graphics + + + Bounds + {{302.01802465549162, 1480.6049629105769}, {37, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 1150 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Align + 0 + Pad + 0 + Text + 
{\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural + +\f0\fs24 \cf0 - wait()\ +....} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{230.34967062106779, 1480.6049629105769}, {63, 42}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 1151 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Align + 0 + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural + +\f0\fs24 \cf0 - abandon()\ +- iterjobs()\ +} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{177.68130514255216, 1480.6049629105769}, {44, 42}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 1152 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Align + 0 + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural + +\f0\fs24 \cf0 - post()\ +- claim()\ +} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{170.82414838901212, 1475.3192573441706}, {175.99597549438477, 38.571445465087891}} + Class + ShapedGraphic + ID + 1153 + Shape + Rectangle + Style + + stroke + + Pattern + 1 + + + + + ID + 1149 + + + Class + LineGraphic + ID + 1148 + Points + + {290.10982343784025, 1540.438820465416} + {289.2121722540532, 1513.7478166474543} + + Style + + stroke + + HeadArrow + UMLInheritance + Legacy + + LineType + 1 + TailArrow + 0 + + + 
Tail + + ID + 1147 + + + + Bounds + {{245.10982343784025, 1540.438820465416}, {90, 36}} + Class + ShapedGraphic + ID + 1147 + Magnets + + {0, 1} + {0, -1} + {1, 0} + {-1, 0} + + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\i\b\fs24 \cf0 Zookeeper\ +Jobboard} + VerticalPad + 0 + + + + Bounds + {{435.40182134738615, 1470.9621696472168}, {58, 56}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 1146 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Align + 0 + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural + +\f0\i\fs24 \cf0 - Claim job\ +- Load job\ +- Translate\ +- Activate} + VerticalPad + 0 + + Wrap + NO + + + Class + Group + Graphics + + + Class + Group + Graphics + + + Bounds + {{539.9018270694321, 1424.6444211854182}, {33, 12}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 1117 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Worker} + VerticalPad + 0 + + Wrap + NO + + + Class + Group + Graphics + + + Bounds + {{530.40181944003757, 1370.6049924744807}, {52, 72}} + Class + ShapedGraphic + 
FitText + YES + Flow + Resize + ID + 1119 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Specialized\ +\ +\ +\ +\ +} + VerticalPad + 0 + + Wrap + NO + + + Class + Group + Graphics + + + Bounds + {{545.72859289858161, 1420.3446371019186}, {19.299808229718884, 19.299808879032174}} + Class + ShapedGraphic + ID + 1121 + Shape + Rectangle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Draws + NO + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + Draws + NO + Width + 1.5 + + + VFlip + YES + Wrap + NO + + + Class + LineGraphic + ID + 1122 + Points + + {545.72859289858161, 1402.9748103444847} + {565.02840112830063, 1402.9748103444847} + {565.02840112830063, 1402.9748103444847} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 1123 + Points + + {555.37849701344112, 1410.6947336363717} + {545.7285925739252, 1420.3446377512312} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 1124 + Points + + {555.37849701344112, 1410.6947336363719} + {565.02840112830063, 1420.7306576742712} + + Style + + shadow + + 
Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 1125 + Points + + {555.37849701344112, 1397.1848678755687} + {555.37849701344112, 1410.6947352596553} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Bounds + {{549.58855454452544, 1385.6049829377375}, {11.57988428851805, 11.57988428851805}} + Class + ShapedGraphic + ID + 1126 + Shape + Circle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + Width + 1.5 + + + + + ID + 1120 + + + ID + 1118 + + + ID + 1116 + + + Class + Group + Graphics + + + Bounds + {{492.40181944003757, 1394.5655408753596}, {47, 12}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 1128 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Conductor} + VerticalPad + 0 + + Wrap + NO + + + Class + Group + Graphics + + + Bounds + {{505.22859289858167, 1444.305185502797}, {19.299808229718884, 19.299808879032174}} + Class + ShapedGraphic + ID + 1130 + Shape + Rectangle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + 
GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Draws + NO + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + Draws + NO + Width + 1.5 + + + VFlip + YES + Wrap + NO + + + Class + LineGraphic + ID + 1131 + Points + + {505.22859289858161, 1426.9353587453638} + {524.52840112830063, 1426.9353587453638} + {524.52840112830063, 1426.9353587453638} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 1132 + Points + + {514.87849701344112, 1434.6552820372506} + {505.2285925739252, 1444.3051861521101} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 1133 + Points + + {514.87849701344112, 1434.6552820372508} + {524.52840112830063, 1444.6912060751501} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 1134 + Points + + {514.87849701344112, 1421.1454162764476} + {514.87849701344112, 1434.6552836605342} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Bounds + {{509.08855454452544, 1409.5655313386164}, {11.57988428851805, 11.57988428851805}} + Class + ShapedGraphic + ID + 1135 + Shape + Circle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + 
GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + Width + 1.5 + + + + + ID + 1129 + + + ID + 1127 + + + Class + Group + Graphics + + + Bounds + {{454.40181944003757, 1373.5655408753596}, {47, 12}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 1137 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Conductor} + VerticalPad + 0 + + Wrap + NO + + + Class + Group + Graphics + + + Bounds + {{467.22859289858161, 1423.305185502797}, {19.299808229718884, 19.299808879032174}} + Class + ShapedGraphic + ID + 1139 + Shape + Rectangle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Draws + NO + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + Draws + NO + Width + 1.5 + + + VFlip + YES + Wrap + NO + + + Class + LineGraphic + ID + 1140 + Points + + {467.22859289858161, 1405.9353587453638} + {486.52840112830063, 1405.9353587453638} + {486.52840112830063, 1405.9353587453638} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 1141 + Points + + {476.87849701344112, 1413.6552820372506} + 
{467.2285925739252, 1423.3051861521101} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 1142 + Points + + {476.87849701344112, 1413.6552820372508} + {486.52840112830063, 1423.6912060751501} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 1143 + Points + + {476.87849701344112, 1400.1454162764476} + {476.87849701344112, 1413.6552836605342} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Bounds + {{471.08855454452544, 1388.5655313386164}, {11.57988428851805, 11.57988428851805}} + Class + ShapedGraphic + ID + 1144 + Shape + Circle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + Width + 1.5 + + + + + ID + 1138 + + + ID + 1136 + + + Bounds + {{428.40181944003757, 1349.4199200524531}, {175.99597549438477, 117.33065032958984}} + Class + ShapedGraphic + ID + 1145 + Shape + Cloud + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Workers} + VerticalPad + 0 + + 
TextPlacement + 0 + TextRelativeArea + {{0.14999999999999999, -0.15000001192092893}, {0.69999999999999996, 0.69999999999999996}} + + + ID + 1115 + + + Class + LineGraphic + Head + + ID + 968 + + ID + 1065 + Points + + {440.82414838901212, 1414.6049619569026} + {387.27826521030119, 1414.6049619569026} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + FilledArrow + + + + + Bounds + {{93.246466708152184, 1417.5425142326339}, {50, 42}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 996 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Receives\ +Job\ +} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{91.82415492531527, 1366.6049531974777}, {50, 42}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 995 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Posts\ +Workflow\ +} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 994 + Points + + {110.33829994431926, 1405.0729529199277} + {164.08388984446532, 1405.5} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + FilledArrow + + + + + Class + Group + Graphics + + + Bounds + {{50.1444289081536, 1351.4198986563467}, {35, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 986 + 
Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fnil\fcharset0 GillSans;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Library\ +User} + VerticalPad + 0 + + Wrap + NO + + + Class + Group + Graphics + + + Bounds + {{53.471203982158727, 1435.4435211691641}, {28.346457481384277, 28.346458435058594}} + Class + ShapedGraphic + ID + 988 + Shape + Rectangle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Draws + NO + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + Draws + NO + Width + 1.5 + + + VFlip + YES + Wrap + NO + + + Class + LineGraphic + ID + 989 + Points + + {53.471203982158727, 1409.9317103895926} + {81.817661463543004, 1409.9317103895926} + {81.817661463543004, 1409.9317103895926} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 990 + Points + + {67.644432722850866, 1421.2702933821463} + {53.471203505321569, 1435.4435221228384} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 991 + Points + + {67.644432722850866, 1421.2702933821463} + {81.817661463543004, 1436.0104861675161} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + 
ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 992 + Points + + {67.644432722850866, 1401.4277731451773} + {67.644432722850866, 1421.2702957663321} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Bounds + {{59.140495478435582, 1384.4198986563467}, {17.00787353515625, 17.00787353515625}} + Class + ShapedGraphic + ID + 993 + Shape + Circle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + Width + 1.5 + + + + + ID + 987 + + + ID + 1001 + + + Bounds + {{237.44151899448087, 1414.6049619569026}, {54, 36}} + Class + ShapedGraphic + ID + 961 + Shape + Rectangle + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Job} + + + + Bounds + {{228.44151899448087, 1405.6049619569026}, {54, 36}} + Class + ShapedGraphic + ID + 1000 + Shape + Rectangle + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Job1} + + + + Class + Group + Graphics + + + Class + LineGraphic + Head + + ID + 968 + + ID + 967 + Points + + {273.94151899473252, 
1414.6017734596319} + {332.27826523991331, 1414.5950095848621} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + FilledArrow + + + Tail + + ID + 969 + + + + Bounds + {{332.77826521030119, 1396.6049619569026}, {54, 36}} + Class + ShapedGraphic + ID + 968 + Shape + Rectangle + Style + + stroke + + Pattern + 1 + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Owner} + + + + Bounds + {{219.44151899448087, 1396.6049619569026}, {54, 36}} + Class + ShapedGraphic + ID + 969 + Shape + Rectangle + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Job1} + + + + ID + 966 + + + Class + Group + Graphics + + + Class + LineGraphic + Head + + ID + 972 + + ID + 971 + Points + + {264.94151899473252, 1405.6017734596319} + {323.27826523991331, 1405.5950095848621} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + FilledArrow + + + Tail + + ID + 973 + + + + Bounds + {{323.77826521030119, 1387.6049619569026}, {54, 36}} + Class + ShapedGraphic + ID + 972 + Shape + Rectangle + Style + + stroke + + Pattern + 1 + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Owner} + + + + Bounds + {{210.44151899448087, 1387.6049619569026}, {54, 36}} + Class + ShapedGraphic + ID + 973 + Shape + 
Rectangle + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Job1} + + + + ID + 970 + + + Class + Group + Graphics + + + Class + LineGraphic + Head + + ID + 976 + + ID + 975 + Points + + {255.94151899473252, 1396.6017734596319} + {314.27826523991337, 1396.5950095848621} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + FilledArrow + + + Tail + + ID + 977 + + + + Bounds + {{314.77826521030119, 1378.6049619569026}, {54, 36}} + Class + ShapedGraphic + ID + 976 + Shape + Rectangle + Style + + stroke + + Pattern + 1 + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Owner} + + + + Bounds + {{201.44151899448087, 1378.6049619569026}, {54, 36}} + Class + ShapedGraphic + ID + 977 + Shape + Rectangle + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Job1} + + + + ID + 974 + + + Class + Group + Graphics + + + Class + LineGraphic + Head + + ID + 980 + + ID + 979 + Points + + {246.94151899473252, 1387.6017734596319} + {305.27826523991337, 1387.5950095848621} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + FilledArrow + + + Tail + + ID + 981 + + + + Bounds + {{305.77826521030119, 1369.6049619569026}, {54, 36}} + Class + ShapedGraphic + ID + 980 + Shape + Rectangle + Style 
+ + stroke + + Pattern + 1 + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Owner} + + + + Bounds + {{192.44151899448087, 1369.6049619569026}, {54, 36}} + Class + ShapedGraphic + ID + 981 + Shape + Rectangle + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Job1} + + + + ID + 978 + + + Bounds + {{170.82414838901212, 1345.6049695862971}, {236.99999999999997, 168}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 983 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Align + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 + +\f0\fs24 \cf0 \ +\ +\ +\ +\ +\ +\ +\ +\ +\ +\ +} + VerticalPad + 0 + + TextPlacement + 0 + + + Bounds + {{170.82414838901212, 1331.6049695862971}, {236.99999999999997, 14}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 984 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\i\b\fs24 \cf0 Jobboard} + VerticalPad + 0 + + TextPlacement + 0 + + + Class + 
LineGraphic + ID + 861 + Points + + {470.1300977351811, 156.79728666398489} + {409.22449458705552, 177.09915438002673} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{476.1300977351811, 138.79728666398486}, {41, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 860 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Nested\ +subflow} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 859 + Points + + {382.65871206690008, 221.8325309753418} + {382.65871206690008, 249.83253047325724} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + 0 + + + + + Bounds + {{359.27167431166708, 255.11224365234375}, {47, 47}} + Class + ShapedGraphic + HFlip + YES + ID + 855 + Shape + Circle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Flow} + VerticalPad + 0 + + + + Bounds + {{355.77167171239853, 251.61224365234375}, {54, 54}} + Class + ShapedGraphic + HFlip + YES + ID + 856 + Shape + Circle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 
\cf0 Retry} + VerticalPad + 0 + + TextPlacement + 0 + TextRelativeArea + {{0.099999999999999978, 1.0000000238418578}, {0.80000000000000004, 0.69999999999999996}} + TextRotation + 305.1478271484375 + + + Bounds + {{290.73464965820312, 1032.5300847720423}, {27, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 839 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Run\ +Loop} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 838 + Points + + {16.938772201538086, 784.51440811157227} + {550.61223120254476, 784.51440811157227} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{478.53062537152402, 1122.9011524936079}, {63.714366912841797, 31.333333333333332}} + Class + ShapedGraphic + ID + 837 + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Completer} + VerticalPad + 0 + + + + Bounds + {{478.53062537152402, 1079.8705891391157}, {63.714366912841797, 31.333333333333332}} + Class + ShapedGraphic + ID + 836 + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} 
+\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Scheduler} + VerticalPad + 0 + + + + Bounds + {{372.92606544494629, 1123.8570556640625}, {61, 56}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 834 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Align + 0 + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural + +\f0\fs24 \cf0 - run()\ +- suspend()\ +...\ +} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{390.8163413254731, 1078.5120424153899}, {63.714366912841797, 31.333333333333332}} + Class + ShapedGraphic + ID + 832 + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Compiler} + VerticalPad + 0 + + + + Bounds + {{209.22450065612793, 852.73572444915771}, {80, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 831 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 States, results,\ +progress...} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{195.91839599609375, 1080.2736424160523}, {156, 70}} + Class 
+ ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 828 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Align + 0 + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural + +\f0\i\fs24 \cf0 - PENDING -> RUNNING\ +- RUNNING -> SUCCESS\ +- SUSPENDED -> RUNNING\ +- FAILURE -> REVERTING\ +....} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{179.01475524902344, 1044.5300637912073}, {30, 14}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 827 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Emits} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 826 + Points + + {228.92846501504124, 1022.9387556204747} + {165.21409631559922, 1048.4693673490142} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{84.918390063136314, 1048.4693603515625}, {108.00000616531918, 60.376010894775391}} + Class + ShapedGraphic + ID + 9 + Shape + Cloud + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 State\ 
+Transition\ +Notifications} + VerticalPad + 0 + + + + Class + Group + Graphics + + + Bounds + {{253.38389312817009, 1036.7868945785101}, {27.016406012875592, 38.542124503311257}} + Class + ShapedGraphic + ID + 93 + Magnets + + {0.15027599999999999, -0.32002000000000003} + {-0.5, -0.49964799999999998} + {-0.5, -0.25638699999999998} + {-0.10728, -0.148201} + {0.041826500000000003, 0.088786000000000004} + {-0.043045800000000002, 0.088786000000000004} + {0.22847700000000001, 0.5} + {0.5, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + + Rotation + 90 + Shape + Bezier + ShapeData + + UnitPoints + + {0.406219, 0.101163} + {0.39736100000000002, -0.042958700000000002} + {0.31166700000000003, -0.185311} + {0.149117, -0.29534500000000002} + {-0.030395499999999999, -0.41686299999999998} + {-0.261517, -0.50514099999999995} + {-0.49668800000000002, -0.49976700000000002} + {-0.496693, -0.49976500000000001} + {-0.062913899999999995, -0.36058899999999999} + {-0.062913899999999995, -0.36058899999999999} + {-0.062918699999999994, -0.36058899999999999} + {-0.5, -0.21609700000000001} + {-0.5, -0.21609600000000001} + {-0.35843000000000003, -0.22182399999999999} + {-0.217449, -0.204378} + {-0.10928400000000001, -0.12870000000000001} + {-0.017806099999999998, -0.064687300000000003} + {0.032062500000000001, 0.0174179} + {0.040309900000000003, 0.101163} + {0.040309900000000003, 0.101163} + {-0.044847499999999998, 0.101163} + {-0.044847499999999998, 0.101163} + {-0.044847499999999998, 0.101163} + {0.22758200000000001, 0.5} + {0.22758200000000001, 0.5} + {0.22758200000000001, 0.5} + {0.5, 0.101163} + {0.5, 0.101163} + {0.5, 0.101163} + {0.406219, 0.101163} + + + + + Bounds + {{234.04078487632972, 1028.8634930208395}, {26.999999999999996, 38.288223134554855}} + Class + ShapedGraphic + ID + 94 + Magnets + + {0.15027599999999999, -0.32002000000000003} + {-0.5, -0.49964799999999998} 
+ {-0.5, -0.25638699999999998} + {-0.10728, -0.148201} + {0.041826500000000003, 0.088786000000000004} + {-0.043045800000000002, 0.088786000000000004} + {0.22847700000000001, 0.5} + {0.5, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + + Rotation + 180 + Shape + 33C70F48-B008-4466-BD81-E84D73C055CA-438-0000056AF6035FFB + + + Bounds + {{242.15904544605092, 1007.9418767503257}, {27.016406012875589, 38.264972185430459}} + Class + ShapedGraphic + ID + 95 + Magnets + + {0.15027599999999999, -0.32002000000000003} + {-0.5, -0.49964799999999998} + {-0.5, -0.25638699999999998} + {-0.10728, -0.148201} + {0.041826500000000003, 0.088786000000000004} + {-0.043045800000000002, 0.088786000000000004} + {0.22847700000000001, 0.5} + {0.5, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + + Rotation + 270 + Shape + 33C70F48-B008-4466-BD81-E84D73C055CA-438-0000056AF6035FFB + + + Bounds + {{261.04078487632967, 1015.981498100793}, {27.000000000000004, 38.288223134554862}} + Class + ShapedGraphic + ID + 96 + Magnets + + {0.15027599999999999, -0.32002000000000003} + {-0.5, -0.49964799999999998} + {-0.5, -0.25638699999999998} + {-0.10728, -0.148201} + {0.041826500000000003, 0.088786000000000004} + {-0.043045800000000002, 0.088786000000000004} + {0.22847700000000001, 0.5} + {0.5, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + {0.40652500000000003, 0.088786000000000004} + + Shape + 33C70F48-B008-4466-BD81-E84D73C055CA-438-0000056AF6035FFB + + + ID + 92 + + + Bounds + {{396.52035685550777, 974.46163584936892}, {142, 14}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica-Bold + Size + 12 + + ID + 457 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + 
Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\b\fs24 \cf0 ActionEngine (one impl.)} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{390.8163413254731, 1038.2328676107024}, {63.714366912841797, 31.333333333333332}} + Class + ShapedGraphic + ID + 450 + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Runner} + VerticalPad + 0 + + + + Bounds + {{478.53062537152402, 1038.2328900119185}, {63.714366912841797, 31.333333333333332}} + Class + ShapedGraphic + ID + 449 + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Runtime} + VerticalPad + 0 + + + + Bounds + {{478.5306334878843, 994.19207080251738}, {63.714366912841797, 31.333333333333332}} + Class + ShapedGraphic + ID + 447 + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Executor} + VerticalPad + 0 + + + + Bounds + {{390.81631892425446, 994.19204840129885}, {63.714366912841797, 31.333333333333332}} + Class + ShapedGraphic + ID + 
446 + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs20 \cf0 Analyzer} + VerticalPad + 0 + + + + Class + LineGraphic + Head + + ID + 444 + + ID + 445 + Points + + {304.30400417385465, 1005.6686926988394} + {365.81839492659333, 1029.0751702876107} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + Tail + + ID + 423 + + + + Class + LineGraphic + Head + + ID + 10 + + ID + 433 + Points + + {437.73468537749687, 869.13090571936129} + {473.25508692784757, 868.8206769098332} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + FilledArrow + + + + + Bounds + {{473.75506787377572, 840.81631016602194}, {63.714366912841797, 56}} + Class + ShapedGraphic + ID + 10 + Magnets + + {0, 1} + {0, -1} + {1, 0} + {-1, 0} + + Shape + Cylinder + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\fs20 \cf0 Persistence\ +Backend} + VerticalPad + 0 + + + + Class + LineGraphic + ID + 428 + OrthogonalBarAutomatic + + OrthogonalBarPoint + {0, 0} + OrthogonalBarPosition + -1 + Points + + {258.38771438598633, 947} + {308.12470245361328, 886} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 2 + TailArrow + FilledArrow + + + + + Class + TableGroup + Graphics + + + Bounds + {{310.93862753220276, 826.66148410306198}, {126, 14}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 426 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + 
Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\b\fs24 \cf0 Storage} + VerticalPad + 0 + + TextPlacement + 0 + + + Bounds + {{310.93862753220276, 840.66148410306198}, {126, 28}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 43 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Align + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 + +\f0\fs24 \cf0 - flow_name\ +- flow_uuid} + VerticalPad + 0 + + TextPlacement + 0 + + + Bounds + {{310.93862753220276, 868.66148410306198}, {126, 56}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 427 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Align + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 + +\f0\fs24 \cf0 - save()\ +- get()\ +- get_failures()\ +...} + VerticalPad + 0 + + TextPlacement + 0 + + + GridH + + 426 + 43 + 427 + + + ID + 425 + + + Bounds + {{207.28567728426299, 974.78645878243321}, {105.10203552246094, 36}} + Class + ShapedGraphic + ID + 421 + Shape + Cloud + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc 
+ +\f0\fs24 \cf0 Compilation} + VerticalPad + 0 + + + + Bounds + {{240.83671598660843, 957.79548143397199}, {38, 14}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 422 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Engine} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{215.83669066280191, 952.49403624989395}, {88, 72.509323120117188}} + Class + ShapedGraphic + ID + 423 + Shape + Rectangle + + + Class + LineGraphic + ID + 418 + Points + + {175.01475125757293, 858.46545582024169} + {175.01475125757293, 1126.5224146928358} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{56.053440093994141, 802.0387135699907}, {88, 44}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica-BoldOblique + Size + 18 + + ID + 414 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\b\fs36 \cf0 Activation\ +Phase} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{105.08388984446533, 1003.3409264674543}, {59, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 413 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 
+\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Results/\ +Exceptions} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 412 + Points + + {109.26527080670799, 991.99397346428293} + {192.17343756180355, 991.99397346428293} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + FilledArrow + + + + + Bounds + {{115.18593484369178, 915.30610463461369}, {49, 56}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 411 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Run/\ +Resume/\ +Revert/\ +Suspend} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 410 + Points + + {113.34690338032949, 979.62663303122656} + {203.34690321589053, 979.62663303122656} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + 0 + + + + + Class + Group + Graphics + + + Bounds + {{59.15303234416389, 922.95317062424022}, {35, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 402 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fnil\fcharset0 GillSans;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Library\ +User} + VerticalPad + 0 + + Wrap + NO + + + Class + Group + 
Graphics + + + Bounds + {{62.479807418169017, 1006.9767931370576}, {28.346457481384277, 28.346458435058594}} + Class + ShapedGraphic + ID + 404 + Shape + Rectangle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Draws + NO + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + Draws + NO + Width + 1.5 + + + VFlip + YES + Wrap + NO + + + Class + LineGraphic + ID + 405 + Points + + {62.479807418169017, 981.46498235748606} + {90.826264899553294, 981.46498235748606} + {90.826264899553294, 981.46498235748606} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 406 + Points + + {76.653036158861156, 992.80356535003978} + {62.479806941331859, 1006.9767940907319} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 407 + Points + + {76.653036158861156, 992.80356535003978} + {90.826264899553294, 1007.5437581354097} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 408 + Points + + {76.653036158861156, 972.96104511307078} + {76.653036158861156, 992.80356773422557} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + 
+ + + + Bounds + {{68.149098914445872, 955.95317062424022}, {17.00787353515625, 17.00787353515625}} + Class + ShapedGraphic + ID + 409 + Shape + Circle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + Width + 1.5 + + + + + ID + 403 + + + ID + 401 + + + Class + LineGraphic + ID + 399 + Points + + {450.39306747989752, 692.4796011495929} + {451.28062907089827, 609.48823926071918} + + Style + + stroke + + HeadArrow + UMLInheritance + Legacy + + LineType + 1 + TailArrow + 0 + + + Tail + + ID + 398 + Info + 2 + + + + Bounds + {{405.3877204726607, 692.97957255114432}, {90, 36}} + Class + ShapedGraphic + ID + 398 + Magnets + + {0, 1} + {0, -1} + {1, 0} + {-1, 0} + + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\b\fs24 \cf0 Distributed\ +Engine} + VerticalPad + 0 + + + + Class + LineGraphic + Head + + ID + 395 + + ID + 397 + Points + + {515.69384736152347, 643.69388126880108} + {479.18127560660326, 607.7427600751555} + + Style + + stroke + + HeadArrow + UMLInheritance + Legacy + + LineType + 1 + TailArrow + 0 + + + Tail + + ID + 396 + Info + 2 + + + + Bounds + {{470.69384736152347, 643.69388126880108}, {90, 36}} + Class + ShapedGraphic + ID + 396 + Magnets + + {0, 1} + {0, -1} + {1, 0} + {-1, 0} + + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} 
+\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\b\fs24 \cf0 No-Thread\ +Engine} + VerticalPad + 0 + + + + Class + LineGraphic + Head + + ID + 395 + + ID + 27 + Points + + {387.9411398922191, 643.33613420977974} + {422.69414909838434, 607.74967407906797} + + Style + + stroke + + HeadArrow + UMLInheritance + Legacy + + LineType + 1 + TailArrow + 0 + + + Tail + + ID + 11 + + + + Bounds + {{342.59180028217145, 643.69385172515479}, {90, 36}} + Class + ShapedGraphic + ID + 11 + Magnets + + {0, 1} + {0, -1} + {1, 0} + {-1, 0} + + Shape + Rectangle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\i\b\fs24 \cf0 K +\i0 -Threaded\ +Engine} + VerticalPad + 0 + + + + Class + TableGroup + Graphics + + + Bounds + {{405.38771609711887, 495.39195656369293}, {90, 14}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 393 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\i\b\fs24 \cf0 Engine} + VerticalPad + 0 + + TextPlacement + 0 + + + Bounds + {{405.38771609711887, 509.39195656369293}, {90, 42}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 394 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Align + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} 
+\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 + +\f0\fs24 \cf0 - notifier\ +- atom_notifier\ +- storage} + VerticalPad + 0 + + TextPlacement + 0 + + + Bounds + {{405.38771609711887, 551.39195656369293}, {90, 56}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 395 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Align + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 + +\f0\fs24 \cf0 - compile()\ +- prepare()\ +- run()\ +- suspend()} + VerticalPad + 0 + + TextPlacement + 0 + + + GridH + + 393 + 394 + 395 + + + ID + 392 + + + Bounds + {{324.43479725203395, 600.47359077973181}, {35, 14}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 390 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Load()} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 389 + Points + + {299.19385094739675, 622.37332926589443} + {349.19384944761669, 622.37332926589443} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + FilledArrow + + + + + Class + LineGraphic + ID + 388 + Points + + {315.73465810741618, 475.31037359821562} + {315.73465810741618, 743.36733247080952} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{183.17344081803969, 
484.69386811754907}, {72, 42}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 387 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Workflow +\ +Runtime\ +Configuration} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{192.17344567816366, 658.51018444452041}, {54, 36}} + Class + ShapedGraphic + ID + 386 + Shape + Rectangle + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Storage\ +Config} + + + + Bounds + {{192.17343756180355, 606.36734600615216}, {54, 36}} + Class + ShapedGraphic + ID + 385 + Shape + Rectangle + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Engine\ +Config} + + + + Bounds + {{192.1734294454435, 554.22448349078456}, {54, 36}} + Class + ShapedGraphic + ID + 1 + Shape + Rectangle + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Flow} + + + + Bounds + {{161.23465842199704, 531.90531247737556}, {126, 182}} + 
Class + ShapedGraphic + ID + 15 + Shape + NoteShape + Style + + Text + + VerticalPad + 0 + + + + Bounds + {{81.034519768316954, 649.04956274775338}, {43, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 384 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Returns\ +Engine} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 383 + Points + + {83.544734716513915, 635.76110575309315} + {129.28172189203261, 635.76110575309315} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + FilledArrow + + + + + Bounds + {{80.054942197373691, 597.00601059533471}, {47, 14}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 382 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Provides} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 381 + Points + + {87.626367290135391, 623.39376532003678} + {133.36335446565408, 623.39376532003678} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + 0 + + + + + Class + Group + Graphics + + + Bounds + {{33.432496253969788, 566.72030291305043}, {35, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 373 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws 
+ NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fnil\fcharset0 GillSans;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Library\ +User} + VerticalPad + 0 + + Wrap + NO + + + Class + Group + Graphics + + + Bounds + {{36.759271327974915, 650.74392542586781}, {28.346457481384277, 28.346458435058594}} + Class + ShapedGraphic + ID + 375 + Shape + Rectangle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Draws + NO + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + Draws + NO + Width + 1.5 + + + VFlip + YES + Wrap + NO + + + Class + LineGraphic + ID + 376 + Points + + {36.759271327974915, 625.23211464629628} + {65.105728809359192, 625.23211464629628} + {65.105728809359192, 625.23211464629628} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 377 + Points + + {50.932500068667053, 636.57069763884999} + {36.759270851137757, 650.74392637954213} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 378 + Points + + {50.932500068667053, 636.57069763884999} + {65.105728809359192, 651.31089042421991} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + 
TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 379 + Points + + {50.932500068667053, 616.728177401881} + {50.932500068667053, 636.57070002303578} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Bounds + {{42.42856282425177, 599.72030291305043}, {17.00787353515625, 17.00787353515625}} + Class + ShapedGraphic + ID + 380 + Shape + Circle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + Width + 1.5 + + + + + ID + 374 + + + ID + 372 + + + Bounds + {{56.053440093994141, 454.08162381538807}, {97, 44}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica-BoldOblique + Size + 18 + + ID + 371 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\b\fs36 \cf0 Translation\ +Phase} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 370 + Points + + {239.72452365274654, 129.59183421248156} + {249.59182766764883, 184.82352424792543} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{203.06122053766794, 96.734692512775737}, {75, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 369 
+ Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Explicit\ +dependencies} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 368 + Points + + {16.938771677235714, 434.69386909068618} + {550.61223067824244, 434.69386909068618} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{56.053440093994141, 39.83673387962002}, {112, 44}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica-BoldOblique + Size + 18 + + ID + 367 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\b\fs36 \cf0 Construction\ +Phase} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{100.22448568729375, 191.89795101130832}, {50, 56}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 366 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Creates\ +\ +\ +Workflow} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + 
ID + 364 + Points + + {110.31631892346081, 220.32652202282102} + {156.05330609897936, 220.32652202282102} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + FilledArrow + + + + + Class + TableGroup + Graphics + + + Bounds + {{306.40872322346235, 337.6122433290239}, {126, 14}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 361 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\i\b\fs24 \cf0 Retry (Atom)} + VerticalPad + 0 + + TextPlacement + 0 + + + Bounds + {{306.40872322346235, 351.6122433290239}, {126, 56}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 362 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Align + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 + +\f0\fs24 \cf0 - execute()\ +- revert()\ +- on_failure()\ +...} + VerticalPad + 0 + + TextPlacement + 0 + + + GridH + + 361 + 362 + + + ID + 360 + + + Class + TableGroup + Graphics + + + Bounds + {{165.22448860432195, 337.6122433290239}, {126, 14}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 42 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} 
+\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\i\b\fs24 \cf0 Task (Atom)} + VerticalPad + 0 + + TextPlacement + 0 + + + Bounds + {{165.22448860432195, 351.6122433290239}, {126, 56}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 44 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Align + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 + +\f0\fs24 \cf0 - execute()\ +- revert()\ +- update_progress()\ +...} + VerticalPad + 0 + + TextPlacement + 0 + + + GridH + + 42 + 44 + + + ID + 352 + + + Class + LineGraphic + Head + + ID + 840 + + ID + 842 + Points + + {381.22447887295158, 117.34693649161716} + {381.85914273540726, 165.11464199273692} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Class + LineGraphic + ID + 347 + Points + + {395.51019288062668, 111.79728682683641} + {394.92858042750709, 139.79728698730469} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{302.34693227416039, 79.112244302955403}, {185, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 345 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Workflow (declarative) structure\ +& code (not executed immediately)} + 
VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 329 + Points + + {472.22448860432172, 249.61224332902393} + {411.31888545619614, 269.91411104506579} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Class + LineGraphic + ID + 343 + Points + + {474.22448860432172, 205.97599760148498} + {409.22448860432172, 228.24848490451151} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + Pattern + 1 + TailArrow + 0 + + + + + Bounds + {{478.22448860432172, 179.61224332902398}, {83, 42}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 344 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Dataflow\ +(symbol-based)\ +dependencies} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + ID + 341 + Points + + {361.09295431543518, 212.90662923905811} + {315.35596770538763, 253.81785993690883} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + 0 + + + + + Bounds + {{387.44897720864344, 229.36224332902393}, {30, 14}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 336 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 out/in} + VerticalPad + 0 + + Wrap + 
NO + + + Class + LineGraphic + ID + 334 + Points + + {239.72451559473222, 278.11224001641114} + {269.72448860432183, 278.11224001641114} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + FilledArrow + + + + + Class + LineGraphic + ID + 333 + Points + + {324.22448507715393, 278.11224001641114} + {354.2244580867436, 278.11224001641114} + + Style + + stroke + + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + FilledArrow + + + + + Class + LineGraphic + ID + 332 + Points + + {325.22451559473205, 192.11224001641122} + {355.22448860432172, 192.11224001641122} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + 0 + + + + + Class + LineGraphic + Head + + ID + 324 + + ID + 331 + Points + + {239.72450209952797, 192.61225527520028} + {269.72447510911758, 192.61225527520028} + + Style + + stroke + + HeadArrow + FilledArrow + Legacy + + LineType + 1 + TailArrow + 0 + + + Tail + + ID + 28 + + + + Bounds + {{474.22448860432172, 234.61224332902393}, {49, 42}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica + Size + 12 + + ID + 330 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\i\fs24 \cf0 Nested\ +subflow\ +with retry} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{271.72448860432172, 251.61224332902393}, {54, 54}} + Class + ShapedGraphic + HFlip + YES + ID + 328 + Shape + Circle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} 
+\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Task} + VerticalPad + 0 + + + + Bounds + {{183.72451554562139, 251.61224332902393}, {54, 54}} + Class + ShapedGraphic + HFlip + YES + ID + 327 + Shape + Circle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Task} + VerticalPad + 0 + + + + Bounds + {{355.22448860432172, 165.61224332902398}, {54, 54}} + Class + ShapedGraphic + HFlip + YES + ID + 840 + Shape + Circle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Flow} + VerticalPad + 0 + + + + Bounds + {{270.22448860432183, 165.61224332902398}, {54, 54}} + Class + ShapedGraphic + HFlip + YES + ID + 324 + Shape + Circle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Task} + VerticalPad + 0 + + + + Bounds + {{185.22448860432195, 165.61224332902398}, {54, 54}} + Class + ShapedGraphic + HFlip + YES + ID + 28 + Shape + Circle + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Task} + VerticalPad 
+ 0 + + + + Class + TableGroup + Graphics + + + Bounds + {{165.224488604322, 153.79728666398492}, {269, 168}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 35 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Align + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 + +\f0\fs24 \cf0 \ +\ +\ +\ +\ +\ +\ +\ +\ +\ +\ +} + VerticalPad + 0 + + TextPlacement + 0 + + + Bounds + {{165.224488604322, 139.79728666398492}, {269, 14}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 34 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\i\b\fs24 \cf0 Flow (pattern)} + VerticalPad + 0 + + TextPlacement + 0 + + + GridH + + 34 + 35 + + + ID + 33 + + + Class + Group + Graphics + + + Bounds + {{56.122447887295152, 164.67346775924003}, {35, 28}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 61 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fnil\fcharset0 GillSans;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural\qc + +\f0\fs24 \cf0 Library\ +User} + VerticalPad + 0 + + Wrap + NO + + + Class + Group + Graphics + + + Bounds + {{59.449222961300279, 248.69709027205738}, {28.346457481384277, 
28.346458435058594}} + Class + ShapedGraphic + ID + 63 + Shape + Rectangle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Draws + NO + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + Draws + NO + Width + 1.5 + + + VFlip + YES + Wrap + NO + + + Class + LineGraphic + ID + 64 + Points + + {59.449222961300279, 223.18527949248588} + {87.795680442684557, 223.18527949248588} + {87.795680442684557, 223.18527949248588} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 65 + Points + + {73.622451701992418, 234.52386248503961} + {59.449222484463121, 248.6970912257317} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 66 + Points + + {73.622451701992418, 234.52386248503953} + {87.795680442684557, 249.26405527040947} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Class + LineGraphic + ID + 67 + Points + + {73.622451701992418, 214.68134224807059} + {73.622451701992418, 234.52386486922538} + + Style + + shadow + + Beneath + YES + Draws + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + HeadArrow + 0 + Legacy + + LineType + 1 + TailArrow + 0 + Width + 1.5 + + + + + Bounds + {{65.118514457577135, 197.67346775924003}, {17.00787353515625, 
17.00787353515625}} + Class + ShapedGraphic + ID + 68 + Shape + Circle + Style + + fill + + Color + + b + 0.4 + g + 1 + r + 1 + + Draws + NO + FillType + 2 + GradientAngle + 90 + GradientColor + + b + 0.4 + g + 1 + r + 1 + + MiddleColor + + b + 0.4 + g + 1 + r + 1 + + TrippleBlend + YES + + shadow + + Beneath + YES + Fuzziness + 2.5038185119628906 + ShadowVector + {0, 1} + + stroke + + CornerRadius + 1 + Width + 1.5 + + + + + ID + 62 + + + ID + 60 + + + Class + Group + Graphics + + + Bounds + {{524.20410965184897, 903.84686831164879}, {90, 36}} + Class + ShapedGraphic + ID + 440 + Magnets + + {0, 1} + {0, -1} + {1, 0} + {-1, 0} + + Shape + RoundRect + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\fs20 \cf0 Zookeeper} + VerticalPad + 0 + + + + Bounds + {{524.20413205306681, 867.84684591043094}, {90, 36}} + Class + ShapedGraphic + ID + 441 + Magnets + + {0, 1} + {0, -1} + {1, 0} + {-1, 0} + + Shape + RoundRect + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\fs20 \cf0 Filesystem} + VerticalPad + 0 + + + + Bounds + {{524.20410965184885, 832.86723361478039}, {90, 36}} + Class + ShapedGraphic + ID + 442 + Magnets + + {0, 1} + {0, -1} + {1, 0} + {-1, 0} + + Shape + RoundRect + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\fs20 \cf0 Memory} + VerticalPad + 0 + + + + Bounds + 
{{524.20409341912841, 797.78565525800093}, {90, 36}} + Class + ShapedGraphic + ID + 443 + Magnets + + {0, 1} + {0, -1} + {1, 0} + {-1, 0} + + Shape + RoundRect + Style + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc + +\f0\fs20 \cf0 SQLAlchemy} + VerticalPad + 0 + + + + ID + 439 + + + Bounds + {{366.28570556640625, 1120.9999961853027}, {75.71429443359375, 44}} + Class + ShapedGraphic + ID + 835 + Shape + Rectangle + Style + + stroke + + Pattern + 1 + + + + + Bounds + {{366.28571101041302, 969.38000187999}, {202.04080200195312, 196.6199951171875}} + Class + ShapedGraphic + ID + 444 + Shape + Rectangle + + + Bounds + {{379.54083251953125, 960.24970708018532}, {202.04080200195312, 196.6199951171875}} + Class + ShapedGraphic + ID + 1170 + Shape + Rectangle + + + Bounds + {{181.81308267749165, 1321.1440843224241}, {236.99999999999997, 168}} + Class + ShapedGraphic + FitText + Vertical + Flow + Resize + ID + 1172 + Shape + Rectangle + Style + + fill + + GradientCenter + {-0.29411799999999999, -0.264706} + + + Text + + Align + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1265\cocoasubrtf200 +\cocoascreenfonts1{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720 + +\f0\fs24 \cf0 \ +\ +\ +\ +\ +\ +\ +\ +\ +\ +\ +} + VerticalPad + 0 + + TextPlacement + 0 + + + GridInfo + + GuidesLocked + NO + GuidesVisible + YES + HPages + 2 + ImageCounter + 1 + KeepToScale + + Layers + + + Lock + NO + Name + Layer 1 + Print + YES + View + YES + + + LayoutInfo + + Animate + NO + circoMinDist + 18 + circoSeparation + 0.0 + layoutEngine + dot + neatoSeparation + 0.0 + twopiSeparation + 0.0 + + LinksVisible + NO + MagnetsVisible + NO + MasterSheets + + 
ModificationDate + 2014-07-09 22:24:00 +0000 + Modifier + Joshua Harlow + NotesVisible + NO + Orientation + 2 + OriginVisible + NO + PageBreaks + YES + PrintInfo + + NSBottomMargin + + float + 41 + + NSHorizonalPagination + + coded + BAtzdHJlYW10eXBlZIHoA4QBQISEhAhOU051bWJlcgCEhAdOU1ZhbHVlAISECE5TT2JqZWN0AIWEASqEhAFxlwCG + + NSLeftMargin + + float + 18 + + NSPaperSize + + size + {612, 792} + + NSPrintReverseOrientation + + int + 0 + + NSRightMargin + + float + 18 + + NSTopMargin + + float + 18 + + + PrintOnePage + + ReadOnly + NO + RowAlign + 1 + RowSpacing + 36 + SheetTitle + Canvas 1 + SmartAlignmentGuidesActive + YES + SmartDistanceGuidesActive + YES + UniqueID + 1 + UseEntirePage + + VPages + 3 + WindowInfo + + CurrentSheet + 0 + ExpandedCanvases + + + name + Canvas 1 + + + Frame + {{77, 45}, {1067, 833}} + ListView + + OutlineWidth + 142 + RightSidebar + + ShowRuler + + Sidebar + + SidebarWidth + 120 + VisibleRegion + {{8.8235295767602651, 949.50982167692416}, {900.00001682954701, 665.68628695780228}} + Zoom + 1.0199999809265137 + ZoomValues + + + Canvas 1 + 1.0199999809265137 + 1 + + + + + diff --git a/doc/source/arguments_and_results.rst b/doc/source/arguments_and_results.rst index e5870545..e23a6375 100644 --- a/doc/source/arguments_and_results.rst +++ b/doc/source/arguments_and_results.rst @@ -53,8 +53,8 @@ the task. ... def execute(self, spam, eggs): ... return spam + eggs ... - >>> MyTask().requires - set(['eggs', 'spam']) + >>> sorted(MyTask().requires) + ['eggs', 'spam'] Inference from the method signature is the ''simplest'' way to specify task arguments. Optional arguments (with default values), and special arguments like diff --git a/doc/source/conductors.rst b/doc/source/conductors.rst index 4dfa3e33..25eb75c8 100644 --- a/doc/source/conductors.rst +++ b/doc/source/conductors.rst @@ -24,9 +24,41 @@ They are responsible for the following: .. 
note:: - They are inspired by and have similar responsiblities + They are inspired by and have similar responsibilities as `railroad conductors`_. +Considerations +============== + +Some usage considerations should be used when using a conductor to make sure +it's used in a safe and reliable manner. Eventually we hope to make these +non-issues but for now they are worth mentioning. + +Endless cycling +--------------- + +**What:** Jobs that fail (due to some type of internal error) on one conductor +will be abandoned by that conductor and then another conductor may experience +those same errors and abandon it (and repeat). This will create a job +abandonment cycle that will continue for as long as the job exists in an +claimable state. + +**Example:** + +.. image:: img/conductor_cycle.png + :scale: 70% + :alt: Conductor cycling + +**Alleviate by:** + +#. Forcefully delete jobs that have been failing continuously after a given + number of conductor attempts. This can be either done manually or + automatically via scripts (or other associated monitoring). +#. Resolve the internal error's cause (storage backend failure, other...). +#. Help implement `jobboard garbage binning`_. + +.. _jobboard garbage binning: https://blueprints.launchpad.net/taskflow/+spec/jobboard-garbage-bin + Interfaces ========== diff --git a/doc/source/conf.py b/doc/source/conf.py index 6ecddae8..3b0c35ce 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -11,6 +11,7 @@ sys.path.insert(0, os.path.abspath('../..')) extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', + 'sphinx.ext.extlinks', 'sphinx.ext.inheritance_diagram', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode', @@ -37,6 +38,7 @@ exclude_patterns = ['_build'] # General information about the project. project = u'TaskFlow' copyright = u'2013-2014, OpenStack Foundation' +source_tree = 'http://git.openstack.org/cgit/openstack/taskflow/tree' # If true, '()' will be appended to :func: etc. cross-reference text. 
add_function_parentheses = True @@ -51,6 +53,10 @@ pygments_style = 'sphinx' # Prefixes that are ignored for sorting the Python module index modindex_common_prefix = ['taskflow.'] +# Shortened external links. +extlinks = { + 'example': (source_tree + '/taskflow/examples/%s.py', ''), +} # -- Options for HTML output -------------------------------------------------- diff --git a/doc/source/examples.rst b/doc/source/examples.rst index c6d2e3ed..9199bc11 100644 --- a/doc/source/examples.rst +++ b/doc/source/examples.rst @@ -1,11 +1,165 @@ -======== -Examples -======== +Making phone calls +================== -While developing TaskFlow the team has worked hard to make sure the concepts -that TaskFlow provides are explained by *relevant* examples. To explore these -please check out the `examples`_ directory in the TaskFlow source tree. If the -examples provided are not satisfactory (or up to your standards) contributions -are welcome and very much appreciated to improve them. +.. note:: -.. _examples: http://git.openstack.org/cgit/openstack/taskflow/tree/taskflow/examples + Full source located at :example:`simple_linear`. + +.. literalinclude:: ../../taskflow/examples/simple_linear.py + :language: python + :linenos: + :lines: 16- + +Making phone calls (automatically reverting) +============================================ + +.. note:: + + Full source located at :example:`reverting_linear`. + +.. literalinclude:: ../../taskflow/examples/reverting_linear.py + :language: python + :linenos: + :lines: 16- + +Building a car +============== + +.. note:: + + Full source located at :example:`build_a_car`. + +.. literalinclude:: ../../taskflow/examples/build_a_car.py + :language: python + :linenos: + :lines: 16- + +Linear equation solver (explicit dependencies) +============================================== + +.. note:: + + Full source located at :example:`calculate_linear`. + +.. 
literalinclude:: ../../taskflow/examples/calculate_linear.py + :language: python + :linenos: + :lines: 16- + +Linear equation solver (inferred dependencies) +============================================== + +``Source:`` :example:`graph_flow` + +.. literalinclude:: ../../taskflow/examples/graph_flow.py + :language: python + :linenos: + :lines: 16- + +Linear equation solver (in parallel) +==================================== + +.. note:: + + Full source located at :example:`calculate_in_parallel` + +.. literalinclude:: ../../taskflow/examples/calculate_in_parallel.py + :language: python + :linenos: + :lines: 16- + +Creating a volume (in parallel) +=============================== + +.. note:: + + Full source located at :example:`create_parallel_volume` + +.. literalinclude:: ../../taskflow/examples/create_parallel_volume.py + :language: python + :linenos: + :lines: 16- + +Storing & emitting a bill +========================= + +.. note:: + + Full source located at :example:`fake_billing` + +.. literalinclude:: ../../taskflow/examples/fake_billing.py + :language: python + :linenos: + :lines: 16- + +Suspending a workflow & resuming +================================ + +.. note:: + + Full source located at :example:`resume_from_backend` + +.. literalinclude:: ../../taskflow/examples/resume_from_backend.py + :language: python + :linenos: + :lines: 16- + +Creating a virtual machine (resumable) +====================================== + +.. note:: + + Full source located at :example:`resume_vm_boot` + +.. literalinclude:: ../../taskflow/examples/resume_vm_boot.py + :language: python + :linenos: + :lines: 16- + +Creating a volume (resumable) +============================= + +.. note:: + + Full source located at :example:`resume_volume_create` + +.. literalinclude:: ../../taskflow/examples/resume_volume_create.py + :language: python + :linenos: + :lines: 16- + +Running engines via iteration +============================= + +.. 
note:: + + Full source located at :example:`run_by_iter` + +.. literalinclude:: ../../taskflow/examples/run_by_iter.py + :language: python + :linenos: + :lines: 16- + +Controlling retries using a retry controller +============================================ + +.. note:: + + Full source located at :example:`retry_flow` + +.. literalinclude:: ../../taskflow/examples/retry_flow.py + :language: python + :linenos: + :lines: 16- + +Distributed execution (simple) +============================== + +.. note:: + + Full source located at :example:`wbe_simple_linear` + +.. literalinclude:: ../../taskflow/examples/wbe_simple_linear.py + :language: python + :linenos: + :lines: 16- diff --git a/doc/source/img/conductor_cycle.png b/doc/source/img/conductor_cycle.png new file mode 100644 index 00000000..b09d71b4 Binary files /dev/null and b/doc/source/img/conductor_cycle.png differ diff --git a/doc/source/img/engine_states.svg b/doc/source/img/engine_states.svg new file mode 100644 index 00000000..497c31ef --- /dev/null +++ b/doc/source/img/engine_states.svg @@ -0,0 +1,8 @@ + + + + + +Engines statesRESUMINGSCHEDULINGWAITINGSUCCESSSUSPENDEDREVERTEDANALYZINGstart + diff --git a/doc/source/img/flow_states.png b/doc/source/img/flow_states.png deleted file mode 100644 index 8ee0cb2a..00000000 Binary files a/doc/source/img/flow_states.png and /dev/null differ diff --git a/doc/source/img/flow_states.svg b/doc/source/img/flow_states.svg new file mode 100644 index 00000000..c6d9825e --- /dev/null +++ b/doc/source/img/flow_states.svg @@ -0,0 +1,8 @@ + + + + + +Flow statesPENDINGRUNNINGRESUMINGFAILURESUCCESSREVERTEDSUSPENDINGSUSPENDEDstart + diff --git a/doc/source/img/retry_states.png b/doc/source/img/retry_states.png deleted file mode 100644 index ccab5c6b..00000000 Binary files a/doc/source/img/retry_states.png and /dev/null differ diff --git a/doc/source/img/retry_states.svg b/doc/source/img/retry_states.svg new file mode 100644 index 00000000..014516e0 --- /dev/null +++ 
b/doc/source/img/retry_states.svg @@ -0,0 +1,8 @@ + + + + + +Retries statesPENDINGRUNNINGFAILURESUCCESSREVERTINGRETRYINGREVERTEDstart + diff --git a/doc/source/img/task_states.png b/doc/source/img/task_states.png deleted file mode 100644 index 6654ded3..00000000 Binary files a/doc/source/img/task_states.png and /dev/null differ diff --git a/doc/source/img/task_states.svg b/doc/source/img/task_states.svg new file mode 100644 index 00000000..f40501ac --- /dev/null +++ b/doc/source/img/task_states.svg @@ -0,0 +1,8 @@ + + + + + +Tasks statesPENDINGRUNNINGFAILURESUCCESSREVERTINGREVERTEDstart + diff --git a/doc/source/index.rst b/doc/source/index.rst index 7bfdc96d..3e9326b6 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1,13 +1,14 @@ TaskFlow ======== -*TaskFlow is a Python library for OpenStack that helps make task execution -easy, consistent, and reliable.* +*TaskFlow is a Python library that helps to make task execution easy, +consistent and reliable.* [#f1]_ .. note:: - Additional documentation is also hosted on wiki: - https://wiki.openstack.org/wiki/TaskFlow + If you are just getting started or looking for an overview please + visit: http://wiki.openstack.org/wiki/TaskFlow which provides better + introductory material, description of high level goals and related content. Contents ======== @@ -33,6 +34,55 @@ Contents workers +Examples +-------- + +While developing TaskFlow the team has worked *hard* to make sure the various +concepts are explained by *relevant* examples. Here are a few selected examples +to get started (ordered by *perceived* complexity): + +.. toctree:: + :maxdepth: 2 + + examples + +To explore more of these examples please check out the `examples`_ directory +in the TaskFlow `source tree`_. + +.. note:: + + If the examples provided are not satisfactory (or up to your + standards) contributions are welcome and very much appreciated to help + improve them. 
The higher the quality and the clearer the examples are the + better and more useful they are for everyone. + +.. _examples: http://git.openstack.org/cgit/openstack/taskflow/tree/taskflow/examples +.. _source tree: http://git.openstack.org/cgit/openstack/taskflow/ + +Considerations +-------------- + +Things to consider before (and during) development and integration of +TaskFlow into your project: + +* Read over the `paradigm shifts`_ and engage the team in `IRC`_ (or via the + `openstack-dev`_ mailing list) if these need more explanation (prefix + ``[TaskFlow]`` to your email's subject to get an even faster response). +* Follow (or at least attempt to follow) some of the established + `best practices`_ (feel free to add your own suggested best practices). + +.. warning:: + + External usage of internal helpers and other internal utility functions + and modules should be kept to a *minimum* as these may be altered, + refactored or moved *without* notice. If you are unsure whether to use + a function, class, or module, please ask (see above). + +.. _IRC: irc://chat.freenode.net/openstack-state-management +.. _best practices: http://wiki.openstack.org/wiki/TaskFlow/Best_practices +.. _paradigm shifts: http://wiki.openstack.org/wiki/TaskFlow/Paradigm_shifts +.. _openstack-dev: mailto:openstack-dev@lists.openstack.org + Miscellaneous ------------- @@ -40,9 +90,7 @@ Miscellaneous :maxdepth: 2 exceptions - utils states - examples Indices and tables ================== @@ -51,3 +99,7 @@ Indices and tables * :ref:`modindex` * :ref:`search` +.. [#f1] It should be noted that even though it is designed with OpenStack + integration in mind, and that is where most of its *current* + integration is, it aims to be generally usable and useful in any + project. 
diff --git a/doc/source/inputs_and_outputs.rst b/doc/source/inputs_and_outputs.rst index d89d6ab8..34fb1bad 100644 --- a/doc/source/inputs_and_outputs.rst +++ b/doc/source/inputs_and_outputs.rst @@ -34,6 +34,7 @@ set of names of such values is available via ``provides`` property of the flow. from taskflow import task from taskflow.patterns import linear_flow from taskflow import engines + from pprint import pprint For example: @@ -118,10 +119,11 @@ of the engine helpers (:py:func:`~taskflow.engines.helpers.run` or >>> flo = linear_flow.Flow("cat-dog") >>> flo.add(CatTalk(), DogTalk(provides="dog")) - >>> engines.run(flo, store={'meow': 'meow', 'woof': 'woof'}) + >>> result = engines.run(flo, store={'meow': 'meow', 'woof': 'woof'}) meow woof - {'meow': 'meow', 'woof': 'woof', 'dog': 'dog'} + >>> pprint(result) + {'dog': 'dog', 'meow': 'meow', 'woof': 'woof'} You can also directly interact with the engine storage layer to add additional values, note that if this route is used you can't use @@ -145,7 +147,7 @@ Outputs As you can see from examples above, the run method returns all flow outputs in a ``dict``. This same data can be fetched via :py:meth:`~taskflow.storage.Storage.fetch_all` method of the storage. You can -also get single results using :py:meth:`~taskflow.storage.Storage.fetch_all`. +also get single results using :py:meth:`~taskflow.storage.Storage.fetch`. For example: .. doctest:: @@ -154,8 +156,8 @@ For example: >>> eng.run() meow woof - >>> print(eng.storage.fetch_all()) - {'meow': 'meow', 'woof': 'woof', 'dog': 'dog'} + >>> pprint(eng.storage.fetch_all()) + {'dog': 'dog', 'meow': 'meow', 'woof': 'woof'} >>> print(eng.storage.fetch("dog")) dog diff --git a/doc/source/jobs.rst b/doc/source/jobs.rst index 05592b5c..048a66ea 100644 --- a/doc/source/jobs.rst +++ b/doc/source/jobs.rst @@ -214,7 +214,7 @@ the engine can immediately stop doing further work. 
The effect that this causes is that when a claim is lost another engine can immediately attempt to acquire the claim that was previously lost and it *could* begin working on the unfinished tasks that the later engine may also still be executing (since that -engine is not yet aware that it has lost the claim). +engine is not yet aware that it has *lost* the claim). **TLDR:** not `preemptable`_, possible to become aware of losing a claim after the fact (at the next state change), another engine could have acquired @@ -235,8 +235,8 @@ the claim by then, therefore both would be *working* on a job. #. Delay claiming partially completed work by adding a wait period (to allow the previous engine to coalesce) before working on a partially completed job - (combine this with the prior suggestions and dual-engine issues should be - avoided). + (combine this with the prior suggestions and *most* dual-engine issues + should be avoided). .. _idempotent: http://en.wikipedia.org/wiki/Idempotence .. _preemptable: http://en.wikipedia.org/wiki/Preemption_%28computing%29 diff --git a/doc/source/states.rst b/doc/source/states.rst index 34841f4d..02fcaf15 100644 --- a/doc/source/states.rst +++ b/doc/source/states.rst @@ -7,46 +7,40 @@ States Engine ====== -.. image:: img/engine_states.png - :height: 265px - :align: right +.. image:: img/engine_states.svg + :width: 660px + :align: left :alt: Action engine state transitions -Executing ---------- +**RESUMING** - Prepares flow & atoms to be resumed. -**RESUMING** - Prepare flow to be resumed. +**SCHEDULING** - Schedules and submits atoms to be worked on. -**SCHEDULING** - Schedule nodes to be worked on. +**WAITING** - Wait for atoms to finish executing. -**WAITING** - Wait for nodes to finish executing. +**ANALYZING** - Analyzes and processes result/s of atom completion. -**ANALYZING** - Analyze and process result/s of node completion. +**SUCCESS** - Completed successfully. -End ---- - -**SUCCESS** - Engine completed successfully. 
- -**REVERTED** - Engine reverting was induced and all nodes were not completed +**REVERTED** - Reverting was induced and all atoms were **not** completed successfully. -**SUSPENDED** - Engine was suspended while running.. +**SUSPENDED** - Suspended while running. Flow ==== -.. image:: img/flow_states.png - :height: 400px - :align: right +.. image:: img/flow_states.svg + :width: 660px + :align: left :alt: Flow state transitions **PENDING** - A flow starts its life in this state. **RUNNING** - In this state flow makes a progress, executes and/or reverts its -tasks. +atoms. -**SUCCESS** - Once all tasks have finished successfully the flow transitions to +**SUCCESS** - Once all atoms have finished successfully the flow transitions to the SUCCESS state. **REVERTED** - The flow transitions to this state when it has been reverted @@ -57,14 +51,14 @@ after the failure. **SUSPENDING** - In the RUNNING state the flow can be suspended. When this happens, flow transitions to the SUSPENDING state immediately. In that state -the engine running the flow waits for running tasks to finish (since the engine -can not preempt tasks that are active). +the engine running the flow waits for running atoms to finish (since the engine +can not preempt atoms that are active). -**SUSPENDED** - When no tasks are running and all results received so far are +**SUSPENDED** - When no atoms are running and all results received so far are saved, the flow transitions from the SUSPENDING state to SUSPENDED. 
Also it may -go to the SUCCESS state if all tasks were in fact ran, or to the REVERTED state -if the flow was reverting and all tasks were reverted while the engine was -waiting for running tasks to finish, or to the FAILURE state if tasks were run +go to the SUCCESS state if all atoms were in fact ran, or to the REVERTED state +if the flow was reverting and all atoms were reverted while the engine was +waiting for running atoms to finish, or to the FAILURE state if atoms were run or reverted and some of them failed. **RESUMING** - When the flow is interrupted 'in a hard way' (e.g. server @@ -79,24 +73,25 @@ From the SUCCESS, FAILURE or REVERTED states the flow can be ran again (and thus it goes back into the RUNNING state). One of the possible use cases for this transition is to allow for alteration of a flow or flow details associated with a previously ran flow after the flow has finished, and client code wants -to ensure that each task from this new (potentially updated) flow has its +to ensure that each atom from this new (potentially updated) flow has its chance to run. .. note:: The current code also contains strong checks during each flow state - transition using the model described above and raises the InvalidState - exception if an invalid transition is attempted. This exception being - triggered usually means there is some kind of bug in the engine code or some - type of misuse/state violation is occurring, and should be reported as such. + transition using the model described above and raises the + :py:class:`~taskflow.exceptions.InvalidState` exception if an invalid + transition is attempted. This exception being triggered usually means there + is some kind of bug in the engine code or some type of misuse/state violation + is occurring, and should be reported as such. Task ==== -.. image:: img/task_states.png - :height: 265px - :align: right +.. 
image:: img/task_states.svg + :width: 660px + :align: left :alt: Task state transitions **PENDING** - When a task is added to a flow, it starts in the PENDING state, @@ -105,7 +100,8 @@ on to complete. The task transitions to the PENDING state after it was reverted and its flow was restarted or retried. **RUNNING** - When flow starts to execute the task, it transitions to the -RUNNING state, and stays in this state until its execute() method returns. +RUNNING state, and stays in this state until its +:py:meth:`execute() ` method returns. **SUCCESS** - The task transitions to this state after it was finished successfully. @@ -115,20 +111,20 @@ error. When the flow containing this task is being reverted, all its tasks are walked in particular order. **REVERTING** - The task transitions to this state when the flow starts to -revert it and its revert() method is called. Only tasks in the SUCCESS or -FAILURE state can be reverted. If this method fails (raises exception), task -goes to the FAILURE state. +revert it and its :py:meth:`revert() ` method +is called. Only tasks in the SUCCESS or FAILURE state can be reverted. If this +method fails (raises exception), the task goes to the FAILURE state. -**REVERTED** - The task that has been reverted appears it this state. +**REVERTED** - A task that has been reverted appears in this state. Retry ===== -.. image:: img/retry_states.png - :height: 275px - :align: right - :alt: Task state transitions +.. image:: img/retry_states.svg + :width: 660px + :align: left + :alt: Retry state transitions Retry has the same states as a task and one additional state. @@ -138,7 +134,8 @@ on to complete. The retry transitions to the PENDING state after it was reverted and its flow was restarted or retried. **RUNNING** - When flow starts to execute the retry, it transitions to the -RUNNING state, and stays in this state until its execute() method returns. +RUNNING state, and stays in this state until its +:py:meth:`execute() ` method returns. 
**SUCCESS** - The retry transitions to this state after it was finished successfully. @@ -148,14 +145,12 @@ error. When the flow containing this retry is being reverted, all its tasks are walked in particular order. **REVERTING** - The retry transitions to this state when the flow starts to -revert it and its revert() method is called. Only retries in SUCCESS or FAILURE -state can be reverted. If this method fails (raises exception), task goes to -the FAILURE. +revert it and its :py:meth:`revert() ` method is +called. Only retries in SUCCESS or FAILURE state can be reverted. If this +method fails (raises exception), the retry goes to the FAILURE state. -**REVERTED** - The retry that has been reverted appears it this state. +**REVERTED** - A retry that has been reverted appears in this state. **RETRYING** - If flow that is managed by the current retry was failed and -reverted, the retry prepares it for the next run and transitions to the +reverted, the engine prepares it for the next run and transitions to the RETRYING state. - - diff --git a/doc/source/utils.rst b/doc/source/utils.rst deleted file mode 100644 index 75fe91b6..00000000 --- a/doc/source/utils.rst +++ /dev/null @@ -1,15 +0,0 @@ ------ -Utils ------ - -.. warning:: - - External usage of internal helpers and other internal utility functions - and modules should be kept to a *minimum* as these may be altered, - refactored or moved *without* notice. - -The following classes and modules though may be used: - -.. autoclass:: taskflow.utils.misc.Failure -.. autoclass:: taskflow.utils.eventlet_utils.GreenExecutor -.. 
automodule:: taskflow.utils.persistence_utils diff --git a/doc/source/workers.rst b/doc/source/workers.rst index 6ed987bc..9c2f2b9c 100644 --- a/doc/source/workers.rst +++ b/doc/source/workers.rst @@ -7,8 +7,7 @@ Overview This is engine that schedules tasks to **workers** -- separate processes dedicated for certain atoms execution, possibly running on other machines, -connected via `amqp`_ (or other supported `kombu -`_ transports). +connected via `amqp`_ (or other supported `kombu`_ transports). .. note:: @@ -18,6 +17,7 @@ connected via `amqp`_ (or other supported `kombu production ready. .. _blueprint page: https://blueprints.launchpad.net/taskflow?searchtext=wbe +.. _kombu: http://kombu.readthedocs.org/ Terminology ----------- diff --git a/optional-requirements.txt b/optional-requirements.txt index d423210a..e010cf60 100644 --- a/optional-requirements.txt +++ b/optional-requirements.txt @@ -1,8 +1,11 @@ -# This file lists dependencies that are used by different -# pluggable (optional) parts of TaskFlow, like engines -# or persistence backends. They are not strictly required -# by TaskFlow (you can use TaskFlow without them), but -# so they don't go to requirements.txt. +# This file lists dependencies that are used by different pluggable (optional) +# parts of TaskFlow, like engines or persistence backends. They are not +# strictly required by TaskFlow (aka you can use TaskFlow without them), so +# they don't go into one of the requirements.txt files. + +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. 
# Database (sqlalchemy) persistence: SQLAlchemy>=0.7.8,<=0.9.99 diff --git a/requirements-py2.txt b/requirements-py2.txt new file mode 100644 index 00000000..9b204ea6 --- /dev/null +++ b/requirements-py2.txt @@ -0,0 +1,22 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. + +# Packages needed for using this library. +anyjson>=0.3.3 +iso8601>=0.1.9 +# Only needed on python 2.6 +ordereddict +# Python 2->3 compatibility library. +six>=1.7.0 +# Very nice graph library +networkx>=1.8 +Babel>=1.3 +# Used for backend storage engine loading. +stevedore>=0.14 +# Backport for concurrent.futures which exists in 3.2+ +futures>=2.1.6 +# Used for structured input validation +jsonschema>=2.0.0,<3.0.0 +# For pretty printing state-machine tables +PrettyTable>=0.7,<0.8 diff --git a/requirements-py3.txt b/requirements-py3.txt new file mode 100644 index 00000000..63880b31 --- /dev/null +++ b/requirements-py3.txt @@ -0,0 +1,18 @@ +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. + +# Packages needed for using this library. +anyjson>=0.3.3 +iso8601>=0.1.9 +# Python 2->3 compatibility library. +six>=1.7.0 +# Very nice graph library +networkx>=1.8 +Babel>=1.3 +# Used for backend storage engine loading. +stevedore>=0.14 +# Used for structured input validation +jsonschema>=2.0.0,<3.0.0 +# For pretty printing state-machine tables +PrettyTable>=0.7,<0.8 diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 2b42a00c..00000000 --- a/requirements.txt +++ /dev/null @@ -1,13 +0,0 @@ -# Packages needed for using this library. -pbr>=0.6,!=0.7,<1.0 -anyjson>=0.3.3 -iso8601>=0.1.9 -# Python 2->3 compatibility library. 
-six>=1.6.0 -# Very nice graph library -networkx>=1.8 -Babel>=1.3 -# Used for backend storage engine loading. -stevedore>=0.14 -# Backport for concurrent.futures which exists in 3.2+ -futures>=2.1.3 diff --git a/taskflow/atom.py b/taskflow/atom.py index c288cbb7..d93ff57a 100644 --- a/taskflow/atom.py +++ b/taskflow/atom.py @@ -78,7 +78,9 @@ def _build_rebind_dict(args, rebind_args): def _build_arg_mapping(atom_name, reqs, rebind_args, function, do_infer, ignore_list=None): - """Given a function, its requirements and a rebind mapping this helper + """Builds an input argument mapping for a given function. + + Given a function, its requirements and a rebind mapping this helper function will build the correct argument mapping for the given function as well as verify that the final argument mapping does not have missing or extra arguments (where applicable). diff --git a/taskflow/conductors/base.py b/taskflow/conductors/base.py index 7cee0c64..e7c9887a 100644 --- a/taskflow/conductors/base.py +++ b/taskflow/conductors/base.py @@ -24,7 +24,9 @@ from taskflow.utils import lock_utils @six.add_metaclass(abc.ABCMeta) class Conductor(object): - """Conductors act as entities which extract jobs from a jobboard, assign + """Conductors conduct jobs & assist in associated runtime interactions. + + Conductors act as entities which extract jobs from a jobboard, assign there work to some engine (using some desired configuration) and then wait for that work to complete. If the work fails then they abandon the claimed work (or if the process they are running in crashes or dies this @@ -99,13 +101,13 @@ class Conductor(object): @abc.abstractmethod def run(self): - """Continuously claims, runs, and consumes jobs, and waits for more - jobs when there are none left on the jobboard. 
- """ + """Continuously claims, runs, and consumes jobs (and repeat).""" @abc.abstractmethod def _dispatch_job(self, job): - """Accepts a single (already claimed) job and causes it to be run in + """Dispatches a claimed job for work completion. + + Accepts a single (already claimed) job and causes it to be run in an engine. Returns a boolean that signifies whether the job should be consumed. The job is consumed upon completion (unless False is returned which will signify the job should be abandoned instead). diff --git a/taskflow/conductors/single_threaded.py b/taskflow/conductors/single_threaded.py index 5deeba30..5e78e348 100644 --- a/taskflow/conductors/single_threaded.py +++ b/taskflow/conductors/single_threaded.py @@ -20,8 +20,8 @@ import six from taskflow.conductors import base from taskflow import exceptions as excp from taskflow.listeners import logging as logging_listener +from taskflow.types import timing as tt from taskflow.utils import lock_utils -from taskflow.utils import misc LOG = logging.getLogger(__name__) WAIT_TIMEOUT = 0.5 @@ -58,8 +58,8 @@ class SingleThreadedConductor(base.Conductor): if wait_timeout is None: wait_timeout = WAIT_TIMEOUT if isinstance(wait_timeout, (int, float) + six.string_types): - self._wait_timeout = misc.Timeout(float(wait_timeout)) - elif isinstance(wait_timeout, misc.Timeout): + self._wait_timeout = tt.Timeout(float(wait_timeout)) + elif isinstance(wait_timeout, tt.Timeout): self._wait_timeout = wait_timeout else: raise ValueError("Invalid timeout literal: %s" % (wait_timeout)) @@ -67,10 +67,13 @@ class SingleThreadedConductor(base.Conductor): @lock_utils.locked def stop(self, timeout=None): - """Requests the conductor to stop dispatching and returns whether the - stop request was successfully completed. If the dispatching is still - occurring then False is returned otherwise True will be returned to - signal that the conductor is no longer dispatching job requests. + """Requests the conductor to stop dispatching. 
+ + This method can be used to request that a conductor stop its + consumption & dispatching loop. It returns whether the stop request + was successfully completed. If the dispatching is still occurring + then False is returned otherwise True will be returned to signal that + the conductor is no longer consuming & dispatching job requests. NOTE(harlowja): If a timeout is provided the dispatcher loop may not have ceased by the timeout reached (the request to cease will @@ -93,17 +96,24 @@ class SingleThreadedConductor(base.Conductor): engine.run() except excp.WrappedFailure as e: if all((f.check(*NO_CONSUME_EXCEPTIONS) for f in e)): - LOG.warn("Job execution failed (consumption being" - " skipped): %s", job, exc_info=True) consume = False - else: - LOG.warn("Job execution failed: %s", job, exc_info=True) + if LOG.isEnabledFor(logging.WARNING): + if consume: + LOG.warn("Job execution failed (consumption being" + " skipped): %s [%s failures]", job, len(e)) + else: + LOG.warn("Job execution failed (consumption" + " proceeding): %s [%s failures]", job, len(e)) + # Show the failure/s + traceback (if possible)... + for i, f in enumerate(e): + LOG.warn("%s. 
%s", i + 1, f.pformat(traceback=True)) except NO_CONSUME_EXCEPTIONS: LOG.warn("Job execution failed (consumption being" " skipped): %s", job, exc_info=True) consume = False except Exception: - LOG.warn("Job execution failed: %s", job, exc_info=True) + LOG.warn("Job execution failed (consumption proceeding): %s", + job, exc_info=True) else: LOG.info("Job completed successfully: %s", job) return consume diff --git a/taskflow/engines/action_engine/analyzer.py b/taskflow/engines/action_engine/analyzer.py index ef960afc..d4b181a6 100644 --- a/taskflow/engines/action_engine/analyzer.py +++ b/taskflow/engines/action_engine/analyzer.py @@ -22,11 +22,13 @@ from taskflow import states as st class Analyzer(object): - """Analyzes a compilation output to get the next atoms for execution or - reversion by utilizing the compilations underlying structures (graphs, - nodes and edge relations...) and using this information along with the - atom state/states stored in storage to provide useful analysis functions - to the rest of the runtime system. + """Analyzes a compilation and aids in execution processes. + + Its primary purpose is to get the next atoms for execution or reversion + by utilizing the compilations underlying structures (graphs, nodes and + edge relations...) and using this information along with the atom + state/states stored in storage to provide other useful functionality to + the rest of the runtime system. """ def __init__(self, compilation, storage): @@ -56,8 +58,11 @@ class Analyzer(object): return [] def browse_nodes_for_execute(self, node=None): - """Browse next nodes to execute for given node if specified and - for whole graph otherwise. + """Browse next nodes to execute. + + This returns a collection of nodes that are ready to be executed, if + given a specific node it will only examine the successors of that node, + otherwise it will examine the whole graph. 
""" if node: nodes = self._graph.successors(node) @@ -71,8 +76,11 @@ class Analyzer(object): return available_nodes def browse_nodes_for_revert(self, node=None): - """Browse next nodes to revert for given node if specified and - for whole graph otherwise. + """Browse next nodes to revert. + + This returns a collection of nodes that are ready to be be reverted, if + given a specific node it will only examine the predecessors of that + node, otherwise it will examine the whole graph. """ if node: nodes = self._graph.predecessors(node) @@ -87,7 +95,6 @@ class Analyzer(object): def _is_ready_for_execute(self, task): """Checks if task is ready to be executed.""" - state = self.get_state(task) intention = self._storage.get_atom_intention(task.name) transition = st.check_task_transition(state, st.RUNNING) @@ -104,7 +111,6 @@ class Analyzer(object): def _is_ready_for_revert(self, task): """Checks if task is ready to be reverted.""" - state = self.get_state(task) intention = self._storage.get_atom_intention(task.name) transition = st.check_task_transition(state, st.REVERTING) @@ -120,15 +126,14 @@ class Analyzer(object): for state, intention in six.itervalues(task_states)) def iterate_subgraph(self, retry): - """Iterates a subgraph connected to current retry controller, including - nested retry controllers and its nodes. - """ + """Iterates a subgraph connected to given retry controller.""" for _src, dst in traversal.dfs_edges(self._graph, retry): yield dst def iterate_retries(self, state=None): - """Iterates retry controllers of a graph with given state or all - retries if state is None. + """Iterates retry controllers that match the provided state. + + If no state is provided it will yield back all retry controllers. 
""" for node in self._graph.nodes_iter(): if isinstance(node, retry_atom.Retry): diff --git a/taskflow/engines/action_engine/compiler.py b/taskflow/engines/action_engine/compiler.py index f5c519cc..32cb58c8 100644 --- a/taskflow/engines/action_engine/compiler.py +++ b/taskflow/engines/action_engine/compiler.py @@ -42,8 +42,7 @@ class Compilation(object): class PatternCompiler(object): - """Compiles patterns & atoms (potentially nested) into an compilation - unit with a *logically* equivalent directed acyclic graph representation. + """Compiles patterns & atoms into a compilation unit. NOTE(harlowja): during this pattern translation process any nested flows will be converted into there equivalent subgraphs. This currently implies @@ -51,8 +50,8 @@ class PatternCompiler(object): be associated with there previously containing flow but instead will lose this identity and what will remain is the logical constraints that there contained flow mandated. In the future this may be changed so that this - association is not lost via the compilation process (since it is sometime - useful to retain part of this relationship). + association is not lost via the compilation process (since it can be + useful to retain this relationship). """ def compile(self, root): graph = _Flattener(root).flatten() @@ -80,9 +79,11 @@ class _Flattener(object): self._freeze = bool(freeze) def _add_new_edges(self, graph, nodes_from, nodes_to, edge_attrs): - """Adds new edges from nodes to other nodes in the specified graph, - with the following edge attributes (defaulting to the class provided - edge_data if None), if the edge does not already exist. + """Adds new edges from nodes to other nodes in the specified graph. + + It will connect the nodes_from to the nodes_to if an edge currently + does *not* exist. When an edge is created the provided edge attributes + will be applied to the new edge between these two nodes. 
""" nodes_to = list(nodes_to) for u in nodes_from: @@ -109,8 +110,18 @@ class _Flattener(object): elif isinstance(item, task.BaseTask): return self._flatten_task elif isinstance(item, retry.Retry): - raise TypeError("Retry controller %s (%s) is used not as a flow " - "parameter" % (item, type(item))) + if len(self._history) == 1: + raise TypeError("Retry controller: %s (%s) must only be used" + " as a flow constructor parameter and not as a" + " root component" % (item, type(item))) + else: + # TODO(harlowja): we should raise this type error earlier + # instead of later since we should do this same check on add() + # calls, this makes the error more visible (instead of waiting + # until compile time). + raise TypeError("Retry controller: %s (%s) must only be used" + " as a flow constructor parameter and not as a" + " flow added component" % (item, type(item))) else: return None diff --git a/taskflow/engines/action_engine/engine.py b/taskflow/engines/action_engine/engine.py index 8a370516..a5f587fd 100644 --- a/taskflow/engines/action_engine/engine.py +++ b/taskflow/engines/action_engine/engine.py @@ -14,24 +14,33 @@ # License for the specific language governing permissions and limitations # under the License. +import contextlib import threading from taskflow.engines.action_engine import compiler from taskflow.engines.action_engine import executor from taskflow.engines.action_engine import runtime from taskflow.engines import base - from taskflow import exceptions as exc from taskflow.openstack.common import excutils from taskflow import retry from taskflow import states from taskflow import storage as atom_storage - from taskflow.utils import lock_utils from taskflow.utils import misc from taskflow.utils import reflection +@contextlib.contextmanager +def _start_stop(executor): + # A teenie helper context manager to safely start/stop a executor... 
+ executor.start() + try: + yield executor + finally: + executor.stop() + + class ActionEngine(base.EngineBase): """Generic action-based engine. @@ -112,31 +121,38 @@ class ActionEngine(base.EngineBase): """ self.compile() self.prepare() - self._task_executor.start() - state = None runner = self._runtime.runner - try: + last_state = None + with _start_stop(self._task_executor): self._change_state(states.RUNNING) - for state in runner.run_iter(timeout=timeout): - try: - try_suspend = yield state - except GeneratorExit: - break - else: - if try_suspend: + try: + closed = False + for (last_state, failures) in runner.run_iter(timeout=timeout): + if failures: + misc.Failure.reraise_if_any(failures) + if closed: + continue + try: + try_suspend = yield last_state + except GeneratorExit: + # The generator was closed, attempt to suspend and + # continue looping until we have cleanly closed up + # shop... + closed = True self.suspend() - except Exception: - with excutils.save_and_reraise_exception(): - self._change_state(states.FAILURE) - else: - ignorable_states = getattr(runner, 'ignorable_states', []) - if state and state not in ignorable_states: - self._change_state(state) - if state != states.SUSPENDED and state != states.SUCCESS: - failures = self.storage.get_failures() - misc.Failure.reraise_if_any(failures.values()) - finally: - self._task_executor.stop() + else: + if try_suspend: + self.suspend() + except Exception: + with excutils.save_and_reraise_exception(): + self._change_state(states.FAILURE) + else: + ignorable_states = getattr(runner, 'ignorable_states', []) + if last_state and last_state not in ignorable_states: + self._change_state(last_state) + if last_state not in [states.SUSPENDED, states.SUCCESS]: + failures = self.storage.get_failures() + misc.Failure.reraise_if_any(failures.values()) def _change_state(self, state): with self._state_lock: @@ -144,20 +160,12 @@ class ActionEngine(base.EngineBase): if not states.check_flow_transition(old_state, state): 
return self.storage.set_flow_state(state) - try: - flow_uuid = self._flow.uuid - except AttributeError: - # NOTE(harlowja): if the flow was just a single task, then it - # will not itself have a uuid, but the constructed flow_detail - # will. - if self._flow_detail is not None: - flow_uuid = self._flow_detail.uuid - else: - flow_uuid = None - details = dict(engine=self, - flow_name=self._flow.name, - flow_uuid=flow_uuid, - old_state=old_state) + details = { + 'engine': self, + 'flow_name': self.storage.flow_name, + 'flow_uuid': self.storage.flow_uuid, + 'old_state': old_state, + } self.notifier.notify(state, details) def _ensure_storage(self): @@ -226,9 +234,12 @@ class MultiThreadedActionEngine(ActionEngine): _storage_factory = atom_storage.MultiThreadedStorage def _task_executor_factory(self): - return executor.ParallelTaskExecutor(self._executor) + return executor.ParallelTaskExecutor(executor=self._executor, + max_workers=self._max_workers) - def __init__(self, flow, flow_detail, backend, conf, **kwargs): + def __init__(self, flow, flow_detail, backend, conf, + executor=None, max_workers=None): super(MultiThreadedActionEngine, self).__init__( flow, flow_detail, backend, conf) - self._executor = kwargs.get('executor') + self._executor = executor + self._max_workers = max_workers diff --git a/taskflow/engines/action_engine/executor.py b/taskflow/engines/action_engine/executor.py index 816060f5..b2bdbdae 100644 --- a/taskflow/engines/action_engine/executor.py +++ b/taskflow/engines/action_engine/executor.py @@ -31,11 +31,14 @@ REVERTED = 'reverted' def _execute_task(task, arguments, progress_callback): with task.autobind('update_progress', progress_callback): try: + task.pre_execute() result = task.execute(**arguments) except Exception: # NOTE(imelnikov): wrap current exception with Failure # object and return it. 
result = misc.Failure() + finally: + task.post_execute() return (task, EXECUTED, result) @@ -45,11 +48,14 @@ def _revert_task(task, arguments, result, failures, progress_callback): kwargs['flow_failures'] = failures with task.autobind('update_progress', progress_callback): try: + task.pre_revert() result = task.revert(**kwargs) except Exception: # NOTE(imelnikov): wrap current exception with Failure # object and return it. result = misc.Failure() + finally: + task.post_revert() return (task, REVERTED, result) @@ -105,13 +111,14 @@ class SerialTaskExecutor(TaskExecutorBase): class ParallelTaskExecutor(TaskExecutorBase): """Executes tasks in parallel. - Submits tasks to executor which should provide interface similar + Submits tasks to an executor which should provide an interface similar to concurrent.Futures.Executor. """ - def __init__(self, executor=None): + def __init__(self, executor=None, max_workers=None): self._executor = executor - self._own_executor = executor is None + self._max_workers = max_workers + self._create_executor = executor is None def execute_task(self, task, task_uuid, arguments, progress_callback=None): return self._executor.submit( @@ -127,11 +134,14 @@ class ParallelTaskExecutor(TaskExecutorBase): return async_utils.wait_for_any(fs, timeout) def start(self): - if self._own_executor: - thread_count = threading_utils.get_optimal_thread_count() - self._executor = futures.ThreadPoolExecutor(thread_count) + if self._create_executor: + if self._max_workers is not None: + max_workers = self._max_workers + else: + max_workers = threading_utils.get_optimal_thread_count() + self._executor = futures.ThreadPoolExecutor(max_workers) def stop(self): - if self._own_executor: + if self._create_executor: self._executor.shutdown(wait=True) self._executor = None diff --git a/taskflow/engines/action_engine/retry_action.py b/taskflow/engines/action_engine/retry_action.py index a1ca3abb..afdfb456 100644 --- a/taskflow/engines/action_engine/retry_action.py +++ 
b/taskflow/engines/action_engine/retry_action.py @@ -17,7 +17,6 @@ import logging from taskflow.engines.action_engine import executor as ex -from taskflow import exceptions from taskflow import states from taskflow.utils import async_utils from taskflow.utils import misc @@ -39,27 +38,25 @@ class RetryAction(object): return kwargs def change_state(self, retry, state, result=None): - old_state = self._storage.get_atom_state(retry.name) - if old_state == state: - return state != states.PENDING if state in SAVE_RESULT_STATES: self._storage.save(retry.name, result, state) elif state == states.REVERTED: self._storage.cleanup_retry_history(retry.name, state) else: + old_state = self._storage.get_atom_state(retry.name) + if state == old_state: + # NOTE(imelnikov): nothing really changed, so we should not + # write anything to storage and run notifications + return self._storage.set_atom_state(retry.name, state) retry_uuid = self._storage.get_atom_uuid(retry.name) details = dict(retry_name=retry.name, retry_uuid=retry_uuid, result=result) self._notifier.notify(state, details) - return True def execute(self, retry): - if not self.change_state(retry, states.RUNNING): - raise exceptions.InvalidState("Retry controller %s is in invalid " - "state and can't be executed" % - retry.name) + self.change_state(retry, states.RUNNING) kwargs = self._get_retry_args(retry) try: result = retry.execute(**kwargs) @@ -71,10 +68,7 @@ class RetryAction(object): return async_utils.make_completed_future((retry, ex.EXECUTED, result)) def revert(self, retry): - if not self.change_state(retry, states.REVERTING): - raise exceptions.InvalidState("Retry controller %s is in invalid " - "state and can't be reverted" % - retry.name) + self.change_state(retry, states.REVERTING) kwargs = self._get_retry_args(retry) kwargs['flow_failures'] = self._storage.get_failures() try: diff --git a/taskflow/engines/action_engine/runner.py b/taskflow/engines/action_engine/runner.py index 0120bd69..7a0b9c87 100644 --- 
a/taskflow/engines/action_engine/runner.py +++ b/taskflow/engines/action_engine/runner.py @@ -14,24 +14,199 @@ # License for the specific language governing permissions and limitations # under the License. +import logging + from taskflow import states as st +from taskflow.types import fsm from taskflow.utils import misc +# Waiting state timeout (in seconds). +_WAITING_TIMEOUT = 60 -_WAITING_TIMEOUT = 60 # in seconds +# Meta states the state machine uses. +_UNDEFINED = 'UNDEFINED' +_GAME_OVER = 'GAME_OVER' +_META_STATES = (_GAME_OVER, _UNDEFINED) + +LOG = logging.getLogger(__name__) + + +class _MachineMemory(object): + """State machine memory.""" + + def __init__(self): + self.next_nodes = set() + self.not_done = set() + self.failures = [] + self.done = set() + + +class _MachineBuilder(object): + """State machine *builder* that the runner uses. + + NOTE(harlowja): the machine states that this build will for are:: + + +--------------+-----------+------------+----------+---------+ + | Start | Event | End | On Enter | On Exit | + +--------------+-----------+------------+----------+---------+ + | ANALYZING | finished | GAME_OVER | on_enter | on_exit | + | ANALYZING | schedule | SCHEDULING | on_enter | on_exit | + | ANALYZING | wait | WAITING | on_enter | on_exit | + | FAILURE[$] | | | | | + | GAME_OVER | failed | FAILURE | on_enter | on_exit | + | GAME_OVER | reverted | REVERTED | on_enter | on_exit | + | GAME_OVER | success | SUCCESS | on_enter | on_exit | + | GAME_OVER | suspended | SUSPENDED | on_enter | on_exit | + | RESUMING | schedule | SCHEDULING | on_enter | on_exit | + | REVERTED[$] | | | | | + | SCHEDULING | wait | WAITING | on_enter | on_exit | + | SUCCESS[$] | | | | | + | SUSPENDED[$] | | | | | + | UNDEFINED[^] | start | RESUMING | on_enter | on_exit | + | WAITING | analyze | ANALYZING | on_enter | on_exit | + +--------------+-----------+------------+----------+---------+ + + Between any of these yielded states (minus ``GAME_OVER`` and ``UNDEFINED``) + if 
the engine has been suspended or the engine has failed (due to a + non-resolveable task failure or scheduling failure) the machine will stop + executing new tasks (currently running tasks will be allowed to complete) + and this machines run loop will be broken. + """ + + def __init__(self, runtime, waiter): + self._analyzer = runtime.analyzer + self._completer = runtime.completer + self._scheduler = runtime.scheduler + self._storage = runtime.storage + self._waiter = waiter + + def runnable(self): + return self._storage.get_flow_state() == st.RUNNING + + def build(self, timeout=None): + memory = _MachineMemory() + if timeout is None: + timeout = _WAITING_TIMEOUT + + def resume(old_state, new_state, event): + memory.next_nodes.update(self._completer.resume()) + memory.next_nodes.update(self._analyzer.get_next_nodes()) + return 'schedule' + + def game_over(old_state, new_state, event): + if memory.failures: + return 'failed' + if self._analyzer.get_next_nodes(): + return 'suspended' + elif self._analyzer.is_success(): + return 'success' + else: + return 'reverted' + + def schedule(old_state, new_state, event): + if self.runnable() and memory.next_nodes: + not_done, failures = self._scheduler.schedule( + memory.next_nodes) + if not_done: + memory.not_done.update(not_done) + if failures: + memory.failures.extend(failures) + memory.next_nodes.clear() + return 'wait' + + def wait(old_state, new_state, event): + # TODO(harlowja): maybe we should start doing 'yield from' this + # call sometime in the future, or equivalent that will work in + # py2 and py3. 
+ if memory.not_done: + done, not_done = self._waiter.wait_for_any(memory.not_done, + timeout) + memory.done.update(done) + memory.not_done = not_done + return 'analyze' + + def analyze(old_state, new_state, event): + next_nodes = set() + while memory.done: + fut = memory.done.pop() + try: + node, event, result = fut.result() + retain = self._completer.complete(node, event, result) + if retain and isinstance(result, misc.Failure): + memory.failures.append(result) + except Exception: + memory.failures.append(misc.Failure()) + else: + try: + more_nodes = self._analyzer.get_next_nodes(node) + except Exception: + memory.failures.append(misc.Failure()) + else: + next_nodes.update(more_nodes) + if self.runnable() and next_nodes and not memory.failures: + memory.next_nodes.update(next_nodes) + return 'schedule' + elif memory.not_done: + return 'wait' + else: + return 'finished' + + def on_exit(old_state, event): + LOG.debug("Exiting old state '%s' in response to event '%s'", + old_state, event) + + def on_enter(new_state, event): + LOG.debug("Entering new state '%s' in response to event '%s'", + new_state, event) + + # NOTE(harlowja): when ran in debugging mode it is quite useful + # to track the various state transitions as they happen... 
+ watchers = {} + if LOG.isEnabledFor(logging.DEBUG): + watchers['on_exit'] = on_exit + watchers['on_enter'] = on_enter + + m = fsm.FSM(_UNDEFINED) + m.add_state(_GAME_OVER, **watchers) + m.add_state(_UNDEFINED, **watchers) + m.add_state(st.ANALYZING, **watchers) + m.add_state(st.RESUMING, **watchers) + m.add_state(st.REVERTED, terminal=True, **watchers) + m.add_state(st.SCHEDULING, **watchers) + m.add_state(st.SUCCESS, terminal=True, **watchers) + m.add_state(st.SUSPENDED, terminal=True, **watchers) + m.add_state(st.WAITING, **watchers) + m.add_state(st.FAILURE, terminal=True, **watchers) + + m.add_transition(_GAME_OVER, st.REVERTED, 'reverted') + m.add_transition(_GAME_OVER, st.SUCCESS, 'success') + m.add_transition(_GAME_OVER, st.SUSPENDED, 'suspended') + m.add_transition(_GAME_OVER, st.FAILURE, 'failed') + m.add_transition(_UNDEFINED, st.RESUMING, 'start') + m.add_transition(st.ANALYZING, _GAME_OVER, 'finished') + m.add_transition(st.ANALYZING, st.SCHEDULING, 'schedule') + m.add_transition(st.ANALYZING, st.WAITING, 'wait') + m.add_transition(st.RESUMING, st.SCHEDULING, 'schedule') + m.add_transition(st.SCHEDULING, st.WAITING, 'wait') + m.add_transition(st.WAITING, st.ANALYZING, 'analyze') + + m.add_reaction(_GAME_OVER, 'finished', game_over) + m.add_reaction(st.ANALYZING, 'analyze', analyze) + m.add_reaction(st.RESUMING, 'start', resume) + m.add_reaction(st.SCHEDULING, 'schedule', schedule) + m.add_reaction(st.WAITING, 'wait', wait) + + return (m, memory) class Runner(object): """Runner that iterates while executing nodes using the given runtime. - This runner acts as the action engine run loop, it resumes the workflow, - schedules all task it can for execution using the runtimes scheduler and - analyzer components, and than waits on returned futures and then activates - the runtimes completion component to finish up those tasks. 
- - This process repeats until the analzyer runs out of next nodes, when the - scheduler can no longer schedule tasks or when the the engine has been - suspended or a task has failed and that failure could not be resolved. + This runner acts as the action engine run loop/state-machine, it resumes + the workflow, schedules all task it can for execution using the runtimes + scheduler and analyzer components, and than waits on returned futures and + then activates the runtimes completion component to finish up those tasks + and so on... NOTE(harlowja): If the runtimes scheduler component is able to schedule tasks in parallel, this enables parallel running and/or reversion. @@ -43,94 +218,22 @@ class Runner(object): ignorable_states = (st.SCHEDULING, st.WAITING, st.RESUMING, st.ANALYZING) def __init__(self, runtime, waiter): - self._scheduler = runtime.scheduler - self._completer = runtime.completer - self._storage = runtime.storage - self._analyzer = runtime.analyzer - self._waiter = waiter + self._builder = _MachineBuilder(runtime, waiter) - def is_running(self): - return self._storage.get_flow_state() == st.RUNNING + @property + def builder(self): + return self._builder + + def runnable(self): + return self._builder.runnable() def run_iter(self, timeout=None): - """Runs the nodes using the runtime components. - - NOTE(harlowja): the states that this generator will go through are: - - RESUMING -> SCHEDULING - SCHEDULING -> WAITING - WAITING -> ANALYZING - ANALYZING -> SCHEDULING - - Between any of these yielded states if the engine has been suspended - or the engine has failed (due to a non-resolveable task failure or - scheduling failure) the engine will stop executing new tasks (currently - running tasks will be allowed to complete) and this iteration loop - will be broken. 
- """ - if timeout is None: - timeout = _WAITING_TIMEOUT - - # Prepare flow to be resumed - yield st.RESUMING - next_nodes = self._completer.resume() - next_nodes.update(self._analyzer.get_next_nodes()) - - # Schedule nodes to be worked on - yield st.SCHEDULING - if self.is_running(): - not_done, failures = self._scheduler.schedule(next_nodes) - else: - not_done, failures = (set(), []) - - # Run! - # - # At this point we need to ensure we wait for all active nodes to - # finish running (even if we are asked to suspend) since we can not - # preempt those tasks (maybe in the future we will be better able to do - # this). - while not_done: - yield st.WAITING - - # TODO(harlowja): maybe we should start doing 'yield from' this - # call sometime in the future, or equivalent that will work in - # py2 and py3. - done, not_done = self._waiter.wait_for_any(not_done, timeout) - - # Analyze the results and schedule more nodes (unless we had - # failures). If failures occurred just continue processing what - # is running (so that we don't leave it abandoned) but do not - # schedule anything new. - yield st.ANALYZING - next_nodes = set() - for future in done: - try: - node, event, result = future.result() - retain = self._completer.complete(node, event, result) - if retain and isinstance(result, misc.Failure): - failures.append(result) - except Exception: - failures.append(misc.Failure()) + """Runs the nodes using a built state machine.""" + machine, memory = self.builder.build(timeout=timeout) + for (_prior_state, new_state) in machine.run_iter('start'): + # NOTE(harlowja): skip over meta-states. + if new_state not in _META_STATES: + if new_state == st.FAILURE: + yield (new_state, memory.failures) else: - try: - more_nodes = self._analyzer.get_next_nodes(node) - except Exception: - failures.append(misc.Failure()) - else: - next_nodes.update(more_nodes) - if next_nodes and not failures and self.is_running(): - yield st.SCHEDULING - # Recheck incase someone suspended it. 
- if self.is_running(): - more_not_done, failures = self._scheduler.schedule( - next_nodes) - not_done.update(more_not_done) - - if failures: - misc.Failure.reraise_if_any(failures) - if self._analyzer.get_next_nodes(): - yield st.SUSPENDED - elif self._analyzer.is_success(): - yield st.SUCCESS - else: - yield st.REVERTED + yield (new_state, []) diff --git a/taskflow/engines/action_engine/runtime.py b/taskflow/engines/action_engine/runtime.py index 40e66453..90913b99 100644 --- a/taskflow/engines/action_engine/runtime.py +++ b/taskflow/engines/action_engine/runtime.py @@ -14,23 +14,24 @@ # License for the specific language governing permissions and limitations # under the License. +from taskflow.engines.action_engine import analyzer as ca +from taskflow.engines.action_engine import executor as ex +from taskflow.engines.action_engine import retry_action as ra +from taskflow.engines.action_engine import runner as ru +from taskflow.engines.action_engine import task_action as ta from taskflow import exceptions as excp from taskflow import retry as retry_atom from taskflow import states as st from taskflow import task as task_atom from taskflow.utils import misc -from taskflow.engines.action_engine import analyzer as ca -from taskflow.engines.action_engine import executor as ex -from taskflow.engines.action_engine import retry_action as ra -from taskflow.engines.action_engine import runner as ru -from taskflow.engines.action_engine import task_action as ta - class Runtime(object): - """An object that contains various utility methods and properties that - represent the collection of runtime components and functionality needed - for an action engine to run to completion. + """A aggregate of runtime objects, properties, ... used during execution. + + This object contains various utility methods and properties that represent + the collection of runtime components and functionality needed for an + action engine to run to completion. 
""" def __init__(self, compilation, storage, task_notifier, task_executor): @@ -155,8 +156,13 @@ class Completer(object): return False def _process_atom_failure(self, atom, failure): - """On atom failure find its retry controller, ask for the action to - perform with failed subflow and set proper intention for subflow nodes. + """Processes atom failure & applies resolution strategies. + + On atom failure this will find the atoms associated retry controller + and ask that controller for the strategy to perform to resolve that + failure. After getting a resolution strategy decision this method will + then adjust the needed other atoms intentions, and states, ... so that + the failure can be worked around. """ retry = self._analyzer.find_atom_retry(atom) if retry: @@ -195,6 +201,9 @@ class Scheduler(object): def _schedule_node(self, node): """Schedule a single node for execution.""" + # TODO(harlowja): we need to rework this so that we aren't doing type + # checking here, type checking usually means something isn't done right + # and usually will limit extensibility in the future. if isinstance(node, task_atom.BaseTask): return self._schedule_task(node) elif isinstance(node, retry_atom.Retry): @@ -204,8 +213,10 @@ class Scheduler(object): % (node, type(node))) def _schedule_retry(self, retry): - """Schedules the given retry for revert or execute depending - on its intention. + """Schedules the given retry atom for *future* completion. + + Depending on the atoms stored intention this may schedule the retry + atom for reversion or execution. """ intention = self._storage.get_atom_intention(retry.name) if intention == st.EXECUTE: @@ -221,8 +232,10 @@ class Scheduler(object): " intention: %s" % intention) def _schedule_task(self, task): - """Schedules the given task for revert or execute depending - on its intention. + """Schedules the given task atom for *future* completion. 
+ + Depending on the atoms stored intention this may schedule the task + atom for reversion or execution. """ intention = self._storage.get_atom_intention(task.name) if intention == st.EXECUTE: diff --git a/taskflow/engines/action_engine/task_action.py b/taskflow/engines/action_engine/task_action.py index c0d1daa5..a07ded79 100644 --- a/taskflow/engines/action_engine/task_action.py +++ b/taskflow/engines/action_engine/task_action.py @@ -16,7 +16,6 @@ import logging -from taskflow import exceptions from taskflow import states from taskflow.utils import misc @@ -32,10 +31,30 @@ class TaskAction(object): self._task_executor = task_executor self._notifier = notifier - def change_state(self, task, state, result=None, progress=None): + def _is_identity_transition(self, state, task, progress): + if state in SAVE_RESULT_STATES: + # saving result is never identity transition + return False old_state = self._storage.get_atom_state(task.name) - if old_state == state: - return state != states.PENDING + if state != old_state: + # changing state is not identity transition by definition + return False + # NOTE(imelnikov): last thing to check is that the progress has + # changed, which means progress is not None and is different from + # what is stored in the database. 
+ if progress is None: + return False + old_progress = self._storage.get_task_progress(task.name) + if old_progress != progress: + return False + return True + + def change_state(self, task, state, result=None, progress=None): + if self._is_identity_transition(state, task, progress): + # NOTE(imelnikov): ignore identity transitions in order + # to avoid extra write to storage backend and, what's + # more important, extra notifications + return if state in SAVE_RESULT_STATES: self._storage.save(task.name, result, state) else: @@ -49,7 +68,6 @@ class TaskAction(object): self._notifier.notify(state, details) if progress is not None: task.update_progress(progress) - return True def _on_update_progress(self, task, event_data, progress, **kwargs): """Should be called when task updates its progress.""" @@ -62,9 +80,7 @@ class TaskAction(object): task, progress) def schedule_execution(self, task): - if not self.change_state(task, states.RUNNING, progress=0.0): - raise exceptions.InvalidState("Task %s is in invalid state and" - " can't be executed" % task.name) + self.change_state(task, states.RUNNING, progress=0.0) kwargs = self._storage.fetch_mapped_args(task.rebind, atom_name=task.name) task_uuid = self._storage.get_atom_uuid(task.name) @@ -79,9 +95,7 @@ class TaskAction(object): result=result, progress=1.0) def schedule_reversion(self, task): - if not self.change_state(task, states.REVERTING, progress=0.0): - raise exceptions.InvalidState("Task %s is in invalid state and" - " can't be reverted" % task.name) + self.change_state(task, states.REVERTING, progress=0.0) kwargs = self._storage.fetch_mapped_args(task.rebind, atom_name=task.name) task_uuid = self._storage.get_atom_uuid(task.name) diff --git a/taskflow/engines/base.py b/taskflow/engines/base.py index 9255a3da..4bfcbabc 100644 --- a/taskflow/engines/base.py +++ b/taskflow/engines/base.py @@ -54,9 +54,12 @@ class EngineBase(object): @abc.abstractmethod def compile(self): - """Compiles the contained flow into a 
structure which the engine can - use to run or if this can not be done then an exception is thrown - indicating why this compilation could not be achieved. + """Compiles the contained flow into a internal representation. + + This internal representation is what the engine will *actually* use to + run. If this compilation can not be accomplished then an exception + is expected to be thrown with a message indicating why the compilation + could not be achieved. """ @abc.abstractmethod diff --git a/taskflow/engines/helpers.py b/taskflow/engines/helpers.py index 0fb7a518..c200df8a 100644 --- a/taskflow/engines/helpers.py +++ b/taskflow/engines/helpers.py @@ -31,15 +31,23 @@ from taskflow.utils import reflection ENGINES_NAMESPACE = 'taskflow.engines' +def _fetch_factory(factory_name): + try: + return importutils.import_class(factory_name) + except (ImportError, ValueError) as e: + raise ImportError("Could not import factory %r: %s" + % (factory_name, e)) + + def _fetch_validate_factory(flow_factory): if isinstance(flow_factory, six.string_types): - factory_fun = importutils.import_class(flow_factory) + factory_fun = _fetch_factory(flow_factory) factory_name = flow_factory else: factory_fun = flow_factory factory_name = reflection.get_callable_name(flow_factory) try: - reimported = importutils.import_class(factory_name) + reimported = _fetch_factory(factory_name) assert reimported == factory_fun except (ImportError, AssertionError): raise ValueError('Flow factory %r is not reimportable by name %s' @@ -50,7 +58,7 @@ def _fetch_validate_factory(flow_factory): def load(flow, store=None, flow_detail=None, book=None, engine_conf=None, backend=None, namespace=ENGINES_NAMESPACE, **kwargs): - """Load flow into engine. + """Load a flow into an engine. This function creates and prepares engine to run the flow. All that is left is to run the engine with 'run()' method. 
@@ -151,8 +159,7 @@ def run(flow, store=None, flow_detail=None, book=None, def save_factory_details(flow_detail, flow_factory, factory_args, factory_kwargs, backend=None): - """Saves the given factories reimportable name, args, kwargs into the - flow detail. + """Saves the given factories reimportable attributes into the flow detail. This function saves the factory name, arguments, and keyword arguments into the given flow details object and if a backend is provided it will @@ -227,9 +234,11 @@ def load_from_factory(flow_factory, factory_args=None, factory_kwargs=None, def flow_from_detail(flow_detail): - """Recreate flow previously loaded with load_form_factory. + """Reloads a flow previously saved. - Gets flow factory name from metadata, calls it to recreate the flow. + Gets the flow factories name and any arguments and keyword arguments from + the flow details metadata, and then calls that factory to recreate the + flow. :param flow_detail: FlowDetail that holds state of the flow to load """ @@ -241,7 +250,7 @@ def flow_from_detail(flow_detail): % (flow_detail.name, flow_detail.uuid)) try: - factory_fun = importutils.import_class(factory_data['name']) + factory_fun = _fetch_factory(factory_data['name']) except (KeyError, ImportError): raise ImportError('Could not import factory for flow %s %s' % (flow_detail.name, flow_detail.uuid)) @@ -253,10 +262,10 @@ def flow_from_detail(flow_detail): def load_from_detail(flow_detail, store=None, engine_conf=None, backend=None, namespace=ENGINES_NAMESPACE, **kwargs): - """Reload flow previously loaded with load_form_factory function. + """Reloads an engine previously saved. - Gets flow factory name from metadata, calls it to recreate the flow - and loads flow into engine with load(). + This reloads the flow using the flow_from_detail() function and then calls + into the load() function to create an engine from that flow. 
:param flow_detail: FlowDetail that holds state of the flow to load :param store: dict -- data to put to storage to satisfy flow requirements diff --git a/taskflow/engines/worker_based/cache.py b/taskflow/engines/worker_based/cache.py index f92bf23e..9da7f12c 100644 --- a/taskflow/engines/worker_based/cache.py +++ b/taskflow/engines/worker_based/cache.py @@ -14,54 +14,16 @@ # License for the specific language governing permissions and limitations # under the License. -import logging import random import six from taskflow.engines.worker_based import protocol as pr -from taskflow.utils import lock_utils as lu - -LOG = logging.getLogger(__name__) +from taskflow.types import cache as base -class Cache(object): - """Represents thread-safe cache.""" - - def __init__(self): - self._data = {} - self._lock = lu.ReaderWriterLock() - - def get(self, key): - """Retrieve a value from the cache.""" - with self._lock.read_lock(): - return self._data.get(key) - - def set(self, key, value): - """Set a value in the cache.""" - with self._lock.write_lock(): - self._data[key] = value - LOG.debug("Cache updated. 
Capacity: %s", len(self._data)) - - def delete(self, key): - """Delete a value from the cache.""" - with self._lock.write_lock(): - self._data.pop(key, None) - - def cleanup(self, on_expired_callback=None): - """Delete out-dated values from the cache.""" - with self._lock.write_lock(): - expired_values = [(k, v) for k, v in six.iteritems(self._data) - if v.expired] - for (k, _v) in expired_values: - self._data.pop(k, None) - if on_expired_callback: - for (_k, v) in expired_values: - on_expired_callback(v) - - -class RequestsCache(Cache): - """Represents thread-safe requests cache.""" +class RequestsCache(base.ExpiringCache): + """Represents a thread-safe requests cache.""" def get_waiting_requests(self, tasks): """Get list of waiting requests by tasks.""" @@ -73,8 +35,8 @@ class RequestsCache(Cache): return waiting_requests -class WorkersCache(Cache): - """Represents thread-safe workers cache.""" +class WorkersCache(base.ExpiringCache): + """Represents a thread-safe workers cache.""" def get_topic_by_task(self, task): """Get topic for a given task.""" diff --git a/taskflow/engines/worker_based/dispatcher.py b/taskflow/engines/worker_based/dispatcher.py new file mode 100644 index 00000000..9ff8ac10 --- /dev/null +++ b/taskflow/engines/worker_based/dispatcher.py @@ -0,0 +1,112 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import logging + +from kombu import exceptions as kombu_exc +import six + +from taskflow import exceptions as excp + +LOG = logging.getLogger(__name__) + + +class TypeDispatcher(object): + """Receives messages and dispatches to type specific handlers.""" + + def __init__(self, type_handlers): + self._handlers = dict(type_handlers) + self._requeue_filters = [] + + def add_requeue_filter(self, callback): + """Add a callback that can *request* message requeuing. + + The callback will be activated before the message has been acked and + it can be used to instruct the dispatcher to requeue the message + instead of processing it. + """ + assert six.callable(callback), "Callback must be callable" + self._requeue_filters.append(callback) + + def _collect_requeue_votes(self, data, message): + # Returns how many of the filters asked for the message to be requeued. + requeue_votes = 0 + for f in self._requeue_filters: + try: + if f(data, message): + requeue_votes += 1 + except Exception: + LOG.exception("Failed calling requeue filter to determine" + " if message %r should be requeued.", + message.delivery_tag) + return requeue_votes + + def _requeue_log_error(self, message, errors): + # TODO(harlowja): Remove when http://github.com/celery/kombu/pull/372 + # is merged and a version is released with this change... + try: + message.requeue() + except errors as exc: + # This was taken from how kombu is formatting its messages + # when its reject_log_error or ack_log_error functions are + # used so that we have a similar error format for requeuing. 
+ LOG.critical("Couldn't requeue %r, reason:%r", + message.delivery_tag, exc, exc_info=True) + else: + LOG.debug("AMQP message %r requeued.", message.delivery_tag) + + def _process_message(self, data, message, message_type): + handler = self._handlers.get(message_type) + if handler is None: + message.reject_log_error(logger=LOG, + errors=(kombu_exc.MessageStateError,)) + LOG.warning("Unexpected message type: '%s' in message" + " %r", message_type, message.delivery_tag) + else: + if isinstance(handler, (tuple, list)): + handler, validator = handler + try: + validator(data) + except excp.InvalidFormat as e: + message.reject_log_error( + logger=LOG, errors=(kombu_exc.MessageStateError,)) + LOG.warn("Message: %r, '%s' was rejected due to it being" + " in an invalid format: %s", + message.delivery_tag, message_type, e) + return + message.ack_log_error(logger=LOG, + errors=(kombu_exc.MessageStateError,)) + if message.acknowledged: + LOG.debug("AMQP message %r acknowledged.", + message.delivery_tag) + handler(data, message) + + def on_message(self, data, message): + """This method is called on incoming messages.""" + LOG.debug("Got message: %r", message.delivery_tag) + if self._collect_requeue_votes(data, message): + self._requeue_log_error(message, + errors=(kombu_exc.MessageStateError,)) + else: + try: + message_type = message.properties['type'] + except KeyError: + message.reject_log_error( + logger=LOG, errors=(kombu_exc.MessageStateError,)) + LOG.warning("The 'type' message property is missing" + " in message %r", message.delivery_tag) + else: + self._process_message(data, message, message_type) diff --git a/taskflow/engines/worker_based/executor.py b/taskflow/engines/worker_based/executor.py index 98ff3a8d..f26cad7c 100644 --- a/taskflow/engines/worker_based/executor.py +++ b/taskflow/engines/worker_based/executor.py @@ -14,15 +14,16 @@ # License for the specific language governing permissions and limitations # under the License. 
+import functools import logging -from kombu import exceptions as kombu_exc - from taskflow.engines.action_engine import executor from taskflow.engines.worker_based import cache from taskflow.engines.worker_based import protocol as pr from taskflow.engines.worker_based import proxy from taskflow import exceptions as exc +from taskflow.openstack.common import timeutils +from taskflow.types import timing as tt from taskflow.utils import async_utils from taskflow.utils import misc from taskflow.utils import reflection @@ -74,48 +75,42 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): self._topics = topics self._requests_cache = cache.RequestsCache() self._workers_cache = cache.WorkersCache() - self._proxy = proxy.Proxy(uuid, exchange, self._on_message, + self._workers_arrival = threading.Condition() + handlers = { + pr.NOTIFY: [ + self._process_notify, + functools.partial(pr.Notify.validate, response=True), + ], + pr.RESPONSE: [ + self._process_response, + pr.Response.validate, + ], + } + self._proxy = proxy.Proxy(uuid, exchange, handlers, self._on_wait, **kwargs) self._proxy_thread = None - self._periodic = PeriodicWorker(misc.Timeout(pr.NOTIFY_PERIOD), + self._periodic = PeriodicWorker(tt.Timeout(pr.NOTIFY_PERIOD), [self._notify_topics]) self._periodic_thread = None - def _on_message(self, data, message): - """This method is called on incoming message.""" - LOG.debug("Got message: %s", data) - try: - # acknowledge message before processing - message.ack() - except kombu_exc.MessageStateError: - LOG.exception("Failed to acknowledge AMQP message.") - else: - LOG.debug("AMQP message acknowledged.") - try: - msg_type = message.properties['type'] - except KeyError: - LOG.warning("The 'type' message property is missing.") - else: - if msg_type == pr.NOTIFY: - self._process_notify(data) - elif msg_type == pr.RESPONSE: - self._process_response(data, message) - else: - LOG.warning("Unexpected message type: %s", msg_type) - - def _process_notify(self, notify): + def 
_process_notify(self, notify, message): """Process notify message from remote side.""" LOG.debug("Start processing notify message.") topic = notify['topic'] tasks = notify['tasks'] # add worker info to the cache - self._workers_cache.set(topic, tasks) + self._workers_arrival.acquire() + try: + self._workers_cache[topic] = tasks + self._workers_arrival.notify_all() + finally: + self._workers_arrival.release() # publish waiting requests for request in self._requests_cache.get_waiting_requests(tasks): - request.set_pending() - self._publish_request(request, topic) + if request.transition_and_log_error(pr.PENDING, logger=LOG): + self._publish_request(request, topic) def _process_response(self, response, message): """Process response from remote side.""" @@ -125,20 +120,23 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): except KeyError: LOG.warning("The 'correlation_id' message property is missing.") else: - LOG.debug("Task uuid: '%s'", task_uuid) request = self._requests_cache.get(task_uuid) if request is not None: response = pr.Response.from_dict(response) if response.state == pr.RUNNING: - request.set_running() + request.transition_and_log_error(pr.RUNNING, logger=LOG) elif response.state == pr.PROGRESS: request.on_progress(**response.data) elif response.state in (pr.FAILURE, pr.SUCCESS): - # NOTE(imelnikov): request should not be in cache when - # another thread can see its result and schedule another - # request with same uuid; so we remove it, then set result - self._requests_cache.delete(request.uuid) - request.set_result(**response.data) + moved = request.transition_and_log_error(response.state, + logger=LOG) + if moved: + # NOTE(imelnikov): request should not be in the + # cache when another thread can see its result and + # schedule another request with the same uuid; so + # we remove it, then set the result... 
+ del self._requests_cache[request.uuid] + request.set_result(**response.data) else: LOG.warning("Unexpected response status: '%s'", response.state) @@ -152,10 +150,21 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): When request has expired it is removed from the requests cache and the `RequestTimeout` exception is set as a request result. """ - LOG.debug("Request '%r' has expired.", request) - LOG.debug("The '%r' request has expired.", request) - request.set_result(misc.Failure.from_exception( - exc.RequestTimeout("The '%r' request has expired" % request))) + if request.transition_and_log_error(pr.FAILURE, logger=LOG): + # Raise an exception (and then catch it) so we get a nice + # traceback that the request will get instead of it getting + # just an exception with no traceback... + try: + request_age = timeutils.delta_seconds(request.created_on, + timeutils.utcnow()) + raise exc.RequestTimeout( + "Request '%s' has expired after waiting for %0.2f" + " seconds for it to transition out of (%s) states" + % (request, request_age, ", ".join(pr.WAITING_STATES))) + except exc.RequestTimeout: + with misc.capture_failure() as fail: + LOG.debug(fail.exception_str) + request.set_result(fail) def _on_wait(self): """This function is called cyclically between draining events.""" @@ -174,11 +183,11 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): # before putting it into the requests cache to prevent the notify # processing thread get list of waiting requests and publish it # before it is published here, so it wouldn't be published twice. 
- request.set_pending() - self._requests_cache.set(request.uuid, request) - self._publish_request(request, topic) + if request.transition_and_log_error(pr.PENDING, logger=LOG): + self._requests_cache[request.uuid] = request + self._publish_request(request, topic) else: - self._requests_cache.set(request.uuid, request) + self._requests_cache[request.uuid] = request return request.result @@ -191,10 +200,10 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): correlation_id=request.uuid) except Exception: with misc.capture_failure() as failure: - LOG.exception("Failed to submit the '%s' request." % - request) - self._requests_cache.delete(request.uuid) - request.set_result(failure) + LOG.exception("Failed to submit the '%s' request.", request) + if request.transition_and_log_error(pr.FAILURE, logger=LOG): + del self._requests_cache[request.uuid] + request.set_result(failure) def _notify_topics(self): """Cyclically called to publish notify message to each topic.""" @@ -215,8 +224,35 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): """Wait for futures returned by this executor to complete.""" return async_utils.wait_for_any(fs, timeout) + def wait_for_workers(self, workers=1, timeout=None): + """Waits for geq workers to notify they are ready to do work. + + NOTE(harlowja): if a timeout is provided this function will wait + until that timeout expires, if the amount of workers does not reach + the desired amount of workers before the timeout expires then this will + return how many workers are still needed, otherwise it will + return zero. 
+ """ + if workers <= 0: + raise ValueError("Worker amount must be greater than zero") + w = None + if timeout is not None: + w = tt.StopWatch(timeout).start() + self._workers_arrival.acquire() + try: + while len(self._workers_cache) < workers: + if w is not None and w.expired(): + return workers - len(self._workers_cache) + timeout = None + if w is not None: + timeout = w.leftover() + self._workers_arrival.wait(timeout) + return 0 + finally: + self._workers_arrival.release() + def start(self): - """Start proxy thread (and associated topic notification thread).""" + """Starts proxy thread and associated topic notification thread.""" if not _is_alive(self._proxy_thread): self._proxy_thread = tu.daemon_thread(self._proxy.start) self._proxy_thread.start() @@ -227,9 +263,7 @@ class WorkerTaskExecutor(executor.TaskExecutorBase): self._periodic_thread.start() def stop(self): - """Stop proxy thread (and associated topic notification thread), so - those threads will be gracefully terminated. - """ + """Stops proxy thread and associated topic notification thread.""" if self._periodic_thread is not None: self._periodic.stop() self._periodic_thread.join() diff --git a/taskflow/engines/worker_based/protocol.py b/taskflow/engines/worker_based/protocol.py index 70859053..6e54f9fb 100644 --- a/taskflow/engines/worker_based/protocol.py +++ b/taskflow/engines/worker_based/protocol.py @@ -15,16 +15,24 @@ # under the License. import abc - -import six +import logging +import threading from concurrent import futures +import jsonschema +from jsonschema import exceptions as schema_exc +import six from taskflow.engines.action_engine import executor +from taskflow import exceptions as excp +from taskflow.openstack.common import timeutils +from taskflow.types import timing as tt +from taskflow.utils import lock_utils from taskflow.utils import misc from taskflow.utils import reflection -# NOTE(skudriashev): This is protocol events, not related to the task states. 
+# NOTE(skudriashev): This is protocol states and events, which are not +# related to task states. WAITING = 'WAITING' PENDING = 'PENDING' RUNNING = 'RUNNING' @@ -32,6 +40,35 @@ SUCCESS = 'SUCCESS' FAILURE = 'FAILURE' PROGRESS = 'PROGRESS' +# During these states the expiry is active (once out of these states the expiry +# no longer matters, since we have no way of knowing how long a task will run +# for). +WAITING_STATES = (WAITING, PENDING) + +_ALL_STATES = (WAITING, PENDING, RUNNING, SUCCESS, FAILURE, PROGRESS) +_STOP_TIMER_STATES = (RUNNING, SUCCESS, FAILURE) + +# Transitions that a request state can go through. +_ALLOWED_TRANSITIONS = ( + # Used when a executor starts to publish a request to a selected worker. + (WAITING, PENDING), + # When a request expires (isn't able to be processed by any worker). + (WAITING, FAILURE), + # Worker has started executing a request. + (PENDING, RUNNING), + # Worker failed to construct/process a request to run (either the worker + # did not transition to RUNNING in the given timeout or the worker itself + # had some type of failure before RUNNING started). + # + # Also used by the executor if the request was attempted to be published + # but that did publishing process did not work out. + (PENDING, FAILURE), + # Execution failed due to some type of remote failure. + (RUNNING, FAILURE), + # Execution succeeded & has completed. + (RUNNING, SUCCESS), +) + # Remote task actions. EXECUTE = 'execute' REVERT = 'revert' @@ -61,6 +98,14 @@ NOTIFY = 'NOTIFY' REQUEST = 'REQUEST' RESPONSE = 'RESPONSE' +# Special jsonschema validation types/adjustments. 
+_SCHEMA_TYPES = { + # See: https://github.com/Julian/jsonschema/issues/148 + 'array': (list, tuple), +} + +LOG = logging.getLogger(__name__) + @six.add_metaclass(abc.ABCMeta) class Message(object): @@ -78,18 +123,101 @@ class Notify(Message): """Represents notify message type.""" TYPE = NOTIFY + # NOTE(harlowja): the executor (the entity who initially requests a worker + # to send back a notification response) schema is different than the + # worker response schema (that's why there are two schemas here). + _RESPONSE_SCHEMA = { + "type": "object", + 'properties': { + 'topic': { + "type": "string", + }, + 'tasks': { + "type": "array", + "items": { + "type": "string", + }, + } + }, + "required": ["topic", 'tasks'], + "additionalProperties": False, + } + _SENDER_SCHEMA = { + "type": "object", + "additionalProperties": False, + } + def __init__(self, **data): self._data = data def to_dict(self): return self._data + @classmethod + def validate(cls, data, response): + if response: + schema = cls._RESPONSE_SCHEMA + else: + schema = cls._SENDER_SCHEMA + try: + jsonschema.validate(data, schema, types=_SCHEMA_TYPES) + except schema_exc.ValidationError as e: + if response: + raise excp.InvalidFormat("%s message response data not of the" + " expected format: %s" + % (cls.TYPE, e.message), e) + else: + raise excp.InvalidFormat("%s message sender data not of the" + " expected format: %s" + % (cls.TYPE, e.message), e) + class Request(Message): - """Represents request with execution results. Every request is created in - the WAITING state and is expired within the given timeout. + """Represents request with execution results. + + Every request is created in the WAITING state and is expired within the + given timeout if it does not transition out of the (WAITING, PENDING) + states. """ + TYPE = REQUEST + _SCHEMA = { + "type": "object", + 'properties': { + # These two are typically only sent on revert actions (that is + # why are are not including them in the required section). 
+ 'result': {}, + 'failures': { + "type": "object", + }, + 'task_cls': { + 'type': 'string', + }, + 'task_name': { + 'type': 'string', + }, + 'task_version': { + "oneOf": [ + { + "type": "string", + }, + { + "type": "array", + }, + ], + }, + 'action': { + "type": "string", + "enum": list(six.iterkeys(ACTION_TO_EVENT)), + }, + # Keyword arguments that end up in the revert() or execute() + # method of the remote task. + 'arguments': { + "type": "object", + }, + }, + 'required': ['task_cls', 'task_name', 'task_version', 'action'], + } def __init__(self, task, uuid, action, arguments, progress_callback, timeout, **kwargs): @@ -101,13 +229,12 @@ class Request(Message): self._arguments = arguments self._progress_callback = progress_callback self._kwargs = kwargs - self._watch = misc.StopWatch(duration=timeout).start() + self._watch = tt.StopWatch(duration=timeout).start() self._state = WAITING + self._lock = threading.Lock() + self._created_on = timeutils.utcnow() self.result = futures.Future() - def __repr__(self): - return "%s:%s" % (self._task_cls, self._action) - @property def uuid(self): return self._uuid @@ -120,6 +247,10 @@ class Request(Message): def state(self): return self._state + @property + def created_on(self): + return self._created_on + @property def expired(self): """Check if request has expired. @@ -131,13 +262,16 @@ class Request(Message): state for more then the given timeout (it is not considered to be expired in any other state). """ - if self._state in (WAITING, PENDING): + if self._state in WAITING_STATES: return self._watch.expired() return False def to_dict(self): - """Return json-serializable request, converting all `misc.Failure` - objects into dictionaries. + """Return json-serializable request. + + To convert requests that have failed due to some exception this will + convert all `misc.Failure` objects into dictionaries (which will then + be reconstituted by the receiver). 
""" request = dict(task_cls=self._task_cls, task_name=self._task.name, task_version=self._task.version, action=self._action, @@ -158,20 +292,121 @@ class Request(Message): def set_result(self, result): self.result.set_result((self._task, self._event, result)) - def set_pending(self): - self._state = PENDING - - def set_running(self): - self._state = RUNNING - self._watch.stop() - def on_progress(self, event_data, progress): self._progress_callback(self._task, event_data, progress) + def transition_and_log_error(self, new_state, logger=None): + """Transitions *and* logs an error if that transitioning raises. + + This overlays the transition function and performs nearly the same + functionality but instead of raising if the transition was not valid + it logs a warning to the provided logger and returns False to + indicate that the transition was not performed (note that this + is *different* from the transition function where False means + ignored). + """ + if logger is None: + logger = LOG + moved = False + try: + moved = self.transition(new_state) + except excp.InvalidState: + logger.warn("Failed to transition '%s' to %s state.", self, + new_state, exc_info=True) + return moved + + @lock_utils.locked + def transition(self, new_state): + """Transitions the request to a new state. + + If transition was performed, it returns True. If transition + should was ignored, it returns False. If transition was not + valid (and will not be performed), it raises an InvalidState + exception. 
+ """ + old_state = self._state + if old_state == new_state: + return False + pair = (old_state, new_state) + if pair not in _ALLOWED_TRANSITIONS: + raise excp.InvalidState("Request transition from %s to %s is" + " not allowed" % pair) + if new_state in _STOP_TIMER_STATES: + self._watch.stop() + self._state = new_state + LOG.debug("Transitioned '%s' from %s state to %s state", self, + old_state, new_state) + return True + + @classmethod + def validate(cls, data): + try: + jsonschema.validate(data, cls._SCHEMA, types=_SCHEMA_TYPES) + except schema_exc.ValidationError as e: + raise excp.InvalidFormat("%s message response data not of the" + " expected format: %s" + % (cls.TYPE, e.message), e) + class Response(Message): """Represents response message type.""" TYPE = RESPONSE + _SCHEMA = { + "type": "object", + 'properties': { + 'state': { + "type": "string", + "enum": list(_ALL_STATES), + }, + 'data': { + "anyOf": [ + { + "$ref": "#/definitions/progress", + }, + { + "$ref": "#/definitions/completion", + }, + { + "$ref": "#/definitions/empty", + }, + ], + }, + }, + "required": ["state", 'data'], + "additionalProperties": False, + "definitions": { + "progress": { + "type": "object", + "properties": { + 'progress': { + 'type': 'number', + }, + 'event_data': { + 'type': 'object', + }, + }, + "required": ["progress", 'event_data'], + "additionalProperties": False, + }, + # Used when sending *only* request state changes (and no data is + # expected). + "empty": { + "type": "object", + "additionalProperties": False, + }, + "completion": { + "type": "object", + "properties": { + # This can be any arbitrary type that a task returns, so + # thats why we can't be strict about what type it is since + # any of the json serializable types are allowed. 
+ "result": {}, + }, + "required": ["result"], + "additionalProperties": False, + }, + }, + } def __init__(self, state, **data): self._state = state @@ -195,3 +430,12 @@ class Response(Message): def to_dict(self): return dict(state=self._state, data=self._data) + + @classmethod + def validate(cls, data): + try: + jsonschema.validate(data, cls._SCHEMA, types=_SCHEMA_TYPES) + except schema_exc.ValidationError as e: + raise excp.InvalidFormat("%s message response data not of the" + " expected format: %s" + % (cls.TYPE, e.message), e) diff --git a/taskflow/engines/worker_based/proxy.py b/taskflow/engines/worker_based/proxy.py index 4d5282ee..d2991ca3 100644 --- a/taskflow/engines/worker_based/proxy.py +++ b/taskflow/engines/worker_based/proxy.py @@ -14,13 +14,16 @@ # License for the specific language governing permissions and limitations # under the License. -import kombu import logging import socket import threading +import kombu import six +from taskflow.engines.worker_based import dispatcher +from taskflow.utils import misc + LOG = logging.getLogger(__name__) # NOTE(skudriashev): A timeout of 1 is often used in environments where @@ -29,39 +32,56 @@ DRAIN_EVENTS_PERIOD = 1 class Proxy(object): - """Proxy picks up messages from the named exchange, calls on_message - callback when new message received and is used to publish messages. 
- """ + """A proxy processes messages from/to the named exchange.""" - def __init__(self, topic, exchange_name, on_message, on_wait=None, + def __init__(self, topic, exchange_name, type_handlers, on_wait=None, **kwargs): self._topic = topic self._exchange_name = exchange_name - self._on_message = on_message self._on_wait = on_wait self._running = threading.Event() - self._url = kwargs.get('url') - self._transport = kwargs.get('transport') - self._transport_opts = kwargs.get('transport_options') + self._dispatcher = dispatcher.TypeDispatcher(type_handlers) + self._dispatcher.add_requeue_filter( + # NOTE(skudriashev): Process all incoming messages only if proxy is + # running, otherwise requeue them. + lambda data, message: not self.is_running) + + url = kwargs.get('url') + transport = kwargs.get('transport') + transport_opts = kwargs.get('transport_options') self._drain_events_timeout = DRAIN_EVENTS_PERIOD - if self._transport == 'memory' and self._transport_opts: - polling_interval = self._transport_opts.get('polling_interval') - if polling_interval: + if transport == 'memory' and transport_opts: + polling_interval = transport_opts.get('polling_interval') + if polling_interval is not None: self._drain_events_timeout = polling_interval # create connection - self._conn = kombu.Connection(self._url, transport=self._transport, - transport_options=self._transport_opts) + self._conn = kombu.Connection(url, transport=transport, + transport_options=transport_opts) # create exchange self._exchange = kombu.Exchange(name=self._exchange_name, durable=False, auto_delete=True) + @property + def connection_details(self): + # The kombu drivers seem to use 'N/A' when they don't have a version... 
+ driver_version = self._conn.transport.driver_version() + if driver_version and driver_version.lower() == 'n/a': + driver_version = None + return misc.AttrDict( + uri=self._conn.as_uri(include_password=False), + transport=misc.AttrDict( + options=dict(self._conn.transport_options), + driver_type=self._conn.transport.driver_type, + driver_name=self._conn.transport.driver_name, + driver_version=driver_version)) + @property def is_running(self): - """Return whether proxy is running.""" + """Return whether the proxy is running.""" return self._running.is_set() def _make_queue(self, name, exchange, **kwargs): @@ -74,7 +94,7 @@ class Proxy(object): **kwargs) def publish(self, msg, routing_key, **kwargs): - """Publish message to the named exchange with routing key.""" + """Publish message to the named exchange with given routing key.""" LOG.debug("Sending %s", msg) if isinstance(routing_key, six.string_types): routing_keys = [routing_key] @@ -97,7 +117,7 @@ class Proxy(object): with kombu.connections[self._conn].acquire(block=True) as conn: queue = self._make_queue(self._topic, self._exchange, channel=conn) with conn.Consumer(queues=queue, - callbacks=[self._on_message]): + callbacks=[self._dispatcher.on_message]): self._running.set() while self.is_running: try: diff --git a/taskflow/engines/worker_based/server.py b/taskflow/engines/worker_based/server.py index 02f56647..73625865 100644 --- a/taskflow/engines/worker_based/server.py +++ b/taskflow/engines/worker_based/server.py @@ -17,7 +17,7 @@ import functools import logging -from kombu import exceptions as kombu_exc +import six from taskflow.engines.worker_based import protocol as pr from taskflow.engines.worker_based import proxy @@ -26,60 +26,52 @@ from taskflow.utils import misc LOG = logging.getLogger(__name__) +def delayed(executor): + """Wraps & runs the function using a futures compatible executor.""" + + def decorator(f): + + @six.wraps(f) + def wrapper(*args, **kwargs): + return executor.submit(f, *args, 
**kwargs) + + return wrapper + + return decorator + + class Server(object): """Server implementation that waits for incoming tasks requests.""" def __init__(self, topic, exchange, executor, endpoints, **kwargs): - self._proxy = proxy.Proxy(topic, exchange, self._on_message, **kwargs) + handlers = { + pr.NOTIFY: [ + delayed(executor)(self._process_notify), + functools.partial(pr.Notify.validate, response=False), + ], + pr.REQUEST: [ + delayed(executor)(self._process_request), + pr.Request.validate, + ], + } + self._proxy = proxy.Proxy(topic, exchange, handlers, + on_wait=None, **kwargs) self._topic = topic self._executor = executor self._endpoints = dict([(endpoint.name, endpoint) for endpoint in endpoints]) - def _on_message(self, data, message): - """This method is called on incoming message.""" - LOG.debug("Got message: %s", data) - # NOTE(skudriashev): Process all incoming messages only if proxy is - # running, otherwise requeue them. - if self._proxy.is_running: - # NOTE(skudriashev): Process request only if message has been - # acknowledged successfully. 
- try: - # acknowledge message before processing - message.ack() - except kombu_exc.MessageStateError: - LOG.exception("Failed to acknowledge AMQP message.") - else: - LOG.debug("AMQP message acknowledged.") - try: - msg_type = message.properties['type'] - except KeyError: - LOG.warning("The 'type' message property is missing.") - else: - if msg_type == pr.NOTIFY: - handler = self._process_notify - elif msg_type == pr.REQUEST: - handler = self._process_request - else: - LOG.warning("Unexpected message type: %s", msg_type) - return - # spawn new thread to process request - self._executor.submit(handler, data, message) - else: - try: - # requeue message - message.requeue() - except kombu_exc.MessageStateError: - LOG.exception("Failed to requeue AMQP message.") - else: - LOG.debug("AMQP message requeued.") + @property + def connection_details(self): + return self._proxy.connection_details @staticmethod def _parse_request(task_cls, task_name, action, arguments, result=None, failures=None, **kwargs): - """Parse request before it can be processed. All `misc.Failure` objects - that have been converted to dict on the remote side to be serializable - are now converted back to objects. + """Parse request before it can be further processed. + + All `misc.Failure` objects that have been converted to dict on the + remote side will now be converted back to `misc.Failure` objects. """ action_args = dict(arguments=arguments, task_name=task_name) if result is not None: @@ -96,9 +88,10 @@ class Server(object): @staticmethod def _parse_message(message): - """Parse broker message to get the `reply_to` and the `correlation_id` - properties. If required properties are missing - the `ValueError` is - raised. + """Extracts required attributes out of the message's properties. + + This extracts the `reply_to` and the `correlation_id` properties. If + any of these required properties are missing a `ValueError` is raised.
""" properties = [] for prop in ('reply_to', 'correlation_id'): diff --git a/taskflow/engines/worker_based/worker.py b/taskflow/engines/worker_based/worker.py index 0b3d50dd..49816eab 100644 --- a/taskflow/engines/worker_based/worker.py +++ b/taskflow/engines/worker_based/worker.py @@ -15,6 +15,11 @@ # under the License. import logging +import os +import platform +import socket +import string +import sys from concurrent import futures @@ -23,6 +28,37 @@ from taskflow.engines.worker_based import server from taskflow import task as t_task from taskflow.utils import reflection from taskflow.utils import threading_utils as tu +from taskflow import version + +BANNER_TEMPLATE = string.Template(""" +TaskFlow v${version} WBE worker. +Connection details: + Driver = $transport_driver + Exchange = $exchange + Topic = $topic + Transport = $transport_type + Uri = $connection_uri +Powered by: + Executor = $executor_type + Thread count = $executor_thread_count +Supported endpoints:$endpoints +System details: + Hostname = $hostname + Pid = $pid + Platform = $platform + Python = $python + Thread id = $thread_id +""".strip()) +BANNER_TEMPLATE.defaults = { + # These values may not be possible to fetch/known, default to unknown... + 'pid': '???', + 'hostname': '???', + 'executor_thread_count': '???', + 'endpoints': ' %s' % ([]), + # These are static (avoid refetching...) 
+ 'version': version.version_string(), + 'python': sys.version.split("\n", 1)[0].strip(), +} LOG = logging.getLogger(__name__) @@ -78,6 +114,7 @@ class Worker(object): self._executor = futures.ThreadPoolExecutor(self._threads_count) self._owns_executor = True self._endpoints = self._derive_endpoints(tasks) + self._exchange = exchange self._server = server.Server(topic, exchange, self._executor, self._endpoints, **kwargs) @@ -87,17 +124,48 @@ class Worker(object): derived_tasks = reflection.find_subclasses(tasks, t_task.BaseTask) return [endpoint.Endpoint(task) for task in derived_tasks] - def run(self): - """Run worker.""" - if self._threads_count != -1: - LOG.info("Starting the '%s' topic worker in %s threads.", - self._topic, self._threads_count) + def _generate_banner(self): + """Generates a banner that can be useful to display before running.""" + tpl_params = {} + connection_details = self._server.connection_details + transport = connection_details.transport + if transport.driver_version: + transport_driver = "%s v%s" % (transport.driver_name, + transport.driver_version) else: - LOG.info("Starting the '%s' topic worker using a %s.", self._topic, - self._executor) - LOG.info("Tasks list:") - for endpoint in self._endpoints: - LOG.info("|-- %s", endpoint) + transport_driver = transport.driver_name + tpl_params['transport_driver'] = transport_driver + tpl_params['exchange'] = self._exchange + tpl_params['topic'] = self._topic + tpl_params['transport_type'] = transport.driver_type + tpl_params['connection_uri'] = connection_details.uri + tpl_params['executor_type'] = reflection.get_class_name(self._executor) + if self._threads_count != -1: + tpl_params['executor_thread_count'] = self._threads_count + if self._endpoints: + pretty_endpoints = [] + for ep in self._endpoints: + pretty_endpoints.append(" - %s" % ep) + # This ensures there is a newline before the list... 
+ tpl_params['endpoints'] = "\n" + "\n".join(pretty_endpoints) + try: + tpl_params['hostname'] = socket.getfqdn() + except socket.error: + pass + try: + tpl_params['pid'] = os.getpid() + except OSError: + pass + tpl_params['platform'] = platform.platform() + tpl_params['thread_id'] = tu.get_ident() + return BANNER_TEMPLATE.substitute(BANNER_TEMPLATE.defaults, + **tpl_params) + + def run(self, display_banner=True): + """Runs the worker.""" + if display_banner: + for line in self._generate_banner().splitlines(): + LOG.info(line) self._server.start() def wait(self): diff --git a/taskflow/examples/build_a_car.py b/taskflow/examples/build_a_car.py index 7367c348..1655f2a6 100644 --- a/taskflow/examples/build_a_car.py +++ b/taskflow/examples/build_a_car.py @@ -32,14 +32,16 @@ from taskflow.patterns import graph_flow as gf from taskflow.patterns import linear_flow as lf from taskflow import task +import example_utils as eu # noqa -# INTRO: This examples shows how a graph_flow and linear_flow can be used -# together to execute non-dependent tasks by going through the steps required -# to build a simplistic car (an assembly line if you will). It also shows -# how raw functions can be wrapped into a task object instead of being forced -# to use the more heavy task base class. This is useful in scenarios where -# pre-existing code has functions that you easily want to plug-in to taskflow, -# without requiring a large amount of code changes. + +# INTRO: This examples shows how a graph flow and linear flow can be used +# together to execute dependent & non-dependent tasks by going through the +# steps required to build a simplistic car (an assembly line if you will). It +# also shows how raw functions can be wrapped into a task object instead of +# being forced to use the more *heavy* task base class. This is useful in +# scenarios where pre-existing code has functions that you easily want to +# plug-in to taskflow, without requiring a large amount of code changes. 
def build_frame(): @@ -58,6 +60,9 @@ def build_wheels(): return '4' +# These just return true to indicate success, they would in the real world +# do more than just that. + def install_engine(frame, engine): return True @@ -75,13 +80,7 @@ def install_wheels(frame, engine, engine_installed, wheels): def trash(**kwargs): - print_wrapped("Throwing away pieces of car!") - - -def print_wrapped(text): - print("-" * (len(text))) - print(text) - print("-" * (len(text))) + eu.print_wrapped("Throwing away pieces of car!") def startup(**kwargs): @@ -114,6 +113,9 @@ def task_watch(state, details): flow = lf.Flow("make-auto").add( task.FunctorTask(startup, revert=trash, provides='ran'), + # A graph flow allows automatic dependency based ordering, the ordering + # is determined by analyzing the symbols required and provided and ordering + # execution based on a functioning order (if one exists). gf.Flow("install-parts").add( task.FunctorTask(build_frame, provides='frame'), task.FunctorTask(build_engine, provides='engine'), @@ -141,7 +143,7 @@ flow = lf.Flow("make-auto").add( # the tasks should produce, in this example this specification will influence # what those tasks do and what output they create. Different tasks depend on # different information from this specification, all of which will be provided -# automatically by the engine. +# automatically by the engine to those tasks.
spec = { "frame": 'steel', "engine": 'honda', @@ -164,7 +166,7 @@ engine = taskflow.engines.load(flow, store={'spec': spec.copy()}) engine.notifier.register('*', flow_watch) engine.task_notifier.register('*', task_watch) -print_wrapped("Building a car") +eu.print_wrapped("Building a car") engine.run() # Alter the specification and ensure that the reverting logic gets triggered @@ -177,8 +179,8 @@ engine = taskflow.engines.load(flow, store={'spec': spec.copy()}) engine.notifier.register('*', flow_watch) engine.task_notifier.register('*', task_watch) -print_wrapped("Building a wrong car that doesn't match specification") +eu.print_wrapped("Building a wrong car that doesn't match specification") try: engine.run() except Exception as e: - print_wrapped("Flow failed: %s" % e) + eu.print_wrapped("Flow failed: %s" % e) diff --git a/taskflow/examples/buildsystem.py b/taskflow/examples/buildsystem.py index c17628a5..38f03040 100644 --- a/taskflow/examples/buildsystem.py +++ b/taskflow/examples/buildsystem.py @@ -29,8 +29,11 @@ import taskflow.engines from taskflow.patterns import graph_flow as gf from taskflow import task +import example_utils as eu # noqa -# In this example we demonstrate use of TargetedFlow to make oversimplified + +# In this example we demonstrate use of a target flow (a flow that only +# executes up to a specified target) to make an *oversimplified* pseudo # build system. It pretends to compile all sources to object files and # link them into an executable. 
It also can build docs, but this can be # "switched off" via targeted flow special power -- ability to ignore @@ -75,7 +78,7 @@ class BuildDocsTask(task.Task): def make_flow_and_store(source_files, executable_only=False): - flow = gf.TargetedFlow('build flow') + flow = gf.TargetedFlow('build-flow') object_targets = [] store = {} for source in source_files: @@ -97,12 +100,12 @@ def make_flow_and_store(source_files, executable_only=False): return flow, store -SOURCE_FILES = ['first.c', 'second.cpp', 'main.cpp'] +if __name__ == "__main__": + SOURCE_FILES = ['first.c', 'second.cpp', 'main.cpp'] + eu.print_wrapped('Running all tasks:') + flow, store = make_flow_and_store(SOURCE_FILES) + taskflow.engines.run(flow, store=store) -print('Running all tasks:') -flow, store = make_flow_and_store(SOURCE_FILES) -taskflow.engines.run(flow, store=store) - -print('\nBuilding executable, no docs:') -flow, store = make_flow_and_store(SOURCE_FILES, executable_only=True) -taskflow.engines.run(flow, store=store) + eu.print_wrapped('Building executable, no docs:') + flow, store = make_flow_and_store(SOURCE_FILES, executable_only=True) + taskflow.engines.run(flow, store=store) diff --git a/taskflow/examples/calculate_in_parallel.py b/taskflow/examples/calculate_in_parallel.py index f179c3ee..0215f956 100644 --- a/taskflow/examples/calculate_in_parallel.py +++ b/taskflow/examples/calculate_in_parallel.py @@ -26,25 +26,24 @@ top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), sys.path.insert(0, top_dir) import taskflow.engines - from taskflow.patterns import linear_flow as lf from taskflow.patterns import unordered_flow as uf from taskflow import task -# INTRO: This examples shows how linear_flow and unordered_flow can be used -# together to execute calculations in parallel and then use the -# result for the next task. Adder task is used for all calculations -# and arguments' bindings are used to set correct parameters to the task. 
+# INTRO: This example shows how a linear flow and an unordered flow can be +# used together to execute calculations in parallel and then use the +# result for the next task/s. The adder task is used for all calculations +# and argument bindings are used to set correct parameters for each task. # This task provides some values from as a result of execution, this can be # useful when you want to provide values from a static set to other tasks that # depend on those values existing before those tasks can run. # -# This method is *depreciated* in favor of a simpler mechanism that just -# provides those values on engine running by prepopulating the storage backend -# before your tasks are ran (which accomplishes a similar goal in a more -# uniform manner). +# NOTE(harlowja): this usage is *deprecated* in favor of a simpler mechanism +# that provides those values on engine running by prepopulating the storage +# backend before your tasks are ran (which accomplishes a similar goal in a +# more uniform manner). class Provider(task.Task): def __init__(self, name, *args, **kwargs): super(Provider, self).__init__(name=name, **kwargs) diff --git a/taskflow/examples/calculate_linear.py b/taskflow/examples/calculate_linear.py index 45c3f328..8d6f4c03 100644 --- a/taskflow/examples/calculate_linear.py +++ b/taskflow/examples/calculate_linear.py @@ -30,11 +30,11 @@ from taskflow.patterns import linear_flow as lf from taskflow import task -# INTRO: In this example linear_flow is used to group four tasks to calculate +# INTRO: In this example a linear flow is used to group four tasks to calculate # a value. A single added task is used twice, showing how this can be done # and the twice added task takes in different bound values. In the first case # it uses default parameters ('x' and 'y') and in the second case arguments -# are bound with ('z', 'd') keys from the engines storage mechanism. +# are bound with ('z', 'd') keys from the engines internal storage mechanism.
# # A multiplier task uses a binding that another task also provides, but this # example explicitly shows that 'z' parameter is bound with 'a' key @@ -47,10 +47,10 @@ from taskflow import task # useful when you want to provide values from a static set to other tasks that # depend on those values existing before those tasks can run. # -# This method is *depreciated* in favor of a simpler mechanism that just -# provides those values on engine running by prepopulating the storage backend -# before your tasks are ran (which accomplishes a similar goal in a more -# uniform manner). +# NOTE(harlowja): this usage is *deprecated* in favor of a simpler mechanism +# that just provides those values on engine running by prepopulating the +# storage backend before your tasks are ran (which accomplishes a similar goal +# in a more uniform manner). class Provider(task.Task): def __init__(self, name, *args, **kwargs): @@ -89,8 +89,8 @@ class Multiplier(task.Task): # Note here that the ordering is established so that the correct sequences # of operations occurs where the adding and multiplying is done according -# to the expected and typical mathematical model. A graph_flow could also be -# used here to automatically ensure the correct ordering. +# to the expected and typical mathematical model. A graph flow could also be +# used here to automatically infer & ensure the correct ordering. flow = lf.Flow('root').add( # Provide the initial values for other tasks to depend on. # diff --git a/taskflow/examples/delayed_return.py b/taskflow/examples/delayed_return.py index cbdc66d5..46578621 100644 --- a/taskflow/examples/delayed_return.py +++ b/taskflow/examples/delayed_return.py @@ -35,7 +35,6 @@ sys.path.insert(0, self_dir) # while the function will have returned.
import taskflow.engines - from taskflow.listeners import base from taskflow.patterns import linear_flow as lf from taskflow import states diff --git a/taskflow/examples/example_utils.py b/taskflow/examples/example_utils.py index 3da680e2..be08e53a 100644 --- a/taskflow/examples/example_utils.py +++ b/taskflow/examples/example_utils.py @@ -35,6 +35,12 @@ except ImportError: SQLALCHEMY_AVAILABLE = False +def print_wrapped(text): + print("-" * (len(text))) + print(text) + print("-" * (len(text))) + + def rm_path(persist_path): if not os.path.exists(persist_path): return diff --git a/taskflow/examples/fake_billing.py b/taskflow/examples/fake_billing.py index 16829226..9a421f92 100644 --- a/taskflow/examples/fake_billing.py +++ b/taskflow/examples/fake_billing.py @@ -28,10 +28,9 @@ top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), sys.path.insert(0, top_dir) -from taskflow.openstack.common import uuidutils - from taskflow import engines from taskflow.listeners import printing +from taskflow.openstack.common import uuidutils from taskflow.patterns import graph_flow as gf from taskflow.patterns import linear_flow as lf from taskflow import task @@ -70,7 +69,7 @@ class UrlCaller(object): # Since engines save the output of tasks to a optional persistent storage # backend resources have to be dealt with in a slightly different manner since -# resources are transient and can not be persisted (or serialized). For tasks +# resources are transient and can *not* be persisted (or serialized). For tasks # that require access to a set of resources it is a common pattern to provide # a object (in this case this object) on construction of those tasks via the # task constructor. 
@@ -149,9 +148,9 @@ class DeclareSuccess(task.Task): print("All data processed and sent to %s" % (sent_to)) -# Resources (db handles and similar) of course can't be persisted so we need -# to make sure that we pass this resource fetcher to the tasks constructor so -# that the tasks have access to any needed resources (the resources are +# Resources (db handles and similar) of course can *not* be persisted so we +# need to make sure that we pass this resource fetcher to the tasks constructor +# so that the tasks have access to any needed resources (the resources are # lazily loaded so that they are only created when they are used). resources = ResourceFetcher() flow = lf.Flow("initialize-me") diff --git a/taskflow/examples/graph_flow.py b/taskflow/examples/graph_flow.py index fd96d24a..99dfdd45 100644 --- a/taskflow/examples/graph_flow.py +++ b/taskflow/examples/graph_flow.py @@ -31,20 +31,20 @@ from taskflow.patterns import linear_flow as lf from taskflow import task -# In this example there are complex dependencies between tasks that are used to -# perform a simple set of linear equations. +# In this example there are complex *inferred* dependencies between tasks that +# are used to perform a simple set of linear equations. # # As you will see below the tasks just define what they require as input # and produce as output (named values). Then the user doesn't care about -# ordering the TASKS (in this case the tasks calculate pieces of the overall +# ordering the tasks (in this case the tasks calculate pieces of the overall # equation). # -# As you will notice graph_flow resolves dependencies automatically using the -# tasks requirements and provided values and no ordering dependency has to be -# manually created. +# As you will notice a graph flow resolves dependencies automatically using the +# tasks symbol requirements and provided symbol values and no ordering
# -# Also notice that flows of any types can be nested into a graph_flow; subflows -# dependencies will be resolved too!! Pretty cool right! +# Also notice that flows of any types can be nested into a graph flow; showing +# that subflow dependencies (and associated ordering) will be inferred too. class Adder(task.Task): diff --git a/taskflow/examples/persistence_example.py b/taskflow/examples/persistence_example.py index a8112a3d..720914cd 100644 --- a/taskflow/examples/persistence_example.py +++ b/taskflow/examples/persistence_example.py @@ -35,7 +35,7 @@ from taskflow.persistence import logbook from taskflow import task from taskflow.utils import persistence_utils as p_utils -import example_utils # noqa +import example_utils as eu # noqa # INTRO: In this example we create two tasks, one that will say hi and one # that will say bye with optional capability to raise an error while @@ -49,12 +49,6 @@ import example_utils # noqa # the database during both of these modes (failing or not failing). -def print_wrapped(text): - print("-" * (len(text))) - print(text) - print("-" * (len(text))) - - class HiTask(task.Task): def execute(self): print("Hi!") @@ -84,7 +78,7 @@ def make_flow(blowup=False): # Persist the flow and task state here, if the file/dir exists already blowup # if not don't blowup, this allows a user to see both the modes and to see # what is stored in each case. -if example_utils.SQLALCHEMY_AVAILABLE: +if eu.SQLALCHEMY_AVAILABLE: persist_path = os.path.join(tempfile.gettempdir(), "persisting.db") backend_uri = "sqlite:///%s" % (persist_path) else: @@ -96,7 +90,7 @@ if os.path.exists(persist_path): else: blowup = True -with example_utils.get_backend(backend_uri) as backend: +with eu.get_backend(backend_uri) as backend: # Now we can run. 
engine_config = { 'backend': backend, @@ -108,17 +102,17 @@ with example_utils.get_backend(backend_uri) as backend: # did exist, assume we won't blowup (and therefore this shows the undo # and redo that a flow will go through). flow = make_flow(blowup=blowup) - print_wrapped("Running") + eu.print_wrapped("Running") try: eng = engines.load(flow, **engine_config) eng.run() if not blowup: - example_utils.rm_path(persist_path) + eu.rm_path(persist_path) except Exception: # NOTE(harlowja): don't exit with non-zero status code, so that we can # print the book contents, as well as avoiding exiting also makes the # unit tests (which also runs these examples) pass. traceback.print_exc(file=sys.stdout) - print_wrapped("Book contents") + eu.print_wrapped("Book contents") print(p_utils.pformat(engine_config['book'])) diff --git a/taskflow/examples/pseudo_scoping.out.txt b/taskflow/examples/pseudo_scoping.out.txt new file mode 100644 index 00000000..81a27765 --- /dev/null +++ b/taskflow/examples/pseudo_scoping.out.txt @@ -0,0 +1,11 @@ +Running simple flow: +Fetching number for Josh. +Calling Josh 777. + +Calling many people using prefixed factory: +Fetching number for Jim. +Calling Jim 444. +Fetching number for Joe. +Calling Joe 555. +Fetching number for Josh. +Calling Josh 777. diff --git a/taskflow/examples/pseudo_scoping.py b/taskflow/examples/pseudo_scoping.py new file mode 100644 index 00000000..6a964191 --- /dev/null +++ b/taskflow/examples/pseudo_scoping.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Ivan Melnikov +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging +import os +import sys + +logging.basicConfig(level=logging.ERROR) + +top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), + os.pardir, + os.pardir)) +sys.path.insert(0, top_dir) + +import taskflow.engines +from taskflow.patterns import linear_flow as lf +from taskflow import task + +# INTRO: pseudo-scoping by adding prefixes + +# Sometimes you need scoping -- e.g. for adding several +# similar subflows to one flow to do same stuff for different +# data. But current version of TaskFlow does not allow that +# directly, so you have to resort to some kind of trickery. +# One (and more or less recommended, if not the only) way of +# solving the problem is to transform every task name, it's +# provides and requires values -- e.g. by adding prefix to them. +# This example shows how this could be done. + + +# The example task is simple: for each specified person, fetch +# his or her phone number from phone book and call. + + +PHONE_BOOK = { + 'jim': '444', + 'joe': '555', + 'iv_m': '666', + 'josh': '777' +} + + +class FetchNumberTask(task.Task): + """Task that fetches number from phone book.""" + + default_provides = 'number' + + def execute(self, person): + print('Fetching number for %s.' % person) + return PHONE_BOOK[person.lower()] + + +class CallTask(task.Task): + """Task that calls person by number.""" + + def execute(self, person, number): + print('Calling %s %s.' 
% (person, number)) + +# This is how it works for one person: + +simple_flow = lf.Flow('simple one').add( + FetchNumberTask(), + CallTask()) +print('Running simple flow:') +taskflow.engines.run(simple_flow, store={'person': 'Josh'}) + + +# To call several people you'll need a factory function that will +# make a flow with given prefix for you. We need to add prefix +# to task names, their provides and requires values. For requires, +# we use `rebind` argument of task constructor. +def subflow_factory(prefix): + def pr(what): + return '%s-%s' % (prefix, what) + + return lf.Flow(pr('flow')).add( + FetchNumberTask(pr('fetch'), + provides=pr('number'), + rebind=[pr('person')]), + CallTask(pr('call'), + rebind=[pr('person'), pr('number')]) + ) + + +def call_them_all(): + # Let's call them all. We need a flow: + flow = lf.Flow('call-them-prefixed') + + # We'll also need to inject person names with prefixed argument + # name to storage to satisfy task requirements. + persons = {} + + for person in ('Jim', 'Joe', 'Josh'): + prefix = person.lower() + persons['%s-person' % prefix] = person + flow.add(subflow_factory(prefix)) + taskflow.engines.run(flow, store=persons) + +print('\nCalling many people using prefixed factory:') +call_them_all() diff --git a/taskflow/examples/resume_from_backend.py b/taskflow/examples/resume_from_backend.py index 6e270408..677937d4 100644 --- a/taskflow/examples/resume_from_backend.py +++ b/taskflow/examples/resume_from_backend.py @@ -28,12 +28,11 @@ sys.path.insert(0, top_dir) sys.path.insert(0, self_dir) import taskflow.engines - from taskflow.patterns import linear_flow as lf from taskflow import task from taskflow.utils import persistence_utils as p_utils -import example_utils # noqa +import example_utils as eu # noqa # INTRO: In this example linear_flow is used to group three tasks, one which # will suspend the future work the engine may do. 
This suspend engine is then @@ -53,20 +52,13 @@ import example_utils # noqa # # python taskflow/examples/resume_from_backend.py \ # zookeeper://127.0.0.1:2181/taskflow/resume_from_backend/ -# -### UTILITY FUNCTIONS ######################################### - - -def print_wrapped(text): - print("-" * (len(text))) - print(text) - print("-" * (len(text))) +# UTILITY FUNCTIONS ######################################### def print_task_states(flowdetail, msg): - print_wrapped(msg) + eu.print_wrapped(msg) print("Flow '%s' state: %s" % (flowdetail.name, flowdetail.state)) # Sort by these so that our test validation doesn't get confused by the # order in which the items in the flow detail can be in. @@ -82,7 +74,7 @@ def find_flow_detail(backend, lb_id, fd_id): return lb.find(fd_id) -### CREATE FLOW ############################################### +# CREATE FLOW ############################################### class InterruptTask(task.Task): @@ -104,12 +96,12 @@ def flow_factory(): TestTask(name='second')) -### INITIALIZE PERSISTENCE #################################### +# INITIALIZE PERSISTENCE #################################### -with example_utils.get_backend() as backend: +with eu.get_backend() as backend: logbook = p_utils.temporary_log_book(backend) - ### CREATE AND RUN THE FLOW: FIRST ATTEMPT #################### + # CREATE AND RUN THE FLOW: FIRST ATTEMPT #################### flow = flow_factory() flowdetail = p_utils.create_flow_detail(flow, logbook, backend) @@ -117,13 +109,13 @@ with example_utils.get_backend() as backend: backend=backend) print_task_states(flowdetail, "At the beginning, there is no state") - print_wrapped("Running") + eu.print_wrapped("Running") engine.run() print_task_states(flowdetail, "After running") - ### RE-CREATE, RESUME, RUN #################################### + # RE-CREATE, RESUME, RUN #################################### - print_wrapped("Resuming and running again") + eu.print_wrapped("Resuming and running again") # NOTE(harlowja): 
reload the flow detail from backend, this will allow us # to resume the flow from its suspended state, but first we need to search diff --git a/taskflow/examples/resume_many_flows/resume_all.py b/taskflow/examples/resume_many_flows/resume_all.py index 8be5f6d0..071fb616 100644 --- a/taskflow/examples/resume_many_flows/resume_all.py +++ b/taskflow/examples/resume_many_flows/resume_all.py @@ -30,7 +30,6 @@ sys.path.insert(0, example_dir) import taskflow.engines - from taskflow import states import example_utils # noqa diff --git a/taskflow/examples/resume_vm_boot.py b/taskflow/examples/resume_vm_boot.py index 90756f18..acdf42b5 100644 --- a/taskflow/examples/resume_vm_boot.py +++ b/taskflow/examples/resume_vm_boot.py @@ -31,19 +31,16 @@ top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), sys.path.insert(0, top_dir) sys.path.insert(0, self_dir) -from taskflow.patterns import graph_flow as gf -from taskflow.patterns import linear_flow as lf - -from taskflow.openstack.common import uuidutils - from taskflow import engines from taskflow import exceptions as exc +from taskflow.openstack.common import uuidutils +from taskflow.patterns import graph_flow as gf +from taskflow.patterns import linear_flow as lf from taskflow import task - from taskflow.utils import eventlet_utils as e_utils from taskflow.utils import persistence_utils as p_utils -import example_utils # noqa +import example_utils as eu # noqa # INTRO: This examples shows how a hierarchy of flows can be used to create a # vm in a reliable & resumable manner using taskflow + a miniature version of @@ -61,12 +58,6 @@ def slow_down(how_long=0.5): time.sleep(how_long) -def print_wrapped(text): - print("-" * (len(text))) - print(text) - print("-" * (len(text))) - - class PrintText(task.Task): """Just inserts some text print outs in a workflow.""" def __init__(self, print_what, no_slow=False): @@ -77,10 +68,10 @@ class PrintText(task.Task): def execute(self): if self._no_slow: - print_wrapped(self._text) 
+ eu.print_wrapped(self._text) else: with slow_down(): - print_wrapped(self._text) + eu.print_wrapped(self._text) class DefineVMSpec(task.Task): @@ -229,10 +220,10 @@ def create_flow(): PrintText("Instance is running!", no_slow=True)) return flow -print_wrapped("Initializing") +eu.print_wrapped("Initializing") # Setup the persistence & resumption layer. -with example_utils.get_backend() as backend: +with eu.get_backend() as backend: try: book_id, flow_id = sys.argv[2].split("+", 1) if not uuidutils.is_uuid_like(book_id): @@ -275,7 +266,7 @@ with example_utils.get_backend() as backend: engine_conf=engine_conf) # Make me my vm please! - print_wrapped('Running') + eu.print_wrapped('Running') engine.run() # How to use. diff --git a/taskflow/examples/resume_volume_create.py b/taskflow/examples/resume_volume_create.py index f6f90bbc..0fe502e4 100644 --- a/taskflow/examples/resume_volume_create.py +++ b/taskflow/examples/resume_volume_create.py @@ -31,12 +31,10 @@ top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), sys.path.insert(0, top_dir) sys.path.insert(0, self_dir) +from taskflow import engines from taskflow.patterns import graph_flow as gf from taskflow.patterns import linear_flow as lf - -from taskflow import engines from taskflow import task - from taskflow.utils import persistence_utils as p_utils import example_utils # noqa diff --git a/taskflow/examples/reverting_linear.py b/taskflow/examples/reverting_linear.py index e6e5bb04..76c6b811 100644 --- a/taskflow/examples/reverting_linear.py +++ b/taskflow/examples/reverting_linear.py @@ -26,26 +26,19 @@ top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), sys.path.insert(0, top_dir) import taskflow.engines - from taskflow.patterns import linear_flow as lf from taskflow import task # INTRO: In this example we create three tasks, each of which ~calls~ a given -# number (provided as a function input), one of those tasks fails calling a +# number (provided as a function input), one of those 
tasks *fails* calling a # given number (the suzzie calling); this causes the workflow to enter the # reverting process, which activates the revert methods of the previous two # phone ~calls~. # # This simulated calling makes it appear like all three calls occur or all # three don't occur (transaction-like capabilities). No persistence layer is -# used here so reverting and executing will not handle process failure. -# -# This example shows a basic usage of the taskflow structures without involving -# the complexity of persistence. Using the structures that taskflow provides -# via tasks and flows makes it possible for you to easily at a later time -# hook in a persistence layer (and then gain the functionality that offers) -# when you decide the complexity of adding that layer in is 'worth it' for your -# applications usage pattern (which some applications may not need). +# used here so reverting and executing will *not* be tolerant of process +# failure. class CallJim(task.Task): @@ -94,6 +87,6 @@ except Exception as e: # how to deal with multiple tasks failing while running. # # You will also note that this is not a problem in this case since no - # parallelism is involved; this is ensured by the usage of a linear flow, - # which runs serially as well as the default engine type which is 'serial'. + # parallelism is involved; this is ensured by the usage of a linear flow + # and the default engine type which is 'serial' vs being 'parallel'. print("Flow failed: %s" % e) diff --git a/taskflow/examples/simple_linear.py b/taskflow/examples/simple_linear.py index 17fa587e..495b9633 100644 --- a/taskflow/examples/simple_linear.py +++ b/taskflow/examples/simple_linear.py @@ -36,12 +36,13 @@ from taskflow import task # sequence (the flow) and then passing the work off to an engine, with some # initial data to be ran in a reliable manner. # -# This example shows a basic usage of the taskflow structures without involving -# the complexity of persistence. 
Using the structures that taskflow provides -# via tasks and flows makes it possible for you to easily at a later time -# hook in a persistence layer (and then gain the functionality that offers) -# when you decide the complexity of adding that layer in is 'worth it' for your -# applications usage pattern (which some applications may not need). +# NOTE(harlowja): This example shows a basic usage of the taskflow structures +# without involving the complexity of persistence. Using the structures that +# taskflow provides via tasks and flows makes it possible for you to easily at +# a later time hook in a persistence layer (and then gain the functionality +# that offers) when you decide the complexity of adding that layer in +# is 'worth it' for your applications usage pattern (which certain applications +# may not need). class CallJim(task.Task): diff --git a/taskflow/examples/simple_linear_listening.py b/taskflow/examples/simple_linear_listening.py index 358f0ff2..04f9f14e 100644 --- a/taskflow/examples/simple_linear_listening.py +++ b/taskflow/examples/simple_linear_listening.py @@ -26,7 +26,6 @@ top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), sys.path.insert(0, top_dir) import taskflow.engines - from taskflow.patterns import linear_flow as lf from taskflow import task diff --git a/taskflow/examples/wbe_simple_linear.out.txt b/taskflow/examples/wbe_simple_linear.out.txt new file mode 100644 index 00000000..1585fb96 --- /dev/null +++ b/taskflow/examples/wbe_simple_linear.out.txt @@ -0,0 +1,5 @@ +Running 2 workers. +Executing some work. +Execution finished. +Result = {"result1": 1, "result2": 666, "x": 111, "y": 222, "z": 333} +Stopping workers. diff --git a/taskflow/examples/wbe_simple_linear.py b/taskflow/examples/wbe_simple_linear.py new file mode 100644 index 00000000..e28579f8 --- /dev/null +++ b/taskflow/examples/wbe_simple_linear.py @@ -0,0 +1,146 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json +import logging +import os +import sys +import tempfile +import threading + +top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), + os.pardir, + os.pardir)) +sys.path.insert(0, top_dir) + +from taskflow import engines +from taskflow.engines.worker_based import worker +from taskflow.patterns import linear_flow as lf +from taskflow.tests import utils + +import example_utils # noqa + +# INTRO: This example walks through a miniature workflow which shows how to +# start up a number of workers (these workers will process task execution and +# reversion requests using any provided input data) and then use an engine +# that creates a set of *capable* tasks and flows (the engine can not create +# tasks that the workers are not able to run, this will end in failure) that +# those workers will run and then executes that workflow seamlessly using the +# workers to perform the actual execution. +# +# NOTE(harlowja): this example simulates the expected larger number of workers +# by using a set of threads (which in this example simulate the remote workers +# that would typically be running on other external machines). + +# A filesystem can also be used as the queue transport (useful as simple +# transport type that does not involve setting up a larger mq system). If this +# is false then the memory transport is used instead, both work in standalone +# setups. 
+USE_FILESYSTEM = False +BASE_SHARED_CONF = { + 'exchange': 'taskflow', +} +WORKERS = 2 +WORKER_CONF = { + # These are the tasks the worker can execute, they *must* be importable, + # typically this list is used to restrict what workers may execute to + # a smaller set of *allowed* tasks that are known to be safe (one would + # not want to allow all python code to be executed). + 'tasks': [ + 'taskflow.tests.utils:TaskOneArgOneReturn', + 'taskflow.tests.utils:TaskMultiArgOneReturn' + ], +} +ENGINE_CONF = { + 'engine': 'worker-based', +} + + +def run(engine_conf): + flow = lf.Flow('simple-linear').add( + utils.TaskOneArgOneReturn(provides='result1'), + utils.TaskMultiArgOneReturn(provides='result2') + ) + eng = engines.load(flow, + store=dict(x=111, y=222, z=333), + engine_conf=engine_conf) + eng.run() + return eng.storage.fetch_all() + + +if __name__ == "__main__": + logging.basicConfig(level=logging.ERROR) + + # Setup our transport configuration and merge it into the worker and + # engine configuration so that both of those use it correctly. + shared_conf = dict(BASE_SHARED_CONF) + + tmp_path = None + if USE_FILESYSTEM: + tmp_path = tempfile.mkdtemp(prefix='wbe-example-') + shared_conf.update({ + 'transport': 'filesystem', + 'transport_options': { + 'data_folder_in': tmp_path, + 'data_folder_out': tmp_path, + 'polling_interval': 0.1, + }, + }) + else: + shared_conf.update({ + 'transport': 'memory', + 'transport_options': { + 'polling_interval': 0.1, + }, + }) + worker_conf = dict(WORKER_CONF) + worker_conf.update(shared_conf) + engine_conf = dict(ENGINE_CONF) + engine_conf.update(shared_conf) + workers = [] + worker_topics = [] + + try: + # Create a set of workers to simulate actual remote workers. + print('Running %s workers.' 
% (WORKERS)) + for i in range(0, WORKERS): + worker_conf['topic'] = 'worker-%s' % (i + 1) + worker_topics.append(worker_conf['topic']) + w = worker.Worker(**worker_conf) + runner = threading.Thread(target=w.run) + runner.daemon = True + runner.start() + w.wait() + workers.append((runner, w.stop)) + + # Now use those workers to do something. + print('Executing some work.') + engine_conf['topics'] = worker_topics + result = run(engine_conf) + print('Execution finished.') + # This is done so that the test examples can work correctly + # even when the keys change order (which will happen in various + # python versions). + print("Result = %s" % json.dumps(result, sort_keys=True)) + finally: + # And cleanup. + print('Stopping workers.') + while workers: + r, stopper = workers.pop() + stopper() + r.join() + if tmp_path: + example_utils.rm_path(tmp_path) diff --git a/taskflow/examples/worker_based/flow.py b/taskflow/examples/worker_based/flow.py deleted file mode 100644 index 50529a81..00000000 --- a/taskflow/examples/worker_based/flow.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import json -import logging -import sys - -import taskflow.engines -from taskflow.patterns import linear_flow as lf -from taskflow.tests import utils - -LOG = logging.getLogger(__name__) - - -if __name__ == "__main__": - logging.basicConfig(level=logging.ERROR) - engine_conf = { - 'engine': 'worker-based', - 'exchange': 'taskflow', - 'topics': ['test-topic'], - } - - # parse command line - try: - arg = sys.argv[1] - except IndexError: - pass - else: - try: - cfg = json.loads(arg) - except ValueError: - engine_conf.update(url=arg) - else: - engine_conf.update(cfg) - finally: - LOG.debug("Worker configuration: %s\n" % - json.dumps(engine_conf, sort_keys=True, indent=4)) - - # create and run flow - flow = lf.Flow('simple-linear').add( - utils.TaskOneArgOneReturn(provides='result1'), - utils.TaskMultiArgOneReturn(provides='result2') - ) - eng = taskflow.engines.load(flow, - store=dict(x=111, y=222, z=333), - engine_conf=engine_conf) - eng.run() - print(json.dumps(eng.storage.fetch_all(), sort_keys=True)) diff --git a/taskflow/examples/worker_based/worker.py b/taskflow/examples/worker_based/worker.py deleted file mode 100644 index 405813c7..00000000 --- a/taskflow/examples/worker_based/worker.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import json -import logging -import sys - -from taskflow.engines.worker_based import worker as w - -LOG = logging.getLogger(__name__) - - -if __name__ == "__main__": - logging.basicConfig(level=logging.ERROR) - worker_conf = { - 'exchange': 'taskflow', - 'topic': 'test-topic', - 'tasks': [ - 'taskflow.tests.utils:TaskOneArgOneReturn', - 'taskflow.tests.utils:TaskMultiArgOneReturn' - ] - } - - # parse command line - try: - arg = sys.argv[1] - except IndexError: - pass - else: - try: - cfg = json.loads(arg) - except ValueError: - worker_conf.update(url=arg) - else: - worker_conf.update(cfg) - finally: - LOG.debug("Worker configuration: %s\n" % - json.dumps(worker_conf, sort_keys=True, indent=4)) - - # run worker - worker = w.Worker(**worker_conf) - try: - worker.run() - except KeyboardInterrupt: - pass diff --git a/taskflow/examples/worker_based_flow.out.txt b/taskflow/examples/worker_based_flow.out.txt deleted file mode 100644 index 7b97ff93..00000000 --- a/taskflow/examples/worker_based_flow.out.txt +++ /dev/null @@ -1,6 +0,0 @@ -Run worker. -Run flow. -{"result1": 1, "result2": 666, "x": 111, "y": 222, "z": 333} - -Flow finished. -Stop worker. diff --git a/taskflow/examples/worker_based_flow.py b/taskflow/examples/worker_based_flow.py deleted file mode 100644 index ef984ee9..00000000 --- a/taskflow/examples/worker_based_flow.py +++ /dev/null @@ -1,73 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import json -import os -import subprocess -import sys -import tempfile - -self_dir = os.path.abspath(os.path.dirname(__file__)) -sys.path.insert(0, self_dir) - -import example_utils # noqa - - -def _path_to(name): - return os.path.abspath(os.path.join(os.path.dirname(__file__), - 'worker_based', name)) - - -def run_test(name, config): - cmd = [sys.executable, _path_to(name), config] - process = subprocess.Popen(cmd, stdin=None, stdout=subprocess.PIPE, - stderr=sys.stderr) - return process, cmd - - -def main(): - tmp_path = None - try: - tmp_path = tempfile.mkdtemp(prefix='worker-based-example-') - config = json.dumps({ - 'transport': 'filesystem', - 'transport_options': { - 'data_folder_in': tmp_path, - 'data_folder_out': tmp_path - } - }) - - print('Run worker.') - worker_process, _ = run_test('worker.py', config) - - print('Run flow.') - flow_process, flow_cmd = run_test('flow.py', config) - stdout, _ = flow_process.communicate() - rc = flow_process.returncode - if rc != 0: - raise RuntimeError("Could not run %s [%s]" % (flow_cmd, rc)) - print(stdout.decode()) - print('Flow finished.') - - print('Stop worker.') - worker_process.terminate() - - finally: - if tmp_path is not None: - example_utils.rm_path(tmp_path) - -if __name__ == '__main__': - main() diff --git a/taskflow/examples/wrapped_exception.py b/taskflow/examples/wrapped_exception.py index 17ae6322..7679a150 100644 --- a/taskflow/examples/wrapped_exception.py +++ b/taskflow/examples/wrapped_exception.py @@ -29,13 +29,14 @@ sys.path.insert(0, top_dir) import taskflow.engines - from taskflow import exceptions from taskflow.patterns import unordered_flow as uf from taskflow import task from taskflow.tests import utils from taskflow.utils import misc +import example_utils as eu # noqa + # INTRO: In this example we create two tasks which can trigger exceptions # based on various inputs to show how to 
analyze the thrown exceptions for # which types were thrown and handle the different types in different ways. @@ -54,12 +55,6 @@ from taskflow.utils import misc # that code to do further cleanups (if desired). -def print_wrapped(text): - print("-" * (len(text))) - print(text) - print("-" * (len(text))) - - class FirstException(Exception): """Exception that first task raises.""" @@ -112,18 +107,18 @@ def run(**store): misc.Failure.reraise_if_any(unknown_failures) -print_wrapped("Raise and catch first exception only") +eu.print_wrapped("Raise and catch first exception only") run(sleep1=0.0, raise1=True, sleep2=0.0, raise2=False) # NOTE(imelnikov): in general, sleeping does not guarantee that we'll have both # task running before one of them fails, but with current implementation this # works most of times, which is enough for our purposes here (as an example). -print_wrapped("Raise and catch both exceptions") +eu.print_wrapped("Raise and catch both exceptions") run(sleep1=1.0, raise1=True, sleep2=1.0, raise2=True) -print_wrapped("Handle one exception, and re-raise another") +eu.print_wrapped("Handle one exception, and re-raise another") try: run(sleep1=1.0, raise1=True, sleep2=1.0, raise2='boom') diff --git a/taskflow/exceptions.py b/taskflow/exceptions.py index 78186ef5..e5b9a9c2 100644 --- a/taskflow/exceptions.py +++ b/taskflow/exceptions.py @@ -84,9 +84,7 @@ class ExecutionFailure(TaskFlowException): class RequestTimeout(ExecutionFailure): - """Raised when a worker request was not finished within an allotted - timeout. - """ + """Raised when a worker request was not finished within allotted time.""" class InvalidState(ExecutionFailure): @@ -131,6 +129,10 @@ class MultipleChoices(TaskFlowException): """Raised when some decision can't be made due to many possible choices.""" +class InvalidFormat(TaskFlowException): + """Raised when some object/entity is not in the expected format.""" + + # Others. 
class WrappedFailure(Exception): diff --git a/taskflow/flow.py b/taskflow/flow.py index 0fb94338..5533ed4e 100644 --- a/taskflow/flow.py +++ b/taskflow/flow.py @@ -55,8 +55,11 @@ class Flow(object): @property def retry(self): - """A retry object that will affect control how (and if) this flow - retries while execution is underway. + """The associated flow retry controller. + + This retry controller object will affect & control how (and if) this + flow and its contained components retry when execution is underway and + a failure occurs. """ return self._retry diff --git a/taskflow/jobs/backends/__init__.py b/taskflow/jobs/backends/__init__.py index 0299636a..099f0476 100644 --- a/taskflow/jobs/backends/__init__.py +++ b/taskflow/jobs/backends/__init__.py @@ -31,9 +31,25 @@ LOG = logging.getLogger(__name__) def fetch(name, conf, namespace=BACKEND_NAMESPACE, **kwargs): - """Fetch a jobboard backend with the given configuration (and any board - specific kwargs) in the given entrypoint namespace and create it with the - given name. + """Fetch a jobboard backend with the given configuration. + + This fetch method will look for the entrypoint name in the entrypoint + namespace, and then attempt to instantiate that entrypoint using the + provided name, configuration and any board specific kwargs. + + NOTE(harlowja): to aid in making it easy to specify configuration and + options to a board the configuration (which is typical just a dictionary) + can also be a uri string that identifies the entrypoint name and any + configuration specific to that board. + + For example, given the following configuration uri: + + zookeeper:///?a=b&c=d + + This will look for the entrypoint named 'zookeeper' and will provide + a configuration object composed of the uris parameters, in this case that + is {'a': 'b', 'c': 'd'} to the constructor of that board instance (also + including the name specified). 
""" if isinstance(conf, six.string_types): conf = {'board': conf} @@ -58,8 +74,11 @@ def fetch(name, conf, namespace=BACKEND_NAMESPACE, **kwargs): @contextlib.contextmanager def backend(name, conf, namespace=BACKEND_NAMESPACE, **kwargs): - """Fetches a jobboard backend, connects to it and allows it to be used in - a context manager statement with the jobboard being closed upon completion. + """Fetches a jobboard, connects to it and closes it on completion. + + This allows a board instance to fetched, connected to, and then used in a + context manager statement with the board being closed upon context + manager exit. """ jb = fetch(name, conf, namespace=namespace, **kwargs) jb.connect() diff --git a/taskflow/jobs/backends/impl_zookeeper.py b/taskflow/jobs/backends/impl_zookeeper.py index fd73a097..4fc7b6eb 100644 --- a/taskflow/jobs/backends/impl_zookeeper.py +++ b/taskflow/jobs/backends/impl_zookeeper.py @@ -33,6 +33,7 @@ from taskflow.openstack.common import excutils from taskflow.openstack.common import jsonutils from taskflow.openstack.common import uuidutils from taskflow import states +from taskflow.types import timing as tt from taskflow.utils import kazoo_utils from taskflow.utils import lock_utils from taskflow.utils import misc @@ -431,7 +432,7 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): else: child_proc(request) - def post(self, name, book, details=None): + def post(self, name, book=None, details=None): def format_posting(job_uuid): posting = { @@ -475,6 +476,17 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): return job def claim(self, job, who): + def _unclaimable_try_find_owner(cause): + try: + owner = self.find_owner(job) + except Exception: + owner = None + if owner: + msg = "Job %s already claimed by '%s'" % (job.uuid, owner) + else: + msg = "Job %s already claimed" % (job.uuid) + return excp.UnclaimableJob(msg, cause) + _check_who(who) with self._wrap(job.uuid, job.path, "Claiming failure: %s"): # NOTE(harlowja): post as json 
which will allow for future changes @@ -482,21 +494,33 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): value = jsonutils.dumps({ 'owner': who, }) + # Ensure the target job is still existent (at the right version). + job_data, job_stat = self._client.get(job.path) + txn = self._client.transaction() + # This will abort (and not create the lock) if the job has been + # removed (somehow...) or updated by someone else to a different + # version... + txn.check(job.path, version=job_stat.version) + txn.create(job.lock_path, value=misc.binary_encode(value), + ephemeral=True) try: - self._client.create(job.lock_path, - value=misc.binary_encode(value), - ephemeral=True) - except k_exceptions.NodeExistsException: - # Try to see if we can find who the owner really is... - try: - owner = self.find_owner(job) - except Exception: - owner = None - if owner: - msg = "Job %s already claimed by '%s'" % (job.uuid, owner) + kazoo_utils.checked_commit(txn) + except k_exceptions.NodeExistsError as e: + raise _unclaimable_try_find_owner(e) + except kazoo_utils.KazooTransactionException as e: + if len(e.failures) < 2: + raise else: - msg = "Job %s already claimed" % (job.uuid) - raise excp.UnclaimableJob(msg) + if isinstance(e.failures[0], k_exceptions.NoNodeError): + raise excp.NotFound( + "Job %s not found to be claimed" % job.uuid, + e.failures[0]) + if isinstance(e.failures[1], k_exceptions.NodeExistsError): + raise _unclaimable_try_find_owner(e.failures[1]) + else: + raise excp.UnclaimableJob( + "Job %s claim failed due to transaction" + " not succeeding" % (job.uuid), e) @contextlib.contextmanager def _wrap(self, job_uuid, job_path, @@ -557,9 +581,10 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): raise excp.JobFailure("Can not consume a job %s" " which is not owned by %s" % (job.uuid, who)) - with self._client.transaction() as txn: - txn.delete(job.lock_path, version=lock_stat.version) - txn.delete(job.path, version=data_stat.version) + txn = 
self._client.transaction() + txn.delete(job.lock_path, version=lock_stat.version) + txn.delete(job.path, version=data_stat.version) + kazoo_utils.checked_commit(txn) self._remove_job(job.path) def abandon(self, job, who): @@ -576,8 +601,9 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): raise excp.JobFailure("Can not abandon a job %s" " which is not owned by %s" % (job.uuid, who)) - with self._client.transaction() as txn: - txn.delete(job.lock_path, version=lock_stat.version) + txn = self._client.transaction() + txn.delete(job.lock_path, version=lock_stat.version) + kazoo_utils.checked_commit(txn) def _state_change_listener(self, state): LOG.debug("Kazoo client has changed to state: %s", state) @@ -586,13 +612,12 @@ class ZookeeperJobBoard(jobboard.NotifyingJobBoard): # Wait until timeout expires (or forever) for jobs to appear. watch = None if timeout is not None: - watch = misc.StopWatch(duration=float(timeout)) - watch.start() + watch = tt.StopWatch(duration=float(timeout)).start() self._job_cond.acquire() try: while True: if not self._known_jobs: - if watch and watch.expired(): + if watch is not None and watch.expired(): raise excp.NotFound("Expired waiting for jobs to" " arrive; waited %s seconds" % watch.elapsed()) diff --git a/taskflow/jobs/job.py b/taskflow/jobs/job.py index 796e5d11..41ac4c16 100644 --- a/taskflow/jobs/job.py +++ b/taskflow/jobs/job.py @@ -24,16 +24,22 @@ from taskflow.openstack.common import uuidutils @six.add_metaclass(abc.ABCMeta) class Job(object): - """A job is a higher level abstraction over a set of flows as well as the - *ownership* of those flows, it is the highest piece of work that can be - owned by an entity performing those flows. + """A abstraction that represents a named and trackable unit of work. - Only one entity will be operating on the flows contained in a job at a - given time (for the foreseeable future). 
+ A job connects a logbook, a owner, last modified and created on dates and + any associated state that the job has. Since it is a connector to a + logbook, which are each associated with a set of factories that can create + set of flows, it is the current top-level container for a piece of work + that can be owned by an entity (typically that entity will read those + logbooks and run any contained flows). - It is the object that should be transferred to another entity on failure of - so that the contained flows ownership can be transferred to the secondary - entity for resumption/continuation/reverting. + Only one entity will be allowed to own and operate on the flows contained + in a job at a given time (for the foreseeable future). + + NOTE(harlowja): It is the object that will be transferred to another + entity on failure so that the contained flows ownership can be + transferred to the secondary entity/owner for resumption, continuation, + reverting... """ def __init__(self, name, uuid=None, details=None): diff --git a/taskflow/jobs/jobboard.py b/taskflow/jobs/jobboard.py index 5857d554..d7d0850f 100644 --- a/taskflow/jobs/jobboard.py +++ b/taskflow/jobs/jobboard.py @@ -24,10 +24,15 @@ from taskflow.utils import misc @six.add_metaclass(abc.ABCMeta) class JobBoard(object): - """A jobboard is an abstract representation of a place where jobs - can be posted, reposted, claimed and transferred. There can be multiple - implementations of this job board, depending on the desired semantics and - capabilities of the underlying jobboard implementation. + """A place where jobs can be posted, reposted, claimed and transferred. + + There can be multiple implementations of this job board, depending on the + desired semantics and capabilities of the underlying jobboard + implementation. 
+ + NOTE(harlowja): the name is meant to be an analogous to a board/posting + system that is used in newspapers, or elsewhere to solicit jobs that + people can interview and apply for (and then work on & complete). """ def __init__(self, name, conf): @@ -36,8 +41,7 @@ class JobBoard(object): @abc.abstractmethod def iterjobs(self, only_unclaimed=False, ensure_fresh=False): - """Returns an iterator that will provide back jobs that are currently - on this jobboard. + """Returns an iterator of jobs that are currently on this board. NOTE(harlowja): the ordering of this iteration should be by posting order (oldest to newest) if possible, but it is left up to the backing @@ -60,9 +64,10 @@ class JobBoard(object): @abc.abstractmethod def wait(self, timeout=None): - """Waits a given amount of time for job/s to be posted, when jobs are - found then an iterator will be returned that contains the jobs at - the given point in time. + """Waits a given amount of time for jobs to be posted. + + When jobs are found then an iterator will be returned that can be used + to iterate over those jobs. NOTE(harlowja): since a jobboard can be mutated on by multiple external entities at the *same* time the iterator that can be returned *may* @@ -75,8 +80,11 @@ class JobBoard(object): @abc.abstractproperty def job_count(self): - """Returns how many jobs are on this jobboard (this count may change as - new jobs appear or are removed). + """Returns how many jobs are on this jobboard. + + NOTE(harlowja): this count may change as jobs appear or are removed so + the accuracy of this count should not be used in a way that requires + it to be exact & absolute. """ @abc.abstractmethod @@ -90,11 +98,13 @@ class JobBoard(object): @abc.abstractmethod def consume(self, job, who): - """Permanently (and atomically) removes a job from the jobboard, - signaling that this job has been completed by the entity assigned - to that job. + """Permanently (and atomically) removes a job from the jobboard. 
- Only the entity that has claimed that job is able to consume a job. + Consumption signals to the board (and any others examining the board) + that this job has been completed by the entity that previously claimed + that job. + + Only the entity that has claimed that job is able to consume the job. A job that has been consumed can not be reclaimed or reposted by another entity (job postings are immutable). Any entity consuming @@ -108,12 +118,14 @@ class JobBoard(object): """ @abc.abstractmethod - def post(self, name, book, details=None): - """Atomically creates and posts a job to the jobboard, allowing others - to attempt to claim that job (and subsequently work on that job). The - contents of the provided logbook must provide enough information for - others to reference to construct & work on the desired entries that - are contained in that logbook. + def post(self, name, book=None, details=None): + """Atomically creates and posts a job to the jobboard. + + This posting allowing others to attempt to claim that job (and + subsequently work on that job). The contents of the provided logbook, + details dictionary, or name (or a mix of these) must provide *enough* + information for consumers to reference to construct and perform that + jobs contained work (whatever it may be). Once a job has been posted it can only be removed by consuming that job (after that job is claimed). Any entity can post/propose jobs @@ -124,13 +136,14 @@ class JobBoard(object): @abc.abstractmethod def claim(self, job, who): - """Atomically attempts to claim the given job for the entity and either - succeeds or fails at claiming by throwing corresponding exceptions. + """Atomically attempts to claim the provided job. If a job is claimed it is expected that the entity that claims that job - will at sometime in the future work on that jobs flows and either fail - at completing them (resulting in a reposting) or consume that job from - the jobboard (signaling its completion). 
+ will at sometime in the future work on that jobs contents and either + fail at completing them (resulting in a reposting) or consume that job + from the jobboard (signaling its completion). If claiming fails then + a corresponding exception will be raised to signal this to the claim + attempter. :param job: a job on this jobboard that can be claimed (if it does not exist then a NotFound exception will be raised). @@ -139,10 +152,12 @@ class JobBoard(object): @abc.abstractmethod def abandon(self, job, who): - """Atomically abandons the given job on the jobboard, allowing that job - to be reclaimed by others. This would typically occur if the entity - that has claimed the job has failed or is unable to complete the job - or jobs it has claimed. + """Atomically attempts to abandon the provided job. + + This abandonment signals to others that the job may now be reclaimed. + This would typically occur if the entity that has claimed the job has + failed or is unable to complete the job or jobs it had previously + claimed. Only the entity that has claimed that job can abandon a job. Any entity abandoning a unclaimed job (or a job they do not own) will cause an @@ -177,13 +192,14 @@ REMOVAL = 'REMOVAL' # existing job is/has been removed class NotifyingJobBoard(JobBoard): - """A jobboard subclass that can notify about jobs being created - and removed, which can remove the repeated usage of iterjobs() to achieve - the same operation. + """A jobboard subclass that can notify others about board events. + + Implementers are expected to notify *at least* about jobs being posted + and removed. NOTE(harlowja): notifications that are emitted *may* be emitted on a separate dedicated thread when they occur, so ensure that all callbacks - registered are thread safe. + registered are thread safe (and block for as little time as possible). 
""" def __init__(self, name, conf): super(NotifyingJobBoard, self).__init__(name, conf) diff --git a/taskflow/listeners/timing.py b/taskflow/listeners/timing.py index 15ebe82e..e21dd642 100644 --- a/taskflow/listeners/timing.py +++ b/taskflow/listeners/timing.py @@ -21,7 +21,7 @@ import logging from taskflow import exceptions as exc from taskflow.listeners import base from taskflow import states -from taskflow.utils import misc +from taskflow.types import timing as tt STARTING_STATES = (states.RUNNING, states.REVERTING) FINISHED_STATES = base.FINISH_STATES + (states.REVERTED,) @@ -64,8 +64,7 @@ class TimingListener(base.ListenerBase): if state == states.PENDING: self._timers.pop(task_name, None) elif state in STARTING_STATES: - self._timers[task_name] = misc.StopWatch() - self._timers[task_name].start() + self._timers[task_name] = tt.StopWatch().start() elif state in FINISHED_STATES: if task_name in self._timers: self._record_ending(self._timers[task_name], task_name) diff --git a/taskflow/openstack/common/gettextutils.py b/taskflow/openstack/common/gettextutils.py index ad9dd71b..e9cdb693 100644 --- a/taskflow/openstack/common/gettextutils.py +++ b/taskflow/openstack/common/gettextutils.py @@ -42,7 +42,7 @@ class TranslatorFactory(object): """Create translator functions """ - def __init__(self, domain, lazy=False, localedir=None): + def __init__(self, domain, localedir=None): """Establish a set of translation functions for the domain. 
:param domain: Name of translation domain, @@ -55,7 +55,6 @@ class TranslatorFactory(object): :type localedir: str """ self.domain = domain - self.lazy = lazy if localedir is None: localedir = os.environ.get(domain.upper() + '_LOCALEDIR') self.localedir = localedir @@ -75,16 +74,19 @@ class TranslatorFactory(object): """ if domain is None: domain = self.domain - if self.lazy: - return functools.partial(Message, domain=domain) - t = gettext.translation( - domain, - localedir=self.localedir, - fallback=True, - ) - if six.PY3: - return t.gettext - return t.ugettext + t = gettext.translation(domain, + localedir=self.localedir, + fallback=True) + # Use the appropriate method of the translation object based + # on the python version. + m = t.gettext if six.PY3 else t.ugettext + + def f(msg): + """oslo.i18n.gettextutils translation function.""" + if USE_LAZY: + return Message(msg, domain=domain) + return m(msg) + return f @property def primary(self): @@ -159,7 +161,7 @@ def enable_lazy(): USE_LAZY = True -def install(domain, lazy=False): +def install(domain): """Install a _() function using the given translation domain. Given a translation domain, install a _() function using gettext's @@ -170,26 +172,14 @@ def install(domain, lazy=False): a translation-domain-specific environment variable (e.g. NOVA_LOCALEDIR). + Note that to enable lazy translation, enable_lazy must be + called. + :param domain: the translation domain - :param lazy: indicates whether or not to install the lazy _() function. - The lazy _() introduces a way to do deferred translation - of messages by installing a _ that builds Message objects, - instead of strings, which can then be lazily translated into - any available locale. 
""" - if lazy: - from six import moves - tf = TranslatorFactory(domain, lazy=True) - moves.builtins.__dict__['_'] = tf.primary - else: - localedir = '%s_LOCALEDIR' % domain.upper() - if six.PY3: - gettext.install(domain, - localedir=os.environ.get(localedir)) - else: - gettext.install(domain, - localedir=os.environ.get(localedir), - unicode=True) + from six import moves + tf = TranslatorFactory(domain) + moves.builtins.__dict__['_'] = tf.primary class Message(six.text_type): @@ -373,8 +363,8 @@ def get_available_languages(domain): 'zh_Hant_HK': 'zh_HK', 'zh_Hant': 'zh_TW', 'fil': 'tl_PH'} - for (locale, alias) in six.iteritems(aliases): - if locale in language_list and alias not in language_list: + for (locale_, alias) in six.iteritems(aliases): + if locale_ in language_list and alias not in language_list: language_list.append(alias) _AVAILABLE_LANGUAGES[domain] = language_list diff --git a/taskflow/openstack/common/importutils.py b/taskflow/openstack/common/importutils.py index 8d412cd4..1e0e703f 100644 --- a/taskflow/openstack/common/importutils.py +++ b/taskflow/openstack/common/importutils.py @@ -24,10 +24,10 @@ import traceback def import_class(import_str): """Returns a class from a string including module and class.""" mod_str, _sep, class_str = import_str.rpartition('.') + __import__(mod_str) try: - __import__(mod_str) return getattr(sys.modules[mod_str], class_str) - except (ValueError, AttributeError): + except AttributeError: raise ImportError('Class %s cannot be found (%s)' % (class_str, traceback.format_exception(*sys.exc_info()))) diff --git a/taskflow/openstack/common/jsonutils.py b/taskflow/openstack/common/jsonutils.py index e3855ab1..8231688c 100644 --- a/taskflow/openstack/common/jsonutils.py +++ b/taskflow/openstack/common/jsonutils.py @@ -38,11 +38,19 @@ import inspect import itertools import sys +is_simplejson = False if sys.version_info < (2, 7): # On Python <= 2.6, json module is not C boosted, so try to use # simplejson module if available 
try: import simplejson as json + # NOTE(mriedem): Make sure we have a new enough version of simplejson + # to support the namedobject_as_tuple argument. This can be removed + # in the Kilo release when python 2.6 support is dropped. + if 'namedtuple_as_object' in inspect.getargspec(json.dumps).args: + is_simplejson = True + else: + import json except ImportError: import json else: @@ -165,15 +173,23 @@ def to_primitive(value, convert_instances=False, convert_datetime=True, def dumps(value, default=to_primitive, **kwargs): + if is_simplejson: + kwargs['namedtuple_as_object'] = False return json.dumps(value, default=default, **kwargs) -def loads(s, encoding='utf-8'): - return json.loads(strutils.safe_decode(s, encoding)) +def dump(obj, fp, *args, **kwargs): + if is_simplejson: + kwargs['namedtuple_as_object'] = False + return json.dump(obj, fp, *args, **kwargs) -def load(fp, encoding='utf-8'): - return json.load(codecs.getreader(encoding)(fp)) +def loads(s, encoding='utf-8', **kwargs): + return json.loads(strutils.safe_decode(s, encoding), **kwargs) + + +def load(fp, encoding='utf-8', **kwargs): + return json.load(codecs.getreader(encoding)(fp), **kwargs) try: diff --git a/taskflow/openstack/common/network_utils.py b/taskflow/openstack/common/network_utils.py index fa812b29..2729c3fb 100644 --- a/taskflow/openstack/common/network_utils.py +++ b/taskflow/openstack/common/network_utils.py @@ -17,18 +17,15 @@ Network-related utilities and helper functions. """ -# TODO(jd) Use six.moves once -# https://bitbucket.org/gutworth/six/pull-request/28 -# is merged -try: - import urllib.parse - SplitResult = urllib.parse.SplitResult -except ImportError: - import urlparse - SplitResult = urlparse.SplitResult +import logging +import socket from six.moves.urllib import parse +from taskflow.openstack.common.gettextutils import _LW + +LOG = logging.getLogger(__name__) + def parse_host_port(address, default_port=None): """Interpret a string as a host:port pair. 
@@ -52,8 +49,12 @@ def parse_host_port(address, default_port=None): ('::1', 1234) >>> parse_host_port('2001:db8:85a3::8a2e:370:7334', default_port=1234) ('2001:db8:85a3::8a2e:370:7334', 1234) - + >>> parse_host_port(None) + (None, None) """ + if not address: + return (None, None) + if address[0] == '[': # Escaped ipv6 _host, _port = address[1:].split(']') @@ -74,7 +75,7 @@ def parse_host_port(address, default_port=None): return (host, None if port is None else int(port)) -class ModifiedSplitResult(SplitResult): +class ModifiedSplitResult(parse.SplitResult): """Split results class for urlsplit.""" # NOTE(dims): The functions below are needed for Python 2.6.x. @@ -106,3 +107,57 @@ def urlsplit(url, scheme='', allow_fragments=True): path, query = path.split('?', 1) return ModifiedSplitResult(scheme, netloc, path, query, fragment) + + +def set_tcp_keepalive(sock, tcp_keepalive=True, + tcp_keepidle=None, + tcp_keepalive_interval=None, + tcp_keepalive_count=None): + """Set values for tcp keepalive parameters + + This function configures tcp keepalive parameters if users wish to do + so. + + :param tcp_keepalive: Boolean, turn on or off tcp_keepalive. If users are + not sure, this should be True, and default values will be used. + + :param tcp_keepidle: time to wait before starting to send keepalive probes + :param tcp_keepalive_interval: time between successive probes, once the + initial wait time is over + :param tcp_keepalive_count: number of probes to send before the connection + is killed + """ + + # NOTE(praneshp): Despite keepalive being a tcp concept, the level is + # still SOL_SOCKET. This is a quirk. + if isinstance(tcp_keepalive, bool): + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, tcp_keepalive) + else: + raise TypeError("tcp_keepalive must be a boolean") + + if not tcp_keepalive: + return + + # These options aren't available in the OS X version of eventlet, + # Idle + Count * Interval effectively gives you the total timeout. 
+ if tcp_keepidle is not None: + if hasattr(socket, 'TCP_KEEPIDLE'): + sock.setsockopt(socket.IPPROTO_TCP, + socket.TCP_KEEPIDLE, + tcp_keepidle) + else: + LOG.warning(_LW('tcp_keepidle not available on your system')) + if tcp_keepalive_interval is not None: + if hasattr(socket, 'TCP_KEEPINTVL'): + sock.setsockopt(socket.IPPROTO_TCP, + socket.TCP_KEEPINTVL, + tcp_keepalive_interval) + else: + LOG.warning(_LW('tcp_keepintvl not available on your system')) + if tcp_keepalive_count is not None: + if hasattr(socket, 'TCP_KEEPCNT'): + sock.setsockopt(socket.IPPROTO_TCP, + socket.TCP_KEEPCNT, + tcp_keepalive_count) + else: + LOG.warning(_LW('tcp_keepknt not available on your system')) diff --git a/taskflow/openstack/common/strutils.py b/taskflow/openstack/common/strutils.py index 0c8c6e1f..2f0fd659 100644 --- a/taskflow/openstack/common/strutils.py +++ b/taskflow/openstack/common/strutils.py @@ -50,6 +50,39 @@ SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]") SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+") +# NOTE(flaper87): The following globals are used by `mask_password` +_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password'] + +# NOTE(ldbragst): Let's build a list of regex objects using the list of +# _SANITIZE_KEYS we already have. This way, we only have to add the new key +# to the list of _SANITIZE_KEYS and we can generate regular expressions +# for XML and JSON automatically. +_SANITIZE_PATTERNS_2 = [] +_SANITIZE_PATTERNS_1 = [] + +# NOTE(amrith): Some regular expressions have only one parameter, some +# have two parameters. Use different lists of patterns here. +_FORMAT_PATTERNS_1 = [r'(%(key)s\s*[=]\s*)[^\s^\'^\"]+'] +_FORMAT_PATTERNS_2 = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])', + r'(%(key)s\s+[\"\']).*?([\"\'])', + r'([-]{2}%(key)s\s+)[^\'^\"^=^\s]+([\s]*)', + r'(<%(key)s>).*?()', + r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])', + r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])', + r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?' 
+ '[\'"]).*?([\'"])', + r'(%(key)s\s*--?[A-z]+\s*)\S+(\s*)'] + +for key in _SANITIZE_KEYS: + for pattern in _FORMAT_PATTERNS_2: + reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) + _SANITIZE_PATTERNS_2.append(reg_ex) + + for pattern in _FORMAT_PATTERNS_1: + reg_ex = re.compile(pattern % {'key': key}, re.DOTALL) + _SANITIZE_PATTERNS_1.append(reg_ex) + + def int_from_bool_as_string(subject): """Interpret a string as a boolean and return either 1 or 0. @@ -237,3 +270,42 @@ def to_slug(value, incoming=None, errors="strict"): "ascii", "ignore").decode("ascii") value = SLUGIFY_STRIP_RE.sub("", value).strip().lower() return SLUGIFY_HYPHENATE_RE.sub("-", value) + + +def mask_password(message, secret="***"): + """Replace password with 'secret' in message. + + :param message: The string which includes security information. + :param secret: value with which to replace passwords. + :returns: The unicode value of message with the password fields masked. + + For example: + + >>> mask_password("'adminPass' : 'aaaaa'") + "'adminPass' : '***'" + >>> mask_password("'admin_pass' : 'aaaaa'") + "'admin_pass' : '***'" + >>> mask_password('"password" : "aaaaa"') + '"password" : "***"' + >>> mask_password("'original_password' : 'aaaaa'") + "'original_password' : '***'" + >>> mask_password("u'original_password' : u'aaaaa'") + "u'original_password' : u'***'" + """ + message = six.text_type(message) + + # NOTE(ldbragst): Check to see if anything in message contains any key + # specified in _SANITIZE_KEYS, if not then just return the message since + # we don't have to mask any passwords. 
+ if not any(key in message for key in _SANITIZE_KEYS): + return message + + substitute = r'\g<1>' + secret + r'\g<2>' + for pattern in _SANITIZE_PATTERNS_2: + message = re.sub(pattern, substitute, message) + + substitute = r'\g<1>' + secret + for pattern in _SANITIZE_PATTERNS_1: + message = re.sub(pattern, substitute, message) + + return message diff --git a/taskflow/openstack/common/timeutils.py b/taskflow/openstack/common/timeutils.py index 52688a02..c48da95f 100644 --- a/taskflow/openstack/common/timeutils.py +++ b/taskflow/openstack/common/timeutils.py @@ -114,7 +114,7 @@ def utcnow(): def iso8601_from_timestamp(timestamp): - """Returns a iso8601 formatted date from timestamp.""" + """Returns an iso8601 formatted date from timestamp.""" return isotime(datetime.datetime.utcfromtimestamp(timestamp)) @@ -134,7 +134,7 @@ def set_time_override(override_time=None): def advance_time_delta(timedelta): """Advance overridden time using a datetime.timedelta.""" - assert(not utcnow.override_time is None) + assert utcnow.override_time is not None try: for dt in utcnow.override_time: dt += timedelta diff --git a/taskflow/patterns/graph_flow.py b/taskflow/patterns/graph_flow.py index 0ed74c75..7db4fee2 100644 --- a/taskflow/patterns/graph_flow.py +++ b/taskflow/patterns/graph_flow.py @@ -72,9 +72,12 @@ class Flow(flow.Flow): return graph def _swap(self, graph): - """Validates the replacement graph and then swaps the underlying graph - with a frozen version of the replacement graph (this maintains the - invariant that the underlying graph is immutable). + """Validates the replacement graph and then swaps the underlying graph. + + After swapping occurs the underlying graph will be frozen so that the + immutability invariant is maintained (we may be able to relax this + constraint in the future since our exposed public api does not allow + direct access to the underlying graph). 
""" if not graph.is_directed_acyclic(): raise exc.DependencyFailure("No path through the items in the" diff --git a/taskflow/persistence/backends/__init__.py b/taskflow/persistence/backends/__init__.py index 7560a30b..6faabdef 100644 --- a/taskflow/persistence/backends/__init__.py +++ b/taskflow/persistence/backends/__init__.py @@ -30,8 +30,25 @@ LOG = logging.getLogger(__name__) def fetch(conf, namespace=BACKEND_NAMESPACE, **kwargs): - """Fetches a given backend using the given configuration (and any backend - specific kwargs) in the given entrypoint namespace. + """Fetch a persistence backend with the given configuration. + + This fetch method will look for the entrypoint name in the entrypoint + namespace, and then attempt to instantiate that entrypoint using the + provided configuration and any persistence backend specific kwargs. + + NOTE(harlowja): to aid in making it easy to specify configuration and + options to a backend the configuration (which is typical just a dictionary) + can also be a uri string that identifies the entrypoint name and any + configuration specific to that backend. + + For example, given the following configuration uri: + + mysql:///?a=b&c=d + + This will look for the entrypoint named 'mysql' and will provide + a configuration object composed of the uris parameters, in this case that + is {'a': 'b', 'c': 'd'} to the constructor of that persistence backend + instance. """ backend_name = conf['connection'] try: @@ -54,8 +71,12 @@ def fetch(conf, namespace=BACKEND_NAMESPACE, **kwargs): @contextlib.contextmanager def backend(conf, namespace=BACKEND_NAMESPACE, **kwargs): - """Fetches a persistence backend, ensures that it is upgraded and upon - context manager completion closes the backend. + """Fetches a backend, connects, upgrades, then closes it on completion. 
+ + This allows a backend instance to be fetched, connected to, have its schema + upgraded (if the schema is already up to date this is a no-op) and then + used in a context manager statement with the backend being closed upon + context manager exit. """ with contextlib.closing(fetch(conf, namespace=namespace, **kwargs)) as be: with contextlib.closing(be.get_connection()) as conn: diff --git a/taskflow/persistence/backends/base.py b/taskflow/persistence/backends/base.py index 58aa3554..9185d69c 100644 --- a/taskflow/persistence/backends/base.py +++ b/taskflow/persistence/backends/base.py @@ -70,9 +70,11 @@ class Connection(object): @abc.abstractmethod def validate(self): - """Validates that a backend is still ok to be used (the semantics - of this vary depending on the backend). On failure a backend specific - exception is raised that will indicate why the failure occurred. + """Validates that a backend is still ok to be used. + + The semantics of this *may* vary depending on the backend. On failure a + backend specific exception should be raised that will indicate why the + failure occurred. """ pass diff --git a/taskflow/persistence/backends/impl_dir.py b/taskflow/persistence/backends/impl_dir.py index 7c0b3c9b..9ce4a324 100644 --- a/taskflow/persistence/backends/impl_dir.py +++ b/taskflow/persistence/backends/impl_dir.py @@ -33,10 +33,24 @@ LOG = logging.getLogger(__name__) class DirBackend(base.Backend): - """A backend that writes logbooks, flow details, and task details to a - provided directory. This backend does *not* provide transactional semantics - although it does guarantee that there will be no race conditions when - writing/reading by using file level locking. + """A directory and file based backend. + + This backend writes logbooks, flow details, and atom details to a provided + base path on the local filesystem. It will create and store those objects + in three key directories (one for logbooks, one for flow details and one + for atom details). 
It creates those associated directories and then + creates files inside those directories that represent the contents of those + objects for later reading and writing. + + This backend does *not* provide true transactional semantics. It does + guarantee that there will be no interprocess race conditions when + writing and reading by using a consistent hierarchy of file based locks. + + Example conf: + + conf = { + "path": "/tmp/taskflow", + } """ def __init__(self, conf): super(DirBackend, self).__init__(conf) diff --git a/taskflow/persistence/backends/impl_memory.py b/taskflow/persistence/backends/impl_memory.py index 2d4c5e09..f425987c 100644 --- a/taskflow/persistence/backends/impl_memory.py +++ b/taskflow/persistence/backends/impl_memory.py @@ -15,8 +15,6 @@ # License for the specific language governing permissions and limitations # under the License. -"""Implementation of in-memory backend.""" - import logging import six @@ -29,8 +27,10 @@ LOG = logging.getLogger(__name__) class MemoryBackend(base.Backend): - """A backend that writes logbooks, flow details, and task details to in - memory dictionaries. + """A in-memory (non-persistent) backend. + + This backend writes logbooks, flow details, and atom details to in-memory + dictionaries and retrieves from those dictionaries as needed. 
""" def __init__(self, conf=None): super(MemoryBackend, self).__init__(conf) diff --git a/taskflow/persistence/backends/impl_sqlalchemy.py b/taskflow/persistence/backends/impl_sqlalchemy.py index 81f053ea..1dc008eb 100644 --- a/taskflow/persistence/backends/impl_sqlalchemy.py +++ b/taskflow/persistence/backends/impl_sqlalchemy.py @@ -32,6 +32,7 @@ from sqlalchemy import orm as sa_orm from sqlalchemy import pool as sa_pool from taskflow import exceptions as exc +from taskflow.openstack.common import strutils from taskflow.persistence.backends import base from taskflow.persistence.backends.sqlalchemy import migration from taskflow.persistence.backends.sqlalchemy import models @@ -120,6 +121,18 @@ def _is_db_connection_error(reason): return _in_any(reason, list(MY_SQL_CONN_ERRORS + POSTGRES_CONN_ERRORS)) +def _as_bool(value): + if isinstance(value, bool): + return value + # This is different than strutils, but imho is an acceptable difference. + if value is None: + return False + # NOTE(harlowja): prefer strictness to avoid users getting accustomed + # to passing bad values in and this *just working* (which imho is a bad + # habit to encourage). + return strutils.bool_from_string(value, strict=True) + + def _thread_yield(dbapi_con, con_record): """Ensure other greenthreads get a chance to be executed. @@ -167,6 +180,14 @@ def _ping_listener(dbapi_conn, connection_rec, connection_proxy): class SQLAlchemyBackend(base.Backend): + """A sqlalchemy backend. + + Example conf: + + conf = { + "connection": "sqlite:////tmp/test.db", + } + """ def __init__(self, conf, engine=None): super(SQLAlchemyBackend, self).__init__(conf) if engine is not None: @@ -183,8 +204,8 @@ class SQLAlchemyBackend(base.Backend): # all the popping that will happen below. 
conf = copy.deepcopy(self._conf) engine_args = { - 'echo': misc.as_bool(conf.pop('echo', False)), - 'convert_unicode': misc.as_bool(conf.pop('convert_unicode', True)), + 'echo': _as_bool(conf.pop('echo', False)), + 'convert_unicode': _as_bool(conf.pop('convert_unicode', True)), 'pool_recycle': 3600, } if 'idle_timeout' in conf: @@ -229,13 +250,13 @@ class SQLAlchemyBackend(base.Backend): engine = sa.create_engine(sql_connection, **engine_args) checkin_yield = conf.pop('checkin_yield', eventlet_utils.EVENTLET_AVAILABLE) - if misc.as_bool(checkin_yield): + if _as_bool(checkin_yield): sa.event.listen(engine, 'checkin', _thread_yield) if 'mysql' in e_url.drivername: - if misc.as_bool(conf.pop('checkout_ping', True)): + if _as_bool(conf.pop('checkout_ping', True)): sa.event.listen(engine, 'checkout', _ping_listener) mode = None - if misc.as_bool(conf.pop('mysql_traditional_mode', True)): + if _as_bool(conf.pop('mysql_traditional_mode', True)): mode = 'TRADITIONAL' if 'mysql_sql_mode' in conf: mode = conf.pop('mysql_sql_mode') @@ -337,9 +358,13 @@ class Connection(base.Connection): failures[-1].reraise() def _run_in_session(self, functor, *args, **kwargs): - """Runs a function in a session and makes sure that sqlalchemy - exceptions aren't emitted from that sessions actions (as that would - expose the underlying backends exception model). + """Runs a callback in a session. + + This function proxy will create a session, and then call the callback + with that session (along with the provided args and kwargs). It ensures + that the session is opened & closed and makes sure that sqlalchemy + exceptions aren't emitted from the callback or sessions actions (as + that would expose the underlying sqlalchemy exception model). 
""" try: session = self._make_session() diff --git a/taskflow/persistence/backends/impl_zookeeper.py b/taskflow/persistence/backends/impl_zookeeper.py index 8f42374c..e60bad85 100644 --- a/taskflow/persistence/backends/impl_zookeeper.py +++ b/taskflow/persistence/backends/impl_zookeeper.py @@ -34,9 +34,16 @@ MIN_ZK_VERSION = (3, 4, 0) class ZkBackend(base.Backend): - """ZooKeeper as backend storage implementation + """A zookeeper backend. - Example conf (use Kazoo): + This backend writes logbooks, flow details, and atom details to a provided + base path in zookeeper. It will create and store those objects in three + key directories (one for logbooks, one for flow details and one for atom + details). It creates those associated directories and then creates files + inside those directories that represent the contents of those objects for + later reading and writing. + + Example conf: conf = { "hosts": "192.168.0.1:2181,192.168.0.2:2181,192.168.0.3:2181", @@ -126,8 +133,11 @@ class ZkConnection(base.Connection): @contextlib.contextmanager def _exc_wrapper(self): - """Exception wrapper which wraps kazoo exceptions and groups them - to taskflow exceptions. + """Exception context-manager which wraps kazoo exceptions. + + This is used to capture and wrap any kazoo specific exceptions and + then group them into corresponding taskflow exceptions (not doing + that would expose the underlying kazoo exception model). """ try: yield @@ -146,8 +156,10 @@ class ZkConnection(base.Connection): def update_atom_details(self, ad): """Update a atom detail transactionally.""" with self._exc_wrapper(): - with self._client.transaction() as txn: - return self._update_atom_details(ad, txn) + txn = self._client.transaction() + ad = self._update_atom_details(ad, txn) + k_utils.checked_commit(txn) + return ad def _update_atom_details(self, ad, txn, create_missing=False): # Determine whether the desired data exists or not. 
@@ -199,8 +211,10 @@ class ZkConnection(base.Connection): def update_flow_details(self, fd): """Update a flow detail transactionally.""" with self._exc_wrapper(): - with self._client.transaction() as txn: - return self._update_flow_details(fd, txn) + txn = self._client.transaction() + fd = self._update_flow_details(fd, txn) + k_utils.checked_commit(txn) + return fd def _update_flow_details(self, fd, txn, create_missing=False): # Determine whether the desired data exists or not @@ -296,19 +310,19 @@ class ZkConnection(base.Connection): return e_lb with self._exc_wrapper(): - with self._client.transaction() as txn: - # Determine whether the desired data exists or not. - lb_path = paths.join(self.book_path, lb.uuid) - try: - lb_data, _zstat = self._client.get(lb_path) - except k_exc.NoNodeError: - # Create a new logbook since it doesn't exist. - e_lb = _create_logbook(lb_path, txn) - else: - # Otherwise update the existing logbook instead. - e_lb = _update_logbook(lb_path, lb_data, txn) - # Finally return (updated) logbook. - return e_lb + txn = self._client.transaction() + # Determine whether the desired data exists or not. + lb_path = paths.join(self.book_path, lb.uuid) + try: + lb_data, _zstat = self._client.get(lb_path) + except k_exc.NoNodeError: + # Create a new logbook since it doesn't exist. + e_lb = _create_logbook(lb_path, txn) + else: + # Otherwise update the existing logbook instead. 
+ e_lb = _update_logbook(lb_path, lb_data, txn) + k_utils.checked_commit(txn) + return e_lb def _get_logbook(self, lb_uuid): lb_path = paths.join(self.book_path, lb_uuid) @@ -370,35 +384,38 @@ class ZkConnection(base.Connection): txn.delete(lb_path) with self._exc_wrapper(): - with self._client.transaction() as txn: - _destroy_logbook(lb_uuid, txn) + txn = self._client.transaction() + _destroy_logbook(lb_uuid, txn) + k_utils.checked_commit(txn) def clear_all(self, delete_dirs=True): """Delete all data transactionally.""" with self._exc_wrapper(): - with self._client.transaction() as txn: + txn = self._client.transaction() - # Delete all data under logbook path. - for lb_uuid in self._client.get_children(self.book_path): - lb_path = paths.join(self.book_path, lb_uuid) - for fd_uuid in self._client.get_children(lb_path): - txn.delete(paths.join(lb_path, fd_uuid)) - txn.delete(lb_path) + # Delete all data under logbook path. + for lb_uuid in self._client.get_children(self.book_path): + lb_path = paths.join(self.book_path, lb_uuid) + for fd_uuid in self._client.get_children(lb_path): + txn.delete(paths.join(lb_path, fd_uuid)) + txn.delete(lb_path) - # Delete all data under flow detail path. - for fd_uuid in self._client.get_children(self.flow_path): - fd_path = paths.join(self.flow_path, fd_uuid) - for ad_uuid in self._client.get_children(fd_path): - txn.delete(paths.join(fd_path, ad_uuid)) - txn.delete(fd_path) + # Delete all data under flow detail path. + for fd_uuid in self._client.get_children(self.flow_path): + fd_path = paths.join(self.flow_path, fd_uuid) + for ad_uuid in self._client.get_children(fd_path): + txn.delete(paths.join(fd_path, ad_uuid)) + txn.delete(fd_path) - # Delete all data under atom detail path. - for ad_uuid in self._client.get_children(self.atom_path): - ad_path = paths.join(self.atom_path, ad_uuid) - txn.delete(ad_path) + # Delete all data under atom detail path. 
+ for ad_uuid in self._client.get_children(self.atom_path): + ad_path = paths.join(self.atom_path, ad_uuid) + txn.delete(ad_path) - # Delete containing directories. - if delete_dirs: - txn.delete(self.book_path) - txn.delete(self.atom_path) - txn.delete(self.flow_path) + # Delete containing directories. + if delete_dirs: + txn.delete(self.book_path) + txn.delete(self.atom_path) + txn.delete(self.flow_path) + + k_utils.checked_commit(txn) diff --git a/taskflow/persistence/backends/sqlalchemy/alembic/versions/14b227d79a87_add_intention_column.py b/taskflow/persistence/backends/sqlalchemy/alembic/versions/14b227d79a87_add_intention_column.py index b7bfe8d8..33541d0e 100644 --- a/taskflow/persistence/backends/sqlalchemy/alembic/versions/14b227d79a87_add_intention_column.py +++ b/taskflow/persistence/backends/sqlalchemy/alembic/versions/14b227d79a87_add_intention_column.py @@ -20,6 +20,7 @@ down_revision = '84d6e888850' from alembic import op import sqlalchemy as sa + from taskflow import states diff --git a/taskflow/persistence/backends/sqlalchemy/alembic/versions/84d6e888850_add_task_detail_type.py b/taskflow/persistence/backends/sqlalchemy/alembic/versions/84d6e888850_add_task_detail_type.py index d03b6528..756cf93a 100644 --- a/taskflow/persistence/backends/sqlalchemy/alembic/versions/84d6e888850_add_task_detail_type.py +++ b/taskflow/persistence/backends/sqlalchemy/alembic/versions/84d6e888850_add_task_detail_type.py @@ -28,6 +28,7 @@ down_revision = '1c783c0c2875' from alembic import op import sqlalchemy as sa + from taskflow.persistence import logbook diff --git a/taskflow/persistence/backends/sqlalchemy/models.py b/taskflow/persistence/backends/sqlalchemy/models.py index cad86628..4a78c5cb 100644 --- a/taskflow/persistence/backends/sqlalchemy/models.py +++ b/taskflow/persistence/backends/sqlalchemy/models.py @@ -25,7 +25,6 @@ from sqlalchemy import types as types from taskflow.openstack.common import jsonutils from taskflow.openstack.common import timeutils from 
taskflow.openstack.common import uuidutils - from taskflow.persistence import logbook from taskflow import states diff --git a/taskflow/persistence/logbook.py b/taskflow/persistence/logbook.py index 31815a1d..12c6c996 100644 --- a/taskflow/persistence/logbook.py +++ b/taskflow/persistence/logbook.py @@ -64,14 +64,20 @@ def _fix_meta(data): class LogBook(object): - """This class that contains a dict of flow detail entries for a - given *job* so that the job can track what 'work' has been - completed for resumption/reverting and miscellaneous tracking + """A container of flow details, a name and associated metadata. + + Typically this class contains a collection of flow detail entries + for a given engine (or job) so that those entities can track what 'work' + has been completed for resumption, reverting and miscellaneous tracking purposes. The data contained within this class need *not* be backed by the backend storage in real time. The data in this class will only be guaranteed to be persisted when a save occurs via some backend connection. + + NOTE(harlowja): the naming of this class is analogous to a ships log or a + similar type of record used in detailing work that been completed (or work + that has not been completed). """ def __init__(self, name, uuid=None): if uuid: @@ -159,8 +165,11 @@ class LogBook(object): class FlowDetail(object): - """This class contains a dict of atom detail entries for a given - flow along with any metadata associated with that flow. + """A container of atom details, a name and associated metadata. + + Typically this class contains a collection of atom detail entries that + represent the atoms in a given flow structure (along with any other needed + metadata relevant to that flow). The data contained within this class need *not* be backed by the backend storage in real time. 
The data in this class will only be guaranteed to be @@ -241,13 +250,15 @@ class FlowDetail(object): @six.add_metaclass(abc.ABCMeta) class AtomDetail(object): - """This is a base class that contains an entry that contains the - persistence of an atom after or before (or during) it is running including - any results it may have produced, any state that it may be in (failed - for example), any exception that occurred when running and any associated - stacktrace that may have occurring during that exception being thrown - and any other metadata that should be stored along-side the details - about this atom. + """A base container of atom specific runtime information and metadata. + + This is a base class that contains attributes that are used to connect + a atom to the persistence layer during, after, or before it is running + including any results it may have produced, any state that it may be + in (failed for example), any exception that occurred when running and any + associated stacktrace that may have occurring during that exception being + thrown and any other metadata that should be stored along-side the details + about the connected atom. The data contained within this class need *not* backed by the backend storage in real time. The data in this class will only be guaranteed to be @@ -276,8 +287,11 @@ class AtomDetail(object): @property def last_results(self): - """Gets the atoms last result (if it has many results it should then - return the last one of many). + """Gets the atoms last result. + + If the atom has produced many results (for example if it has been + retried, reverted, executed and ...) this returns the last one of + many results. 
""" return self.results @@ -397,7 +411,7 @@ class TaskDetail(AtomDetail): def merge(self, other, deep_copy=False): if not isinstance(other, TaskDetail): - raise NotImplemented("Can only merge with other task details") + raise NotImplementedError("Can only merge with other task details") if other is self: return self super(TaskDetail, self).merge(other, deep_copy=deep_copy) @@ -482,7 +496,8 @@ class RetryDetail(AtomDetail): def merge(self, other, deep_copy=False): if not isinstance(other, RetryDetail): - raise NotImplemented("Can only merge with other retry details") + raise NotImplementedError("Can only merge with other retry " + "details") if other is self: return self super(RetryDetail, self).merge(other, deep_copy=deep_copy) diff --git a/taskflow/retry.py b/taskflow/retry.py index fb7330e2..425c8ea6 100644 --- a/taskflow/retry.py +++ b/taskflow/retry.py @@ -34,8 +34,7 @@ RETRY = "RETRY" @six.add_metaclass(abc.ABCMeta) class Decider(object): - """A base class or mixin for an object that can decide how to resolve - execution failures. + """A class/mixin object that can decide how to resolve execution failures. A decider may be executed multiple times on subflow or other atom failure and it is expected to make a decision about what should be done @@ -45,10 +44,11 @@ class Decider(object): @abc.abstractmethod def on_failure(self, history, *args, **kwargs): - """On subflow failure makes a decision about the future flow - execution using information about prior previous failures (if this - historical failure information is not available or was not persisted - this history will be empty). + """On failure makes a decision about the future. + + This method will typically use information about prior failures (if + this historical failure information is not available or was not + persisted this history will be empty). 
Returns retry action constant: @@ -63,9 +63,13 @@ class Decider(object): @six.add_metaclass(abc.ABCMeta) class Retry(atom.Atom, Decider): - """A base class for a retry object that decides how to resolve subflow - execution failures and may also provide execute and revert methods to alter - the inputs of subflow atoms. + """A class that can decide how to resolve execution failures. + + This abstract base class is used to inherit from and provide different + strategies that will be activated upon execution failures. Since a retry + object is an atom it may also provide execute and revert methods to alter + the inputs of connected atoms (depending on the desired strategy to be + used this can be quite useful). """ default_provides = None @@ -88,22 +92,32 @@ class Retry(atom.Atom, Decider): @abc.abstractmethod def execute(self, history, *args, **kwargs): - """Activate a given retry which will produce data required to - start or restart a subflow using previously provided values and a - history of subflow failures from previous runs. - Retry can provide same values multiple times (after each run), - the latest value will be used by tasks. Old values will be saved to - the history of retry that is a list of tuples (result, failures) - where failures is a dictionary of failures by task names. - This allows to make retries of subflow with different parameters. + """Executes the given retry atom. + + This execution activates a given retry which will typically produce + data required to start or restart a connected component using + previously provided values and a history of prior failures from + previous runs. The historical data can be analyzed to alter the + resolution strategy that this retry controller will use. + + For example, a retry can provide the same values multiple times (after + each run), the latest value or some other variation. 
Old values will be + saved to the history of the retry atom automatically, that is a list of + tuples (result, failures) are persisted where failures is a dictionary + of failures indexed by task names and the result is the execution + result returned by this retry controller during that failure resolution + attempt. """ def revert(self, history, *args, **kwargs): - """Revert this retry using the given context, all results - that had been provided by previous tries and all errors caused - a reversion. This method will be called only if a subflow must be - reverted without the retry. It won't be called on subflow retry, but - all subflow's tasks will be reverted before the retry. + """Reverts this retry using the given context. + + On revert call all results that had been provided by previous tries + and all errors caused during reversion are provided. This method + will be called *only* if a subflow must be reverted without the + retry (that is to say that the controller has ran out of resolution + options and has either given up resolution or has failed to handle + a execution failure). """ @@ -146,9 +160,12 @@ class Times(Retry): class ForEachBase(Retry): - """Base class for retries that iterate given collection.""" + """Base class for retries that iterate over a given collection.""" def _get_next_value(self, values, history): + # Fetches the next resolution result to try, removes overlapping + # entries with what has already been tried and then returns the first + # resolution strategy remaining. items = (item for item, _failures in history) remaining = misc.sequence_minus(values, items) if not remaining: @@ -166,8 +183,10 @@ class ForEachBase(Retry): class ForEach(ForEachBase): - """Accepts a collection of values to the constructor. Returns the next - element of the collection on each try. + """Applies a statically provided collection of strategies. 
+ + Accepts a collection of decision strategies on construction and returns the + next element of the collection on each try. """ def __init__(self, values, name=None, provides=None, requires=None, @@ -180,12 +199,17 @@ class ForEach(ForEachBase): return self._on_failure(self._values, history) def execute(self, history, *args, **kwargs): + # NOTE(harlowja): This allows any connected components to know the + # current resolution strategy being attempted. return self._get_next_value(self._values, history) class ParameterizedForEach(ForEachBase): - """Accepts a collection of values from storage as a parameter of execute - method. Returns the next element of the collection on each try. + """Applies a dynamically provided collection of strategies. + + Accepts a collection of decision strategies from a predecessor (or from + storage) as a parameter and returns the next element of that collection on + each try. """ def on_failure(self, values, history, *args, **kwargs): diff --git a/taskflow/states.py b/taskflow/states.py index 963e4f64..28272cd3 100644 --- a/taskflow/states.py +++ b/taskflow/states.py @@ -46,14 +46,14 @@ EXECUTE = 'EXECUTE' IGNORE = 'IGNORE' REVERT = 'REVERT' RETRY = 'RETRY' -INTENTIONS = [EXECUTE, IGNORE, REVERT, RETRY] +INTENTIONS = (EXECUTE, IGNORE, REVERT, RETRY) # Additional engine states SCHEDULING = 'SCHEDULING' WAITING = 'WAITING' ANALYZING = 'ANALYZING' -## Flow state transitions +# Flow state transitions # See: http://docs.openstack.org/developer/taskflow/states.html _ALLOWED_FLOW_TRANSITIONS = frozenset(( @@ -124,7 +124,7 @@ def check_flow_transition(old_state, new_state): % pair) -## Task state transitions +# Task state transitions # See: http://docs.openstack.org/developer/taskflow/states.html _ALLOWED_TASK_TRANSITIONS = frozenset(( diff --git a/taskflow/storage.py b/taskflow/storage.py index 353d44f3..31a8868f 100644 --- a/taskflow/storage.py +++ b/taskflow/storage.py @@ -77,9 +77,12 @@ class Storage(object): @abc.abstractproperty def 
_lock_cls(self): - """Lock class used to generate reader/writer locks for protecting - read/write access to the underlying storage backend and internally - mutating operations. + """Lock class used to generate reader/writer locks. + + These locks are used for protecting read/write access to the + underlying storage backend when internally mutating operations occur. + They ensure that we read and write data in a consistent manner when + being used in a multithreaded situation. """ def _with_connection(self, functor, *args, **kwargs): @@ -248,9 +251,12 @@ class Storage(object): self._with_connection(self._save_atom_detail, ad) def update_atom_metadata(self, atom_name, update_with): - """Updates a atoms metadata given another dictionary or a list of - (key, value) pairs to include in the updated metadata (newer keys will - overwrite older keys). + """Updates a atoms associated metadata. + + This update will take a provided dictionary or a list of (key, value) + pairs to include in the updated metadata (newer keys will overwrite + older keys) and after merging saves the updated data into the + underlying persistence layer. """ self._update_atom_metadata(atom_name, update_with) diff --git a/taskflow/task.py b/taskflow/task.py index e66b435c..cd470e72 100644 --- a/taskflow/task.py +++ b/taskflow/task.py @@ -30,8 +30,12 @@ LOG = logging.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class BaseTask(atom.Atom): - """An abstraction that defines a potential piece of work that can be - applied and can be reverted to undo the work as a single task. + """An abstraction that defines a potential piece of work. + + This potential piece of work is expected to be able to contain + functionality that defines what can be executed to accomplish that work + as well as a way of defining what can be executed to reverted/undo that + same piece of work. 
""" TASK_EVENTS = ('update_progress', ) @@ -43,6 +47,15 @@ class BaseTask(atom.Atom): # Map of events => lists of callbacks to invoke on task events. self._events_listeners = collections.defaultdict(list) + def pre_execute(self): + """Code to be run prior to executing the task. + + A common pattern for initializing the state of the system prior to + running tasks is to define some code in a base class that all your + tasks inherit from. In that class, you can define a pre_execute + method and it will always be invoked just prior to your tasks running. + """ + @abc.abstractmethod def execute(self, *args, **kwargs): """Activate a given task which will perform some operation and return. @@ -61,6 +74,25 @@ class BaseTask(atom.Atom): or remote). """ + def post_execute(self): + """Code to be run after executing the task. + + A common pattern for cleaning up global state of the system after the + execution of tasks is to define some code in a base class that all your + tasks inherit from. In that class, you can define a post_execute + method and it will always be invoked just after your tasks execute, + regardless of whether they succeded or not. + + This pattern is useful if you have global shared database sessions + that need to be cleaned up, for example. + """ + + def pre_revert(self): + """Code to be run prior to reverting the task. + + This works the same as pre_execute, but for the revert phase. + """ + def revert(self, *args, **kwargs): """Revert this task. @@ -75,6 +107,12 @@ class BaseTask(atom.Atom): contain the failure information. """ + def post_revert(self): + """Code to be run after reverting the task. + + This works the same as post_execute, but for the revert phase. + """ + def update_progress(self, progress, **kwargs): """Update task progress and notify all registered listeners. 
@@ -101,8 +139,12 @@ class BaseTask(atom.Atom): @contextlib.contextmanager def autobind(self, event_name, handler_func, **kwargs): - """Binds a given function to the task for a given event name and then - unbinds that event name and associated function automatically on exit. + """Binds & unbinds a given event handler to the task. + + This function binds and unbinds using the context manager protocol. + When events are triggered on the task of the given event name this + handler will automatically be called with the provided keyword + arguments. """ bound = False if handler_func is not None: @@ -135,10 +177,11 @@ class BaseTask(atom.Atom): self._events_listeners[event].append((handler, kwargs)) def unbind(self, event, handler=None): - """Remove a previously-attached event handler from the task. If handler - function not passed, then unbind all event handlers for the provided - event. If multiple of the same handlers are bound, then the first - match is removed (and only the first match). + """Remove a previously-attached event handler from the task. + + If a handler function not passed, then this will unbind all event + handlers for the provided event. If multiple of the same handlers are + bound, then the first match is removed (and only the first match). :param event: event type :param handler: handler previously bound diff --git a/taskflow/test.py b/taskflow/test.py index ce99a373..4de61d3e 100644 --- a/taskflow/test.py +++ b/taskflow/test.py @@ -14,15 +14,13 @@ # License for the specific language governing permissions and limitations # under the License. 
+import fixtures import mock - +import six from testtools import compat from testtools import matchers from testtools import testcase -import fixtures -import six - from taskflow import exceptions from taskflow.tests import utils from taskflow.utils import misc @@ -41,8 +39,11 @@ class GreaterThanEqual(object): class FailureRegexpMatcher(object): - """Matches if the failure was caused by the given exception and its string - matches to the given pattern. + """Matches if the failure was caused by the given exception and message. + + This will match if a given failure contains and exception of the given + class type and if its string message matches to the given regular + expression pattern. """ def __init__(self, exc_class, pattern): @@ -59,8 +60,10 @@ class FailureRegexpMatcher(object): class ItemsEqual(object): - """Matches the sequence that has same elements as reference - object, regardless of the order. + """Matches the items in two sequences. + + This matcher will validate that the provided sequence has the same elements + as a reference sequence, regardless of the order. """ def __init__(self, seq): @@ -167,9 +170,7 @@ class TestCase(testcase.TestCase): def assertFailuresRegexp(self, exc_class, pattern, callable_obj, *args, **kwargs): - """Assert that the callable failed with the given exception and its - string matches to the given pattern. - """ + """Asserts the callable failed with the given exception and message.""" try: with utils.wrap_all_failures(): callable_obj(*args, **kwargs) @@ -200,8 +201,11 @@ class MockTestCase(TestCase): return mocked def _patch_class(self, module, name, autospec=True, attach_as=None): - """Patch class, create class instance mock and attach them to - the master mock. + """Patches a modules class. + + This will create a class instance mock (using the provided name to + find the class in the module) and attach a mock class the master mock + to be cleaned up on test exit. 
""" if autospec: instance_mock = mock.Mock(spec_set=getattr(module, name)) diff --git a/taskflow/tests/test_examples.py b/taskflow/tests/test_examples.py index 43f8f4d9..2631cd47 100644 --- a/taskflow/tests/test_examples.py +++ b/taskflow/tests/test_examples.py @@ -24,7 +24,7 @@ extension; then it will be checked that output did not change. When this module is used as main module, output for all examples are generated. Please note that this will break tests as output for most -examples is indeterministic. +examples is indeterministic (due to hash randomization for example). """ @@ -91,8 +91,12 @@ def list_examples(): class ExamplesTestCase(taskflow.test.TestCase): @classmethod def update(cls): - """For each example, adds on a test method that the testing framework - will then run. + """For each example, adds on a test method. + + This newly created test method will then be activated by the testing + framework when it scans for and runs tests. This makes for a elegant + and simple way to ensure that all of the provided examples + actually work. """ def add_test_method(name, method_name): def test_example(self): diff --git a/taskflow/tests/unit/action_engine/__init__.py b/taskflow/tests/unit/action_engine/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/taskflow/tests/unit/action_engine/test_compile.py b/taskflow/tests/unit/action_engine/test_compile.py new file mode 100644 index 00000000..7207468e --- /dev/null +++ b/taskflow/tests/unit/action_engine/test_compile.py @@ -0,0 +1,419 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from taskflow.engines.action_engine import compiler +from taskflow import exceptions as exc +from taskflow.patterns import graph_flow as gf +from taskflow.patterns import linear_flow as lf +from taskflow.patterns import unordered_flow as uf +from taskflow import retry +from taskflow import test +from taskflow.tests import utils as test_utils + + +class PatternCompileTest(test.TestCase): + def test_task(self): + task = test_utils.DummyTask(name='a') + compilation = compiler.PatternCompiler().compile(task) + g = compilation.execution_graph + self.assertEqual(list(g.nodes()), [task]) + self.assertEqual(list(g.edges()), []) + + def test_retry(self): + r = retry.AlwaysRevert('r1') + msg_regex = "^Retry controller: .* must only be used .*" + self.assertRaisesRegexp(TypeError, msg_regex, + compiler.PatternCompiler().compile, r) + + def test_wrong_object(self): + msg_regex = '^Unknown type requested to flatten' + self.assertRaisesRegexp(TypeError, msg_regex, + compiler.PatternCompiler().compile, 42) + + def test_linear(self): + a, b, c, d = test_utils.make_many(4) + flo = lf.Flow("test") + flo.add(a, b, c) + sflo = lf.Flow("sub-test") + sflo.add(d) + flo.add(sflo) + + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + self.assertEqual(4, len(g)) + + order = g.topological_sort() + self.assertEqual([a, b, c, d], order) + self.assertTrue(g.has_edge(c, d)) + self.assertEqual(g.get_edge_data(c, d), {'invariant': True}) + + self.assertEqual([d], list(g.no_successors_iter())) + self.assertEqual([a], list(g.no_predecessors_iter())) 
+ + def test_invalid(self): + a, b, c = test_utils.make_many(3) + flo = lf.Flow("test") + flo.add(a, b, c) + flo.add(flo) + self.assertRaises(ValueError, + compiler.PatternCompiler().compile, flo) + + def test_unordered(self): + a, b, c, d = test_utils.make_many(4) + flo = uf.Flow("test") + flo.add(a, b, c, d) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + self.assertEqual(4, len(g)) + self.assertEqual(0, g.number_of_edges()) + self.assertEqual(set([a, b, c, d]), + set(g.no_successors_iter())) + self.assertEqual(set([a, b, c, d]), + set(g.no_predecessors_iter())) + + def test_linear_nested(self): + a, b, c, d = test_utils.make_many(4) + flo = lf.Flow("test") + flo.add(a, b) + flo2 = uf.Flow("test2") + flo2.add(c, d) + flo.add(flo2) + + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + self.assertEqual(4, len(g)) + + lb = g.subgraph([a, b]) + self.assertFalse(lb.has_edge(b, a)) + self.assertTrue(lb.has_edge(a, b)) + self.assertEqual(g.get_edge_data(a, b), {'invariant': True}) + + ub = g.subgraph([c, d]) + self.assertEqual(0, ub.number_of_edges()) + + # This ensures that c and d do not start executing until after b. 
+ self.assertTrue(g.has_edge(b, c)) + self.assertTrue(g.has_edge(b, d)) + + def test_unordered_nested(self): + a, b, c, d = test_utils.make_many(4) + flo = uf.Flow("test") + flo.add(a, b) + flo2 = lf.Flow("test2") + flo2.add(c, d) + flo.add(flo2) + + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + self.assertEqual(4, len(g)) + for n in [a, b]: + self.assertFalse(g.has_edge(n, c)) + self.assertFalse(g.has_edge(n, d)) + self.assertFalse(g.has_edge(d, c)) + self.assertTrue(g.has_edge(c, d)) + self.assertEqual(g.get_edge_data(c, d), {'invariant': True}) + + ub = g.subgraph([a, b]) + self.assertEqual(0, ub.number_of_edges()) + lb = g.subgraph([c, d]) + self.assertEqual(1, lb.number_of_edges()) + + def test_unordered_nested_in_linear(self): + a, b, c, d = test_utils.make_many(4) + flo = lf.Flow('lt').add( + a, + uf.Flow('ut').add(b, c), + d) + + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + self.assertEqual(4, len(g)) + self.assertItemsEqual(g.edges(), [ + (a, b), + (a, c), + (b, d), + (c, d) + ]) + + def test_graph(self): + a, b, c, d = test_utils.make_many(4) + flo = gf.Flow("test") + flo.add(a, b, c, d) + + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + self.assertEqual(4, len(g)) + self.assertEqual(0, g.number_of_edges()) + + def test_graph_nested(self): + a, b, c, d, e, f, g = test_utils.make_many(7) + flo = gf.Flow("test") + flo.add(a, b, c, d) + + flo2 = lf.Flow('test2') + flo2.add(e, f, g) + flo.add(flo2) + + compilation = compiler.PatternCompiler().compile(flo) + graph = compilation.execution_graph + self.assertEqual(7, len(graph)) + self.assertItemsEqual(graph.edges(data=True), [ + (e, f, {'invariant': True}), + (f, g, {'invariant': True}) + ]) + + def test_graph_nested_graph(self): + a, b, c, d, e, f, g = test_utils.make_many(7) + flo = gf.Flow("test") + flo.add(a, b, c, d) + + flo2 = gf.Flow('test2') + flo2.add(e, f, g) + 
flo.add(flo2) + + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + self.assertEqual(7, len(g)) + self.assertEqual(0, g.number_of_edges()) + + def test_graph_links(self): + a, b, c, d = test_utils.make_many(4) + flo = gf.Flow("test") + flo.add(a, b, c, d) + flo.link(a, b) + flo.link(b, c) + flo.link(c, d) + + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + self.assertEqual(4, len(g)) + self.assertItemsEqual(g.edges(data=True), [ + (a, b, {'manual': True}), + (b, c, {'manual': True}), + (c, d, {'manual': True}), + ]) + self.assertItemsEqual([a], g.no_predecessors_iter()) + self.assertItemsEqual([d], g.no_successors_iter()) + + def test_graph_dependencies(self): + a = test_utils.ProvidesRequiresTask('a', provides=['x'], requires=[]) + b = test_utils.ProvidesRequiresTask('b', provides=[], requires=['x']) + flo = gf.Flow("test").add(a, b) + + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + self.assertEqual(2, len(g)) + self.assertItemsEqual(g.edges(data=True), [ + (a, b, {'reasons': set(['x'])}) + ]) + self.assertItemsEqual([a], g.no_predecessors_iter()) + self.assertItemsEqual([b], g.no_successors_iter()) + + def test_graph_nested_requires(self): + a = test_utils.ProvidesRequiresTask('a', provides=['x'], requires=[]) + b = test_utils.ProvidesRequiresTask('b', provides=[], requires=[]) + c = test_utils.ProvidesRequiresTask('c', provides=[], requires=['x']) + flo = gf.Flow("test").add( + a, + lf.Flow("test2").add(b, c) + ) + + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + self.assertEqual(3, len(g)) + self.assertItemsEqual(g.edges(data=True), [ + (a, c, {'reasons': set(['x'])}), + (b, c, {'invariant': True}) + ]) + self.assertItemsEqual([a, b], g.no_predecessors_iter()) + self.assertItemsEqual([c], g.no_successors_iter()) + + def test_graph_nested_provides(self): + a = test_utils.ProvidesRequiresTask('a', 
provides=[], requires=['x']) + b = test_utils.ProvidesRequiresTask('b', provides=['x'], requires=[]) + c = test_utils.ProvidesRequiresTask('c', provides=[], requires=[]) + flo = gf.Flow("test").add( + a, + lf.Flow("test2").add(b, c) + ) + + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + self.assertEqual(3, len(g)) + self.assertItemsEqual(g.edges(data=True), [ + (b, c, {'invariant': True}), + (b, a, {'reasons': set(['x'])}) + ]) + self.assertItemsEqual([b], g.no_predecessors_iter()) + self.assertItemsEqual([a, c], g.no_successors_iter()) + + def test_checks_for_dups(self): + flo = gf.Flow("test").add( + test_utils.DummyTask(name="a"), + test_utils.DummyTask(name="a") + ) + self.assertRaisesRegexp(exc.Duplicate, + '^Atoms with duplicate names', + compiler.PatternCompiler().compile, flo) + + def test_checks_for_dups_globally(self): + flo = gf.Flow("test").add( + gf.Flow("int1").add(test_utils.DummyTask(name="a")), + gf.Flow("int2").add(test_utils.DummyTask(name="a"))) + self.assertRaisesRegexp(exc.Duplicate, + '^Atoms with duplicate names', + compiler.PatternCompiler().compile, flo) + + def test_retry_in_linear_flow(self): + flo = lf.Flow("test", retry.AlwaysRevert("c")) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + self.assertEqual(1, len(g)) + self.assertEqual(0, g.number_of_edges()) + + def test_retry_in_unordered_flow(self): + flo = uf.Flow("test", retry.AlwaysRevert("c")) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + self.assertEqual(1, len(g)) + self.assertEqual(0, g.number_of_edges()) + + def test_retry_in_graph_flow(self): + flo = gf.Flow("test", retry.AlwaysRevert("c")) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + self.assertEqual(1, len(g)) + self.assertEqual(0, g.number_of_edges()) + + def test_retry_in_nested_flows(self): + c1 = retry.AlwaysRevert("c1") + c2 = 
retry.AlwaysRevert("c2") + flo = lf.Flow("test", c1).add(lf.Flow("test2", c2)) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + + self.assertEqual(2, len(g)) + self.assertItemsEqual(g.edges(data=True), [ + (c1, c2, {'retry': True}) + ]) + self.assertIs(c1, g.node[c2]['retry']) + self.assertItemsEqual([c1], g.no_predecessors_iter()) + self.assertItemsEqual([c2], g.no_successors_iter()) + + def test_retry_in_linear_flow_with_tasks(self): + c = retry.AlwaysRevert("c") + a, b = test_utils.make_many(2) + flo = lf.Flow("test", c).add(a, b) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + + self.assertEqual(3, len(g)) + self.assertItemsEqual(g.edges(data=True), [ + (a, b, {'invariant': True}), + (c, a, {'retry': True}) + ]) + + self.assertItemsEqual([c], g.no_predecessors_iter()) + self.assertItemsEqual([b], g.no_successors_iter()) + self.assertIs(c, g.node[a]['retry']) + self.assertIs(c, g.node[b]['retry']) + + def test_retry_in_unordered_flow_with_tasks(self): + c = retry.AlwaysRevert("c") + a, b = test_utils.make_many(2) + flo = uf.Flow("test", c).add(a, b) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + + self.assertEqual(3, len(g)) + self.assertItemsEqual(g.edges(data=True), [ + (c, a, {'retry': True}), + (c, b, {'retry': True}) + ]) + + self.assertItemsEqual([c], g.no_predecessors_iter()) + self.assertItemsEqual([a, b], g.no_successors_iter()) + self.assertIs(c, g.node[a]['retry']) + self.assertIs(c, g.node[b]['retry']) + + def test_retry_in_graph_flow_with_tasks(self): + r = retry.AlwaysRevert("cp") + a, b, c = test_utils.make_many(3) + flo = gf.Flow("test", r).add(a, b, c).link(b, c) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + self.assertEqual(4, len(g)) + + self.assertItemsEqual(g.edges(data=True), [ + (r, a, {'retry': True}), + (r, b, {'retry': True}), + (b, c, {'manual': True}) + ]) + + 
self.assertItemsEqual([r], g.no_predecessors_iter()) + self.assertItemsEqual([a, c], g.no_successors_iter()) + self.assertIs(r, g.node[a]['retry']) + self.assertIs(r, g.node[b]['retry']) + self.assertIs(r, g.node[c]['retry']) + + def test_retries_hierarchy(self): + c1 = retry.AlwaysRevert("cp1") + c2 = retry.AlwaysRevert("cp2") + a, b, c, d = test_utils.make_many(4) + flo = lf.Flow("test", c1).add( + a, + lf.Flow("test", c2).add(b, c), + d) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + + self.assertEqual(6, len(g)) + self.assertItemsEqual(g.edges(data=True), [ + (c1, a, {'retry': True}), + (a, c2, {'invariant': True}), + (c2, b, {'retry': True}), + (b, c, {'invariant': True}), + (c, d, {'invariant': True}), + ]) + self.assertIs(c1, g.node[a]['retry']) + self.assertIs(c1, g.node[d]['retry']) + self.assertIs(c2, g.node[b]['retry']) + self.assertIs(c2, g.node[c]['retry']) + self.assertIs(c1, g.node[c2]['retry']) + self.assertIs(None, g.node[c1].get('retry')) + + def test_retry_subflows_hierarchy(self): + c1 = retry.AlwaysRevert("cp1") + a, b, c, d = test_utils.make_many(4) + flo = lf.Flow("test", c1).add( + a, + lf.Flow("test").add(b, c), + d) + compilation = compiler.PatternCompiler().compile(flo) + g = compilation.execution_graph + + self.assertEqual(5, len(g)) + self.assertItemsEqual(g.edges(data=True), [ + (c1, a, {'retry': True}), + (a, b, {'invariant': True}), + (b, c, {'invariant': True}), + (c, d, {'invariant': True}), + ]) + self.assertIs(c1, g.node[a]['retry']) + self.assertIs(c1, g.node[d]['retry']) + self.assertIs(c1, g.node[b]['retry']) + self.assertIs(c1, g.node[c]['retry']) + self.assertIs(None, g.node[c1].get('retry')) diff --git a/taskflow/tests/unit/action_engine/test_runner.py b/taskflow/tests/unit/action_engine/test_runner.py new file mode 100644 index 00000000..2e18f6b6 --- /dev/null +++ b/taskflow/tests/unit/action_engine/test_runner.py @@ -0,0 +1,307 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 
2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import six + +from taskflow.engines.action_engine import compiler +from taskflow.engines.action_engine import executor +from taskflow.engines.action_engine import runner +from taskflow.engines.action_engine import runtime +from taskflow import exceptions as excp +from taskflow.patterns import linear_flow as lf +from taskflow import states as st +from taskflow import storage +from taskflow import test +from taskflow.tests import utils as test_utils +from taskflow.types import fsm +from taskflow.utils import misc +from taskflow.utils import persistence_utils as pu + + +class _RunnerTestMixin(object): + def _make_runtime(self, flow, initial_state=None): + compilation = compiler.PatternCompiler().compile(flow) + flow_detail = pu.create_flow_detail(flow) + store = storage.SingleThreadedStorage(flow_detail) + # This ensures the tasks exist in storage... 
+ for task in compilation.execution_graph: + store.ensure_task(task.name) + if initial_state: + store.set_flow_state(initial_state) + task_notifier = misc.Notifier() + task_executor = executor.SerialTaskExecutor() + task_executor.start() + self.addCleanup(task_executor.stop) + return runtime.Runtime(compilation, store, + task_notifier, task_executor) + + +class RunnerTest(test.TestCase, _RunnerTestMixin): + def test_running(self): + flow = lf.Flow("root") + flow.add(*test_utils.make_many(1)) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + self.assertTrue(rt.runner.runnable()) + + rt = self._make_runtime(flow, initial_state=st.SUSPENDED) + self.assertFalse(rt.runner.runnable()) + + def test_run_iterations(self): + flow = lf.Flow("root") + tasks = test_utils.make_many( + 1, task_cls=test_utils.TaskNoRequiresNoReturns) + flow.add(*tasks) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + self.assertTrue(rt.runner.runnable()) + + it = rt.runner.run_iter() + state, failures = six.next(it) + self.assertEqual(st.RESUMING, state) + self.assertEqual(0, len(failures)) + + state, failures = six.next(it) + self.assertEqual(st.SCHEDULING, state) + self.assertEqual(0, len(failures)) + + state, failures = six.next(it) + self.assertEqual(st.WAITING, state) + self.assertEqual(0, len(failures)) + + state, failures = six.next(it) + self.assertEqual(st.ANALYZING, state) + self.assertEqual(0, len(failures)) + + state, failures = six.next(it) + self.assertEqual(st.SUCCESS, state) + self.assertEqual(0, len(failures)) + + self.assertRaises(StopIteration, six.next, it) + + def test_run_iterations_reverted(self): + flow = lf.Flow("root") + tasks = test_utils.make_many( + 1, task_cls=test_utils.TaskWithFailure) + flow.add(*tasks) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + self.assertTrue(rt.runner.runnable()) + + transitions = list(rt.runner.run_iter()) + state, failures = transitions[-1] + self.assertEqual(st.REVERTED, state) + 
self.assertEqual([], failures) + + self.assertEqual(st.REVERTED, rt.storage.get_atom_state(tasks[0].name)) + + def test_run_iterations_failure(self): + flow = lf.Flow("root") + tasks = test_utils.make_many( + 1, task_cls=test_utils.NastyFailingTask) + flow.add(*tasks) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + self.assertTrue(rt.runner.runnable()) + + transitions = list(rt.runner.run_iter()) + state, failures = transitions[-1] + self.assertEqual(st.FAILURE, state) + self.assertEqual(1, len(failures)) + failure = failures[0] + self.assertTrue(failure.check(RuntimeError)) + + self.assertEqual(st.FAILURE, rt.storage.get_atom_state(tasks[0].name)) + + def test_run_iterations_suspended(self): + flow = lf.Flow("root") + tasks = test_utils.make_many( + 2, task_cls=test_utils.TaskNoRequiresNoReturns) + flow.add(*tasks) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + self.assertTrue(rt.runner.runnable()) + + transitions = [] + for state, failures in rt.runner.run_iter(): + transitions.append((state, failures)) + if state == st.ANALYZING: + rt.storage.set_flow_state(st.SUSPENDED) + state, failures = transitions[-1] + self.assertEqual(st.SUSPENDED, state) + self.assertEqual([], failures) + + self.assertEqual(st.SUCCESS, rt.storage.get_atom_state(tasks[0].name)) + self.assertEqual(st.PENDING, rt.storage.get_atom_state(tasks[1].name)) + + def test_run_iterations_suspended_failure(self): + flow = lf.Flow("root") + sad_tasks = test_utils.make_many( + 1, task_cls=test_utils.NastyFailingTask) + flow.add(*sad_tasks) + happy_tasks = test_utils.make_many( + 1, task_cls=test_utils.TaskNoRequiresNoReturns, offset=1) + flow.add(*happy_tasks) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + self.assertTrue(rt.runner.runnable()) + + transitions = [] + for state, failures in rt.runner.run_iter(): + transitions.append((state, failures)) + if state == st.ANALYZING: + rt.storage.set_flow_state(st.SUSPENDED) + state, failures = transitions[-1] + 
self.assertEqual(st.SUSPENDED, state) + self.assertEqual([], failures) + + self.assertEqual(st.PENDING, + rt.storage.get_atom_state(happy_tasks[0].name)) + self.assertEqual(st.FAILURE, + rt.storage.get_atom_state(sad_tasks[0].name)) + + +class RunnerBuilderTest(test.TestCase, _RunnerTestMixin): + def test_builder_manual_process(self): + flow = lf.Flow("root") + tasks = test_utils.make_many( + 1, task_cls=test_utils.TaskNoRequiresNoReturns) + flow.add(*tasks) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + machine, memory = rt.runner.builder.build() + self.assertTrue(rt.runner.builder.runnable()) + self.assertRaises(fsm.NotInitialized, machine.process_event, 'poke') + + # Should now be pending... + self.assertEqual(st.PENDING, rt.storage.get_atom_state(tasks[0].name)) + + machine.initialize() + self.assertEqual(runner._UNDEFINED, machine.current_state) + self.assertFalse(machine.terminated) + self.assertRaises(excp.NotFound, machine.process_event, 'poke') + last_state = machine.current_state + + reaction, terminal = machine.process_event('start') + self.assertFalse(terminal) + self.assertIsNotNone(reaction) + self.assertEqual(st.RESUMING, machine.current_state) + self.assertRaises(excp.NotFound, machine.process_event, 'poke') + + last_state = machine.current_state + cb, args, kwargs = reaction + next_event = cb(last_state, machine.current_state, + 'start', *args, **kwargs) + reaction, terminal = machine.process_event(next_event) + self.assertFalse(terminal) + self.assertIsNotNone(reaction) + self.assertEqual(st.SCHEDULING, machine.current_state) + self.assertRaises(excp.NotFound, machine.process_event, 'poke') + + last_state = machine.current_state + cb, args, kwargs = reaction + next_event = cb(last_state, machine.current_state, + next_event, *args, **kwargs) + reaction, terminal = machine.process_event(next_event) + self.assertFalse(terminal) + self.assertEqual(st.WAITING, machine.current_state) + self.assertRaises(excp.NotFound, 
machine.process_event, 'poke') + + # Should now be running... + self.assertEqual(st.RUNNING, rt.storage.get_atom_state(tasks[0].name)) + + last_state = machine.current_state + cb, args, kwargs = reaction + next_event = cb(last_state, machine.current_state, + next_event, *args, **kwargs) + reaction, terminal = machine.process_event(next_event) + self.assertFalse(terminal) + self.assertIsNotNone(reaction) + self.assertEqual(st.ANALYZING, machine.current_state) + self.assertRaises(excp.NotFound, machine.process_event, 'poke') + + last_state = machine.current_state + cb, args, kwargs = reaction + next_event = cb(last_state, machine.current_state, + next_event, *args, **kwargs) + reaction, terminal = machine.process_event(next_event) + self.assertFalse(terminal) + self.assertEqual(runner._GAME_OVER, machine.current_state) + + # Should now be done... + self.assertEqual(st.SUCCESS, rt.storage.get_atom_state(tasks[0].name)) + + def test_builder_automatic_process(self): + flow = lf.Flow("root") + tasks = test_utils.make_many( + 1, task_cls=test_utils.TaskNoRequiresNoReturns) + flow.add(*tasks) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + machine, memory = rt.runner.builder.build() + self.assertTrue(rt.runner.builder.runnable()) + + transitions = list(machine.run_iter('start')) + self.assertEqual((runner._UNDEFINED, st.RESUMING), transitions[0]) + self.assertEqual((runner._GAME_OVER, st.SUCCESS), transitions[-1]) + self.assertEqual(st.SUCCESS, rt.storage.get_atom_state(tasks[0].name)) + + def test_builder_automatic_process_failure(self): + flow = lf.Flow("root") + tasks = test_utils.make_many(1, task_cls=test_utils.NastyFailingTask) + flow.add(*tasks) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + machine, memory = rt.runner.builder.build() + self.assertTrue(rt.runner.builder.runnable()) + + transitions = list(machine.run_iter('start')) + self.assertEqual((runner._GAME_OVER, st.FAILURE), transitions[-1]) + self.assertEqual(1, 
len(memory.failures)) + + def test_builder_automatic_process_reverted(self): + flow = lf.Flow("root") + tasks = test_utils.make_many(1, task_cls=test_utils.TaskWithFailure) + flow.add(*tasks) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + machine, memory = rt.runner.builder.build() + self.assertTrue(rt.runner.builder.runnable()) + + transitions = list(machine.run_iter('start')) + self.assertEqual((runner._GAME_OVER, st.REVERTED), transitions[-1]) + self.assertEqual(st.REVERTED, rt.storage.get_atom_state(tasks[0].name)) + + def test_builder_expected_transition_occurrences(self): + flow = lf.Flow("root") + tasks = test_utils.make_many( + 10, task_cls=test_utils.TaskNoRequiresNoReturns) + flow.add(*tasks) + + rt = self._make_runtime(flow, initial_state=st.RUNNING) + machine, memory = rt.runner.builder.build() + transitions = list(machine.run_iter('start')) + + occurrences = dict((t, transitions.count(t)) for t in transitions) + self.assertEqual(10, occurrences.get((st.SCHEDULING, st.WAITING))) + self.assertEqual(10, occurrences.get((st.WAITING, st.ANALYZING))) + self.assertEqual(9, occurrences.get((st.ANALYZING, st.SCHEDULING))) + self.assertEqual(1, occurrences.get((runner._GAME_OVER, st.SUCCESS))) + self.assertEqual(1, occurrences.get((runner._UNDEFINED, st.RESUMING))) + + self.assertEqual(0, len(memory.next_nodes)) + self.assertEqual(0, len(memory.not_done)) + self.assertEqual(0, len(memory.failures)) diff --git a/taskflow/tests/unit/jobs/__init__.py b/taskflow/tests/unit/jobs/__init__.py index da9e7d90..e69de29b 100644 --- a/taskflow/tests/unit/jobs/__init__.py +++ b/taskflow/tests/unit/jobs/__init__.py @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2013 Yahoo! Inc. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. diff --git a/taskflow/tests/unit/jobs/base.py b/taskflow/tests/unit/jobs/base.py index c75f424c..a178a8af 100644 --- a/taskflow/tests/unit/jobs/base.py +++ b/taskflow/tests/unit/jobs/base.py @@ -15,11 +15,11 @@ # under the License. import contextlib -import mock import threading import time from kazoo.recipe import watchers +import mock from taskflow import exceptions as excp from taskflow.openstack.common import uuidutils diff --git a/taskflow/tests/unit/jobs/test_zk_job.py b/taskflow/tests/unit/jobs/test_zk_job.py index 3d5f8228..7268a1a4 100644 --- a/taskflow/tests/unit/jobs/test_zk_job.py +++ b/taskflow/tests/unit/jobs/test_zk_job.py @@ -16,16 +16,14 @@ import six import testtools - from zake import fake_client from zake import utils as zake_utils from taskflow.jobs.backends import impl_zookeeper -from taskflow import states -from taskflow import test - from taskflow.openstack.common import jsonutils from taskflow.openstack.common import uuidutils +from taskflow import states +from taskflow import test from taskflow.tests.unit.jobs import base from taskflow.tests import utils as test_utils from taskflow.utils import kazoo_utils diff --git a/taskflow/tests/unit/patterns/test_graph_flow.py b/taskflow/tests/unit/patterns/test_graph_flow.py index 2a95ad2d..c7dad38e 100644 --- a/taskflow/tests/unit/patterns/test_graph_flow.py +++ b/taskflow/tests/unit/patterns/test_graph_flow.py @@ -17,7 +17,6 @@ from taskflow import exceptions as exc from taskflow.patterns import graph_flow as gf from taskflow import retry - from taskflow import test from taskflow.tests 
import utils diff --git a/taskflow/tests/unit/patterns/test_linear_flow.py b/taskflow/tests/unit/patterns/test_linear_flow.py index 7ff88860..a0dbd0d7 100644 --- a/taskflow/tests/unit/patterns/test_linear_flow.py +++ b/taskflow/tests/unit/patterns/test_linear_flow.py @@ -17,7 +17,6 @@ from taskflow import exceptions as exc from taskflow.patterns import linear_flow as lf from taskflow import retry - from taskflow import test from taskflow.tests import utils diff --git a/taskflow/tests/unit/patterns/test_unordered_flow.py b/taskflow/tests/unit/patterns/test_unordered_flow.py index 4759f8d2..a4043fe2 100644 --- a/taskflow/tests/unit/patterns/test_unordered_flow.py +++ b/taskflow/tests/unit/patterns/test_unordered_flow.py @@ -17,7 +17,6 @@ from taskflow import exceptions as exc from taskflow.patterns import unordered_flow as uf from taskflow import retry - from taskflow import test from taskflow.tests import utils diff --git a/taskflow/tests/unit/persistence/test_sql_persistence.py b/taskflow/tests/unit/persistence/test_sql_persistence.py index 35a36db6..b48f84a8 100644 --- a/taskflow/tests/unit/persistence/test_sql_persistence.py +++ b/taskflow/tests/unit/persistence/test_sql_persistence.py @@ -54,9 +54,7 @@ from taskflow.utils import lock_utils def _get_connect_string(backend, user, passwd, database=None, variant=None): - """Try to get a connection with a very specific set of values, if we get - these then we'll run the tests, otherwise they are skipped. 
- """ + """Forms a sqlalchemy database uri string for the given values.""" if backend == "postgres": if not variant: variant = 'psycopg2' diff --git a/taskflow/tests/unit/persistence/test_zk_persistence.py b/taskflow/tests/unit/persistence/test_zk_persistence.py index 414db09b..609de21f 100644 --- a/taskflow/tests/unit/persistence/test_zk_persistence.py +++ b/taskflow/tests/unit/persistence/test_zk_persistence.py @@ -16,30 +16,45 @@ import contextlib +from kazoo import exceptions as kazoo_exceptions import testtools from zake import fake_client +from taskflow import exceptions as exc from taskflow.openstack.common import uuidutils from taskflow.persistence import backends from taskflow.persistence.backends import impl_zookeeper from taskflow import test from taskflow.tests.unit.persistence import base from taskflow.tests import utils as test_utils +from taskflow.utils import kazoo_utils TEST_PATH_TPL = '/taskflow/persistence-test/%s' _ZOOKEEPER_AVAILABLE = test_utils.zookeeper_available( impl_zookeeper.MIN_ZK_VERSION) +def clean_backend(backend, conf): + with contextlib.closing(backend.get_connection()) as conn: + try: + conn.clear_all() + except exc.NotFound: + pass + client = kazoo_utils.make_client(conf) + client.start() + try: + client.delete(conf['path'], recursive=True) + except kazoo_exceptions.NoNodeError: + pass + finally: + kazoo_utils.finalize_client(client) + + @testtools.skipIf(not _ZOOKEEPER_AVAILABLE, 'zookeeper is not available') class ZkPersistenceTest(test.TestCase, base.PersistenceTestMixin): def _get_connection(self): return self.backend.get_connection() - def _clear_all(self): - with contextlib.closing(self._get_connection()) as conn: - conn.clear_all() - def setUp(self): super(ZkPersistenceTest, self).setUp() conf = test_utils.ZK_TEST_CONFIG.copy() @@ -48,13 +63,14 @@ class ZkPersistenceTest(test.TestCase, base.PersistenceTestMixin): conf['path'] = TEST_PATH_TPL % (uuidutils.generate_uuid()) try: self.backend = impl_zookeeper.ZkBackend(conf) - 
self.addCleanup(self.backend.close) except Exception as e: self.skipTest("Failed creating backend created from configuration" " %s due to %s" % (conf, e)) - with contextlib.closing(self._get_connection()) as conn: - conn.upgrade() - self.addCleanup(self._clear_all) + else: + self.addCleanup(self.backend.close) + self.addCleanup(clean_backend, self.backend, conf) + with contextlib.closing(self.backend.get_connection()) as conn: + conn.upgrade() def test_zk_persistence_entry_point(self): conf = {'connection': 'zookeeper:'} diff --git a/taskflow/tests/unit/test_arguments_passing.py b/taskflow/tests/unit/test_arguments_passing.py index 0281c1ff..5e9fc3a8 100644 --- a/taskflow/tests/unit/test_arguments_passing.py +++ b/taskflow/tests/unit/test_arguments_passing.py @@ -15,7 +15,6 @@ # under the License. import taskflow.engines - from taskflow import exceptions as exc from taskflow import test from taskflow.tests import utils diff --git a/taskflow/tests/unit/test_duration.py b/taskflow/tests/unit/test_duration.py index 67d240cb..e1588eb2 100644 --- a/taskflow/tests/unit/test_duration.py +++ b/taskflow/tests/unit/test_duration.py @@ -15,17 +15,17 @@ # under the License. 
import contextlib -import mock import time -from taskflow import task -from taskflow import test +import mock import taskflow.engines from taskflow import exceptions as exc from taskflow.listeners import timing from taskflow.patterns import linear_flow as lf from taskflow.persistence.backends import impl_memory +from taskflow import task +from taskflow import test from taskflow.tests import utils as t_utils from taskflow.utils import persistence_utils as p_utils diff --git a/taskflow/tests/unit/test_engine_helpers.py b/taskflow/tests/unit/test_engine_helpers.py index da0a276b..26fedb1b 100644 --- a/taskflow/tests/unit/test_engine_helpers.py +++ b/taskflow/tests/unit/test_engine_helpers.py @@ -16,13 +16,26 @@ import mock +import taskflow.engines from taskflow import exceptions as exc from taskflow.patterns import linear_flow from taskflow import test from taskflow.tests import utils as test_utils from taskflow.utils import persistence_utils as p_utils -import taskflow.engines + +class EngineLoadingTestCase(test.TestCase): + def test_default_load(self): + f = linear_flow.Flow('test') + f.add(test_utils.TaskOneReturn("run-1")) + e = taskflow.engines.load(f) + self.assertIsNotNone(e) + + def test_unknown_load(self): + f = linear_flow.Flow('test') + f.add(test_utils.TaskOneReturn("run-1")) + self.assertRaises(exc.NotFound, taskflow.engines.load, f, + engine_conf='not_really_any_engine') class EngineLoadingTestCase(test.TestCase): diff --git a/taskflow/tests/unit/test_action_engine.py b/taskflow/tests/unit/test_engines.py similarity index 99% rename from taskflow/tests/unit/test_action_engine.py rename to taskflow/tests/unit/test_engines.py index 533b5eef..d2fb0d43 100644 --- a/taskflow/tests/unit/test_action_engine.py +++ b/taskflow/tests/unit/test_engines.py @@ -15,28 +15,25 @@ # under the License. 
import contextlib -import testtools import threading from concurrent import futures - -from taskflow.patterns import graph_flow as gf -from taskflow.patterns import linear_flow as lf -from taskflow.patterns import unordered_flow as uf +import testtools import taskflow.engines - from taskflow.engines.action_engine import engine as eng from taskflow.engines.worker_based import engine as w_eng from taskflow.engines.worker_based import worker as wkr from taskflow import exceptions as exc +from taskflow.patterns import graph_flow as gf +from taskflow.patterns import linear_flow as lf +from taskflow.patterns import unordered_flow as uf from taskflow.persistence import logbook from taskflow import states from taskflow import task from taskflow import test from taskflow.tests import utils from taskflow.types import graph as gr - from taskflow.utils import eventlet_utils as eu from taskflow.utils import misc from taskflow.utils import persistence_utils as p_utils diff --git a/taskflow/tests/unit/test_flow_dependencies.py b/taskflow/tests/unit/test_flow_dependencies.py index 7700499d..3ddb95d9 100644 --- a/taskflow/tests/unit/test_flow_dependencies.py +++ b/taskflow/tests/unit/test_flow_dependencies.py @@ -14,11 +14,10 @@ # License for the specific language governing permissions and limitations # under the License. +from taskflow import exceptions from taskflow.patterns import graph_flow as gf from taskflow.patterns import linear_flow as lf from taskflow.patterns import unordered_flow as uf - -from taskflow import exceptions from taskflow import retry from taskflow import test from taskflow.tests import utils diff --git a/taskflow/tests/unit/test_functor_task.py b/taskflow/tests/unit/test_functor_task.py index 53db4c96..676c2c8b 100644 --- a/taskflow/tests/unit/test_functor_task.py +++ b/taskflow/tests/unit/test_functor_task.py @@ -15,7 +15,6 @@ # under the License. 
import taskflow.engines - from taskflow.patterns import linear_flow from taskflow import task as base from taskflow import test diff --git a/taskflow/tests/unit/test_green_executor.py b/taskflow/tests/unit/test_green_executor.py index 3524a9c2..eae523dc 100644 --- a/taskflow/tests/unit/test_green_executor.py +++ b/taskflow/tests/unit/test_green_executor.py @@ -31,7 +31,7 @@ class GreenExecutorTest(test.TestCase): called[name] += 1 for i in range(0, amount): - yield functools.partial(store_call, name=int(i)) + yield functools.partial(store_call, name=i) def test_func_calls(self): called = collections.defaultdict(int) @@ -44,20 +44,21 @@ class GreenExecutorTest(test.TestCase): self.assertEqual(1, called[1]) def test_no_construction(self): - self.assertRaises(AssertionError, eu.GreenExecutor, 0) - self.assertRaises(AssertionError, eu.GreenExecutor, -1) - self.assertRaises(AssertionError, eu.GreenExecutor, "-1") + self.assertRaises(ValueError, eu.GreenExecutor, 0) + self.assertRaises(ValueError, eu.GreenExecutor, -1) + self.assertRaises(ValueError, eu.GreenExecutor, "-1") def test_result_callback(self): called = collections.defaultdict(int) - def call_back(future): + def callback(future): called[future] += 1 funcs = list(self.make_funcs(called, 1)) with eu.GreenExecutor(2) as e: - f = e.submit(funcs[0]) - f.add_done_callback(call_back) + for func in funcs: + f = e.submit(func) + f.add_done_callback(callback) self.assertEqual(2, len(called)) @@ -87,6 +88,27 @@ class GreenExecutorTest(test.TestCase): result = fs[i].result() self.assertEqual(i, result) + def test_called_restricted_size(self): + called = collections.defaultdict(int) + + with eu.GreenExecutor(1) as e: + for f in self.make_funcs(called, 100): + e.submit(f) + self.assertEqual(99, e.amount_delayed) + + self.assertFalse(e.alive) + self.assertEqual(100, len(called)) + self.assertGreaterEqual(1, e.workers_created) + self.assertEqual(0, e.amount_delayed) + + def test_shutdown_twice(self): + e = eu.GreenExecutor(1) 
+ self.assertTrue(e.alive) + e.shutdown() + self.assertFalse(e.alive) + e.shutdown() + self.assertFalse(e.alive) + def test_func_cancellation(self): called = collections.defaultdict(int) diff --git a/taskflow/tests/unit/test_progress.py b/taskflow/tests/unit/test_progress.py index f1fa15d3..f37d1132 100644 --- a/taskflow/tests/unit/test_progress.py +++ b/taskflow/tests/unit/test_progress.py @@ -16,12 +16,11 @@ import contextlib -from taskflow import task -from taskflow import test - import taskflow.engines from taskflow.patterns import linear_flow as lf from taskflow.persistence.backends import impl_memory +from taskflow import task +from taskflow import test from taskflow.utils import persistence_utils as p_utils diff --git a/taskflow/tests/unit/test_retries.py b/taskflow/tests/unit/test_retries.py index 6953b376..71ea70cb 100644 --- a/taskflow/tests/unit/test_retries.py +++ b/taskflow/tests/unit/test_retries.py @@ -14,13 +14,11 @@ # License for the specific language governing permissions and limitations # under the License. +import taskflow.engines +from taskflow import exceptions as exc from taskflow.patterns import graph_flow as gf from taskflow.patterns import linear_flow as lf from taskflow.patterns import unordered_flow as uf - -import taskflow.engines - -from taskflow import exceptions as exc from taskflow import retry from taskflow import states as st from taskflow import test diff --git a/taskflow/tests/unit/test_types.py b/taskflow/tests/unit/test_types.py new file mode 100644 index 00000000..141cdfc8 --- /dev/null +++ b/taskflow/tests/unit/test_types.py @@ -0,0 +1,306 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import time + +import networkx as nx +import six + +from taskflow import exceptions as excp +from taskflow import test +from taskflow.types import fsm +from taskflow.types import graph +from taskflow.types import timing as tt +from taskflow.types import tree + + +class GraphTest(test.TestCase): + def test_no_successors_no_predecessors(self): + g = graph.DiGraph() + g.add_node("a") + g.add_node("b") + g.add_node("c") + g.add_edge("b", "c") + self.assertEqual(set(['a', 'b']), + set(g.no_predecessors_iter())) + self.assertEqual(set(['a', 'c']), + set(g.no_successors_iter())) + + def test_directed(self): + g = graph.DiGraph() + g.add_node("a") + g.add_node("b") + g.add_edge("a", "b") + self.assertTrue(g.is_directed_acyclic()) + g.add_edge("b", "a") + self.assertFalse(g.is_directed_acyclic()) + + def test_frozen(self): + g = graph.DiGraph() + self.assertFalse(g.frozen) + g.add_node("b") + g.freeze() + self.assertRaises(nx.NetworkXError, g.add_node, "c") + + +class TreeTest(test.TestCase): + def _make_species(self): + # This is the following tree: + # + # animal + # |__mammal + # | |__horse + # | |__primate + # | |__monkey + # | |__human + # |__reptile + a = tree.Node("animal") + m = tree.Node("mammal") + r = tree.Node("reptile") + a.add(m) + a.add(r) + m.add(tree.Node("horse")) + p = tree.Node("primate") + m.add(p) + p.add(tree.Node("monkey")) + p.add(tree.Node("human")) + return a + + def test_path(self): + root = self._make_species() + human = root.find("human") + self.assertIsNotNone(human) + p = list([n.item for n in human.path_iter()]) + 
self.assertEqual(['human', 'primate', 'mammal', 'animal'], p) + + def test_empty(self): + root = tree.Node("josh") + self.assertTrue(root.empty()) + + def test_not_empty(self): + root = self._make_species() + self.assertFalse(root.empty()) + + def test_node_count(self): + root = self._make_species() + self.assertEqual(7, 1 + root.child_count(only_direct=False)) + + def test_index(self): + root = self._make_species() + self.assertEqual(0, root.index("mammal")) + self.assertEqual(1, root.index("reptile")) + + def test_contains(self): + root = self._make_species() + self.assertIn("monkey", root) + self.assertNotIn("bird", root) + + def test_freeze(self): + root = self._make_species() + root.freeze() + self.assertRaises(tree.FrozenNode, root.add, "bird") + + def test_dfs_itr(self): + root = self._make_species() + things = list([n.item for n in root.dfs_iter(include_self=True)]) + self.assertEqual(set(['animal', 'reptile', 'mammal', 'horse', + 'primate', 'monkey', 'human']), set(things)) + + +class StopWatchTest(test.TestCase): + def test_no_states(self): + watch = tt.StopWatch() + self.assertRaises(RuntimeError, watch.stop) + self.assertRaises(RuntimeError, watch.resume) + + def test_expiry(self): + watch = tt.StopWatch(0.1) + watch.start() + time.sleep(0.2) + self.assertTrue(watch.expired()) + + def test_no_expiry(self): + watch = tt.StopWatch(0.1) + watch.start() + self.assertFalse(watch.expired()) + + def test_elapsed(self): + watch = tt.StopWatch() + watch.start() + time.sleep(0.2) + # NOTE(harlowja): Allow for a slight variation by using 0.19. 
+ self.assertGreaterEqual(0.19, watch.elapsed()) + + def test_pause_resume(self): + watch = tt.StopWatch() + watch.start() + time.sleep(0.05) + watch.stop() + elapsed = watch.elapsed() + time.sleep(0.05) + self.assertAlmostEqual(elapsed, watch.elapsed()) + watch.resume() + self.assertNotEqual(elapsed, watch.elapsed()) + + def test_context_manager(self): + with tt.StopWatch() as watch: + time.sleep(0.05) + self.assertGreater(0.01, watch.elapsed()) + + +class FSMTest(test.TestCase): + def setUp(self): + super(FSMTest, self).setUp() + # NOTE(harlowja): this state machine will never stop if run() is used. + self.jumper = fsm.FSM("down") + self.jumper.add_state('up') + self.jumper.add_state('down') + self.jumper.add_transition('down', 'up', 'jump') + self.jumper.add_transition('up', 'down', 'fall') + self.jumper.add_reaction('up', 'jump', lambda *args: 'fall') + self.jumper.add_reaction('down', 'fall', lambda *args: 'jump') + + def test_bad_start_state(self): + m = fsm.FSM('unknown') + self.assertRaises(excp.NotFound, m.run, 'unknown') + + def test_contains(self): + m = fsm.FSM('unknown') + self.assertNotIn('unknown', m) + m.add_state('unknown') + self.assertIn('unknown', m) + + def test_duplicate_state(self): + m = fsm.FSM('unknown') + m.add_state('unknown') + self.assertRaises(excp.Duplicate, m.add_state, 'unknown') + + def test_duplicate_reaction(self): + self.assertRaises( + # Currently duplicate reactions are not allowed... 
+ excp.Duplicate, + self.jumper.add_reaction, 'down', 'fall', lambda *args: 'skate') + + def test_bad_transition(self): + m = fsm.FSM('unknown') + m.add_state('unknown') + m.add_state('fire') + self.assertRaises(excp.NotFound, m.add_transition, + 'unknown', 'something', 'boom') + self.assertRaises(excp.NotFound, m.add_transition, + 'something', 'unknown', 'boom') + + def test_bad_reaction(self): + m = fsm.FSM('unknown') + m.add_state('unknown') + self.assertRaises(excp.NotFound, m.add_reaction, 'something', 'boom', + lambda *args: 'cough') + + def test_run(self): + m = fsm.FSM('down') + m.add_state('down') + m.add_state('up') + m.add_state('broken', terminal=True) + m.add_transition('down', 'up', 'jump') + m.add_transition('up', 'broken', 'hit-wall') + m.add_reaction('up', 'jump', lambda *args: 'hit-wall') + self.assertEqual(['broken', 'down', 'up'], sorted(m.states)) + self.assertEqual(2, m.events) + m.initialize() + self.assertEqual('down', m.current_state) + self.assertFalse(m.terminated) + m.run('jump') + self.assertTrue(m.terminated) + self.assertEqual('broken', m.current_state) + self.assertRaises(excp.InvalidState, m.run, 'jump', initialize=False) + + def test_on_enter_on_exit(self): + enter_transitions = [] + exit_transitions = [] + + def on_exit(state, event): + exit_transitions.append((state, event)) + + def on_enter(state, event): + enter_transitions.append((state, event)) + + m = fsm.FSM('start') + m.add_state('start', on_exit=on_exit) + m.add_state('down', on_enter=on_enter, on_exit=on_exit) + m.add_state('up', on_enter=on_enter, on_exit=on_exit) + m.add_transition('start', 'down', 'beat') + m.add_transition('down', 'up', 'jump') + m.add_transition('up', 'down', 'fall') + + m.initialize() + m.process_event('beat') + m.process_event('jump') + m.process_event('fall') + self.assertEqual([('down', 'beat'), + ('up', 'jump'), ('down', 'fall')], enter_transitions) + self.assertEqual([('down', 'jump'), ('up', 'fall')], exit_transitions) + + def 
test_run_iter(self): + up_downs = [] + for (old_state, new_state) in self.jumper.run_iter('jump'): + up_downs.append((old_state, new_state)) + if len(up_downs) >= 3: + break + self.assertEqual([('down', 'up'), ('up', 'down'), ('down', 'up')], + up_downs) + self.assertFalse(self.jumper.terminated) + self.assertEqual('up', self.jumper.current_state) + self.jumper.process_event('fall') + self.assertEqual('down', self.jumper.current_state) + + def test_run_send(self): + up_downs = [] + it = self.jumper.run_iter('jump') + while True: + up_downs.append(it.send(None)) + if len(up_downs) >= 3: + it.close() + break + self.assertEqual('up', self.jumper.current_state) + self.assertFalse(self.jumper.terminated) + self.assertEqual([('down', 'up'), ('up', 'down'), ('down', 'up')], + up_downs) + self.assertRaises(StopIteration, six.next, it) + + def test_run_send_fail(self): + up_downs = [] + it = self.jumper.run_iter('jump') + up_downs.append(six.next(it)) + self.assertRaises(excp.NotFound, it.send, 'fail') + it.close() + self.assertEqual([('down', 'up')], up_downs) + + def test_not_initialized(self): + self.assertRaises(fsm.NotInitialized, + self.jumper.process_event, 'jump') + + def test_iter(self): + transitions = list(self.jumper) + self.assertEqual(2, len(transitions)) + self.assertIn(('up', 'fall', 'down'), transitions) + self.assertIn(('down', 'jump', 'up'), transitions) + + def test_invalid_callbacks(self): + m = fsm.FSM('working') + m.add_state('working') + m.add_state('broken') + self.assertRaises(AssertionError, m.add_state, 'b', on_enter=2) + self.assertRaises(AssertionError, m.add_state, 'b', on_exit=2) diff --git a/taskflow/tests/unit/test_utils.py b/taskflow/tests/unit/test_utils.py index 1d1ea336..d955514c 100644 --- a/taskflow/tests/unit/test_utils.py +++ b/taskflow/tests/unit/test_utils.py @@ -18,7 +18,9 @@ import collections import functools import inspect import sys -import time + +import six +import testtools from taskflow import states from taskflow import 
test @@ -112,17 +114,22 @@ class GetCallableNameTest(test.TestCase): def test_method(self): name = reflection.get_callable_name(Class.method) - self.assertEqual(name, '.'.join((__name__, 'method'))) + self.assertEqual(name, '.'.join((__name__, 'Class', 'method'))) def test_instance_method(self): name = reflection.get_callable_name(Class().method) self.assertEqual(name, '.'.join((__name__, 'Class', 'method'))) def test_static_method(self): - # NOTE(imelnikov): static method are just functions, class name - # is not recorded anywhere in them. name = reflection.get_callable_name(Class.static_method) - self.assertEqual(name, '.'.join((__name__, 'static_method'))) + if six.PY3: + self.assertEqual(name, + '.'.join((__name__, 'Class', 'static_method'))) + else: + # NOTE(imelnikov): static method are just functions, class name + # is not recorded anywhere in them. + self.assertEqual(name, + '.'.join((__name__, 'static_method'))) def test_class_method(self): name = reflection.get_callable_name(Class.class_method) @@ -142,6 +149,46 @@ class GetCallableNameTest(test.TestCase): '__call__'))) +# These extended/special case tests only work on python 3, due to python 2 +# being broken/incorrect with regard to these special cases... 
+@testtools.skipIf(not six.PY3, 'python 3.x is not currently available') +class GetCallableNameTestExtended(test.TestCase): + # Tests items in http://legacy.python.org/dev/peps/pep-3155/ + + class InnerCallableClass(object): + def __call__(self): + pass + + def test_inner_callable_class(self): + obj = self.InnerCallableClass() + name = reflection.get_callable_name(obj.__call__) + expected_name = '.'.join((__name__, 'GetCallableNameTestExtended', + 'InnerCallableClass', '__call__')) + self.assertEqual(expected_name, name) + + def test_inner_callable_function(self): + def a(): + + def b(): + pass + + return b + + name = reflection.get_callable_name(a()) + expected_name = '.'.join((__name__, 'GetCallableNameTestExtended', + 'test_inner_callable_function', '', + 'a', '', 'b')) + self.assertEqual(expected_name, name) + + def test_inner_class(self): + obj = self.InnerCallableClass() + name = reflection.get_callable_name(obj) + expected_name = '.'.join((__name__, + 'GetCallableNameTestExtended', + 'InnerCallableClass')) + self.assertEqual(expected_name, name) + + class NotifierTest(test.TestCase): def test_notify_called(self): @@ -494,45 +541,49 @@ class IsValidAttributeNameTestCase(test.TestCase): self.assertFalse(misc.is_valid_attribute_name('mañana')) -class StopWatchUtilsTest(test.TestCase): - def test_no_states(self): - watch = misc.StopWatch() - self.assertRaises(RuntimeError, watch.stop) - self.assertRaises(RuntimeError, watch.resume) +class UriParseTest(test.TestCase): + def test_parse(self): + url = "zookeeper://192.168.0.1:2181/a/b/?c=d" + parsed = misc.parse_uri(url) + self.assertEqual('zookeeper', parsed.scheme) + self.assertEqual(2181, parsed.port) + self.assertEqual('192.168.0.1', parsed.hostname) + self.assertEqual('', parsed.fragment) + self.assertEqual('/a/b/', parsed.path) + self.assertEqual({'c': 'd'}, parsed.params) - def test_expiry(self): - watch = misc.StopWatch(0.1) - watch.start() - time.sleep(0.2) - self.assertTrue(watch.expired()) + def 
test_multi_params(self): + url = "mysql://www.yahoo.com:3306/a/b/?c=d&c=e" + parsed = misc.parse_uri(url, query_duplicates=True) + self.assertEqual({'c': ['d', 'e']}, parsed.params) - def test_no_expiry(self): - watch = misc.StopWatch(0.1) - watch.start() - self.assertFalse(watch.expired()) + def test_port_provided(self): + url = "rabbitmq://www.yahoo.com:5672" + parsed = misc.parse_uri(url) + self.assertEqual('rabbitmq', parsed.scheme) + self.assertEqual('www.yahoo.com', parsed.hostname) + self.assertEqual(5672, parsed.port) + self.assertEqual('', parsed.path) - def test_elapsed(self): - watch = misc.StopWatch() - watch.start() - time.sleep(0.2) - # NOTE(harlowja): Allow for a slight variation by using 0.19. - self.assertGreaterEqual(0.19, watch.elapsed()) + def test_ipv6_host(self): + url = "rsync://[2001:db8:0:1]:873" + parsed = misc.parse_uri(url) + self.assertEqual('rsync', parsed.scheme) + self.assertEqual('2001:db8:0:1', parsed.hostname) + self.assertEqual(873, parsed.port) - def test_pause_resume(self): - watch = misc.StopWatch() - watch.start() - time.sleep(0.05) - watch.stop() - elapsed = watch.elapsed() - time.sleep(0.05) - self.assertAlmostEqual(elapsed, watch.elapsed()) - watch.resume() - self.assertNotEqual(elapsed, watch.elapsed()) + def test_user_password(self): + url = "rsync://test:test_pw@www.yahoo.com:873" + parsed = misc.parse_uri(url) + self.assertEqual('test', parsed.username) + self.assertEqual('test_pw', parsed.password) + self.assertEqual('www.yahoo.com', parsed.hostname) - def test_context_manager(self): - with misc.StopWatch() as watch: - time.sleep(0.05) - self.assertGreater(0.01, watch.elapsed()) + def test_user(self): + url = "rsync://test@www.yahoo.com:873" + parsed = misc.parse_uri(url) + self.assertEqual('test', parsed.username) + self.assertEqual(None, parsed.password) class UriParseTest(test.TestCase): diff --git a/taskflow/tests/unit/test_utils_async_utils.py b/taskflow/tests/unit/test_utils_async_utils.py index 
8e9ab944..0abf4107 100644 --- a/taskflow/tests/unit/test_utils_async_utils.py +++ b/taskflow/tests/unit/test_utils_async_utils.py @@ -14,9 +14,8 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from concurrent import futures +import testtools from taskflow import test from taskflow.utils import async_utils as au diff --git a/taskflow/tests/unit/test_utils_failure.py b/taskflow/tests/unit/test_utils_failure.py index 394abfd2..4958da62 100644 --- a/taskflow/tests/unit/test_utils_failure.py +++ b/taskflow/tests/unit/test_utils_failure.py @@ -19,7 +19,6 @@ import six from taskflow import exceptions from taskflow import test from taskflow.tests import utils as test_utils - from taskflow.utils import misc @@ -43,6 +42,10 @@ class GeneralFailureObjTestsMixin(object): self.assertEqual(list(self.fail_obj), test_utils.RUNTIME_ERROR_CLASSES[:-2]) + def test_pformat_no_traceback(self): + text = self.fail_obj.pformat() + self.assertNotIn("Traceback", text) + def test_check_str(self): val = 'Exception' self.assertEqual(self.fail_obj.check(val), val) @@ -92,6 +95,10 @@ class ReCreatedFailureTestCase(test.TestCase, GeneralFailureObjTestsMixin): def test_no_exc_info(self): self.assertIs(self.fail_obj.exc_info, None) + def test_pformat_traceback(self): + text = self.fail_obj.pformat(traceback=True) + self.assertIn("Traceback (most recent call last):", text) + def test_reraises(self): exc = self.assertRaises(exceptions.WrappedFailure, self.fail_obj.reraise) @@ -104,6 +111,10 @@ class FromExceptionTestCase(test.TestCase, GeneralFailureObjTestsMixin): super(FromExceptionTestCase, self).setUp() self.fail_obj = misc.Failure.from_exception(RuntimeError('Woot!')) + def test_pformat_no_traceback(self): + text = self.fail_obj.pformat(traceback=True) + self.assertIn("Traceback not available", text) + class FailureObjectTestCase(test.TestCase): @@ -189,6 +200,17 @@ class FailureObjectTestCase(test.TestCase): 
self.assertNotEqual(captured, None) self.assertFalse(captured.matches(None)) + def test_pformat_traceback(self): + captured = _captured_failure('Woot!') + text = captured.pformat(traceback=True) + self.assertIn("Traceback (most recent call last):", text) + + def test_pformat_traceback_captured_no_exc_info(self): + captured = _captured_failure('Woot!') + captured = misc.Failure.from_dict(captured.to_dict()) + text = captured.pformat(traceback=True) + self.assertIn("Traceback (most recent call last):", text) + class WrappedFailureTestCase(test.TestCase): diff --git a/taskflow/tests/unit/test_utils_lock_utils.py b/taskflow/tests/unit/test_utils_lock_utils.py index 30a120a3..2b2f1f83 100644 --- a/taskflow/tests/unit/test_utils_lock_utils.py +++ b/taskflow/tests/unit/test_utils_lock_utils.py @@ -29,7 +29,7 @@ from taskflow.utils import lock_utils NAPPY_TIME = 0.05 # We will spend this amount of time doing some "fake" work. -WORK_TIMES = [(0.01 + x/100.0) for x in range(0, 5)] +WORK_TIMES = [(0.01 + x / 100.0) for x in range(0, 5)] def _find_overlaps(times, start, end): diff --git a/taskflow/tests/unit/worker_based/test_dispatcher.py b/taskflow/tests/unit/worker_based/test_dispatcher.py new file mode 100644 index 00000000..4dae910d --- /dev/null +++ b/taskflow/tests/unit/worker_based/test_dispatcher.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from kombu import message +import mock + +from taskflow.engines.worker_based import dispatcher +from taskflow import test + + +def mock_acked_message(ack_ok=True, **kwargs): + msg = mock.create_autospec(message.Message, spec_set=True, instance=True, + channel=None, **kwargs) + + def ack_side_effect(*args, **kwargs): + msg.acknowledged = True + + if ack_ok: + msg.ack_log_error.side_effect = ack_side_effect + msg.acknowledged = False + return msg + + +class TestDispatcher(test.MockTestCase): + def test_creation(self): + on_hello = mock.MagicMock() + handlers = {'hello': on_hello} + dispatcher.TypeDispatcher(handlers) + + def test_on_message(self): + on_hello = mock.MagicMock() + handlers = {'hello': on_hello} + d = dispatcher.TypeDispatcher(handlers) + msg = mock_acked_message(properties={'type': 'hello'}) + d.on_message("", msg) + self.assertTrue(on_hello.called) + self.assertTrue(msg.ack_log_error.called) + self.assertTrue(msg.acknowledged) + + def test_on_rejected_message(self): + d = dispatcher.TypeDispatcher({}) + msg = mock_acked_message(properties={'type': 'hello'}) + d.on_message("", msg) + self.assertTrue(msg.reject_log_error.called) + self.assertFalse(msg.acknowledged) + + def test_on_requeue_message(self): + d = dispatcher.TypeDispatcher({}) + d.add_requeue_filter(lambda data, message: True) + msg = mock_acked_message() + d.on_message("", msg) + self.assertTrue(msg.requeue.called) + self.assertFalse(msg.acknowledged) + + def test_failed_ack(self): + on_hello = mock.MagicMock() + handlers = {'hello': on_hello} + d = dispatcher.TypeDispatcher(handlers) + msg = mock_acked_message(ack_ok=False, + properties={'type': 'hello'}) + d.on_message("", msg) + self.assertTrue(msg.ack_log_error.called) + self.assertFalse(msg.acknowledged) + self.assertFalse(on_hello.called) diff --git a/taskflow/tests/unit/worker_based/test_executor.py b/taskflow/tests/unit/worker_based/test_executor.py index 75092003..e6c97e17 100644 --- 
a/taskflow/tests/unit/worker_based/test_executor.py +++ b/taskflow/tests/unit/worker_based/test_executor.py @@ -14,15 +14,15 @@ # License for the specific language governing permissions and limitations # under the License. -import mock import threading import time from concurrent import futures -from kombu import exceptions as kombu_exc +import mock from taskflow.engines.worker_based import executor from taskflow.engines.worker_based import protocol as pr +from taskflow.openstack.common import timeutils from taskflow import test from taskflow.tests import utils from taskflow.utils import misc @@ -86,158 +86,146 @@ class TestWorkerTaskExecutor(test.MockTestCase): master_mock_calls = [ mock.call.Proxy(self.executor_uuid, self.executor_exchange, - ex._on_message, ex._on_wait, url=self.broker_url) + mock.ANY, ex._on_wait, url=self.broker_url) ] self.assertEqual(self.master_mock.mock_calls, master_mock_calls) def test_on_message_response_state_running(self): response = pr.Response(pr.RUNNING) ex = self.executor() - ex._requests_cache.set(self.task_uuid, self.request_inst_mock) - ex._on_message(response.to_dict(), self.message_mock) + ex._requests_cache[self.task_uuid] = self.request_inst_mock + ex._process_response(response.to_dict(), self.message_mock) - self.assertEqual(self.request_inst_mock.mock_calls, - [mock.call.set_running()]) - self.assertEqual(self.message_mock.mock_calls, [mock.call.ack()]) + expected_calls = [ + mock.call.transition_and_log_error(pr.RUNNING, logger=mock.ANY), + ] + self.assertEqual(expected_calls, self.request_inst_mock.mock_calls) def test_on_message_response_state_progress(self): response = pr.Response(pr.PROGRESS, progress=1.0) ex = self.executor() - ex._requests_cache.set(self.task_uuid, self.request_inst_mock) - ex._on_message(response.to_dict(), self.message_mock) + ex._requests_cache[self.task_uuid] = self.request_inst_mock + ex._process_response(response.to_dict(), self.message_mock) 
self.assertEqual(self.request_inst_mock.mock_calls, [mock.call.on_progress(progress=1.0)]) - self.assertEqual(self.message_mock.mock_calls, [mock.call.ack()]) def test_on_message_response_state_failure(self): failure = misc.Failure.from_exception(Exception('test')) failure_dict = failure.to_dict() response = pr.Response(pr.FAILURE, result=failure_dict) ex = self.executor() - ex._requests_cache.set(self.task_uuid, self.request_inst_mock) - ex._on_message(response.to_dict(), self.message_mock) + ex._requests_cache[self.task_uuid] = self.request_inst_mock + ex._process_response(response.to_dict(), self.message_mock) - self.assertEqual(len(ex._requests_cache._data), 0) - self.assertEqual(self.request_inst_mock.mock_calls, [ + self.assertEqual(len(ex._requests_cache), 0) + expected_calls = [ + mock.call.transition_and_log_error(pr.FAILURE, logger=mock.ANY), mock.call.set_result(result=utils.FailureMatcher(failure)) - ]) - self.assertEqual(self.message_mock.mock_calls, [mock.call.ack()]) + ] + self.assertEqual(expected_calls, self.request_inst_mock.mock_calls) def test_on_message_response_state_success(self): response = pr.Response(pr.SUCCESS, result=self.task_result, event='executed') ex = self.executor() - ex._requests_cache.set(self.task_uuid, self.request_inst_mock) - ex._on_message(response.to_dict(), self.message_mock) + ex._requests_cache[self.task_uuid] = self.request_inst_mock + ex._process_response(response.to_dict(), self.message_mock) - self.assertEqual(self.request_inst_mock.mock_calls, - [mock.call.set_result(result=self.task_result, - event='executed')]) - self.assertEqual(self.message_mock.mock_calls, [mock.call.ack()]) + expected_calls = [ + mock.call.transition_and_log_error(pr.SUCCESS, logger=mock.ANY), + mock.call.set_result(result=self.task_result, event='executed') + ] + self.assertEqual(expected_calls, self.request_inst_mock.mock_calls) def test_on_message_response_unknown_state(self): response = pr.Response(state='') ex = self.executor() - 
ex._requests_cache.set(self.task_uuid, self.request_inst_mock) - ex._on_message(response.to_dict(), self.message_mock) + ex._requests_cache[self.task_uuid] = self.request_inst_mock + ex._process_response(response.to_dict(), self.message_mock) self.assertEqual(self.request_inst_mock.mock_calls, []) - self.assertEqual(self.message_mock.mock_calls, [mock.call.ack()]) def test_on_message_response_unknown_task(self): self.message_mock.properties['correlation_id'] = '' response = pr.Response(pr.RUNNING) ex = self.executor() - ex._requests_cache.set(self.task_uuid, self.request_inst_mock) - ex._on_message(response.to_dict(), self.message_mock) + ex._requests_cache[self.task_uuid] = self.request_inst_mock + ex._process_response(response.to_dict(), self.message_mock) self.assertEqual(self.request_inst_mock.mock_calls, []) - self.assertEqual(self.message_mock.mock_calls, [mock.call.ack()]) def test_on_message_response_no_correlation_id(self): self.message_mock.properties = {'type': pr.RESPONSE} response = pr.Response(pr.RUNNING) ex = self.executor() - ex._requests_cache.set(self.task_uuid, self.request_inst_mock) - ex._on_message(response.to_dict(), self.message_mock) + ex._requests_cache[self.task_uuid] = self.request_inst_mock + ex._process_response(response.to_dict(), self.message_mock) self.assertEqual(self.request_inst_mock.mock_calls, []) - self.assertEqual(self.message_mock.mock_calls, [mock.call.ack()]) - - @mock.patch('taskflow.engines.worker_based.executor.LOG.warning') - def test_on_message_unknown_type(self, mocked_warning): - self.message_mock.properties = {'correlation_id': self.task_uuid, - 'type': ''} - ex = self.executor() - ex._on_message({}, self.message_mock) - self.assertTrue(mocked_warning.called) - - @mock.patch('taskflow.engines.worker_based.executor.LOG.warning') - def test_on_message_no_type(self, mocked_warning): - self.message_mock.properties = {'correlation_id': self.task_uuid} - ex = self.executor() - ex._on_message({}, self.message_mock) - 
self.assertTrue(mocked_warning.called) - - @mock.patch('taskflow.engines.worker_based.executor.LOG.exception') - def test_on_message_acknowledge_raises(self, mocked_exception): - self.message_mock.ack.side_effect = kombu_exc.MessageStateError() - self.executor()._on_message({}, self.message_mock) - self.assertTrue(mocked_exception.called) def test_on_wait_task_not_expired(self): ex = self.executor() - ex._requests_cache.set(self.task_uuid, self.request_inst_mock) + ex._requests_cache[self.task_uuid] = self.request_inst_mock - self.assertEqual(len(ex._requests_cache._data), 1) + self.assertEqual(len(ex._requests_cache), 1) ex._on_wait() - self.assertEqual(len(ex._requests_cache._data), 1) + self.assertEqual(len(ex._requests_cache), 1) def test_on_wait_task_expired(self): + now = timeutils.utcnow() self.request_inst_mock.expired = True - ex = self.executor() - ex._requests_cache.set(self.task_uuid, self.request_inst_mock) + self.request_inst_mock.created_on = now + timeutils.set_time_override(now) + self.addCleanup(timeutils.clear_time_override) + timeutils.advance_time_seconds(120) - self.assertEqual(len(ex._requests_cache._data), 1) + ex = self.executor() + ex._requests_cache[self.task_uuid] = self.request_inst_mock + + self.assertEqual(len(ex._requests_cache), 1) ex._on_wait() - self.assertEqual(len(ex._requests_cache._data), 0) + self.assertEqual(len(ex._requests_cache), 0) def test_remove_task_non_existent(self): ex = self.executor() - ex._requests_cache.set(self.task_uuid, self.request_inst_mock) + ex._requests_cache[self.task_uuid] = self.request_inst_mock - self.assertEqual(len(ex._requests_cache._data), 1) - ex._requests_cache.delete(self.task_uuid) - self.assertEqual(len(ex._requests_cache._data), 0) + self.assertEqual(len(ex._requests_cache), 1) + del ex._requests_cache[self.task_uuid] + self.assertEqual(len(ex._requests_cache), 0) # delete non-existent - ex._requests_cache.delete(self.task_uuid) - self.assertEqual(len(ex._requests_cache._data), 0) + try: 
+ del ex._requests_cache[self.task_uuid] + except KeyError: + pass + self.assertEqual(len(ex._requests_cache), 0) def test_execute_task(self): self.message_mock.properties['type'] = pr.NOTIFY notify = pr.Notify(topic=self.executor_topic, tasks=[self.task.name]) ex = self.executor() - ex._on_message(notify.to_dict(), self.message_mock) + ex._process_notify(notify.to_dict(), self.message_mock) ex.execute_task(self.task, self.task_uuid, self.task_args) expected_calls = [ mock.call.Request(self.task, self.task_uuid, 'execute', self.task_args, None, self.timeout), - mock.call.request.set_pending(), + mock.call.request.transition_and_log_error(pr.PENDING, + logger=mock.ANY), mock.call.proxy.publish(msg=self.request_inst_mock, routing_key=self.executor_topic, reply_to=self.executor_uuid, correlation_id=self.task_uuid) ] - self.assertEqual(self.master_mock.mock_calls, expected_calls) + self.assertEqual(expected_calls, self.master_mock.mock_calls) def test_revert_task(self): self.message_mock.properties['type'] = pr.NOTIFY notify = pr.Notify(topic=self.executor_topic, tasks=[self.task.name]) ex = self.executor() - ex._on_message(notify.to_dict(), self.message_mock) + ex._process_notify(notify.to_dict(), self.message_mock) ex.revert_task(self.task, self.task_uuid, self.task_args, self.task_result, self.task_failures) @@ -246,13 +234,14 @@ class TestWorkerTaskExecutor(test.MockTestCase): self.task_args, None, self.timeout, failures=self.task_failures, result=self.task_result), - mock.call.request.set_pending(), + mock.call.request.transition_and_log_error(pr.PENDING, + logger=mock.ANY), mock.call.proxy.publish(msg=self.request_inst_mock, routing_key=self.executor_topic, reply_to=self.executor_uuid, correlation_id=self.task_uuid) ] - self.assertEqual(self.master_mock.mock_calls, expected_calls) + self.assertEqual(expected_calls, self.master_mock.mock_calls) def test_execute_task_topic_not_found(self): workers_info = {self.executor_topic: ['']} @@ -270,20 +259,23 @@ class 
TestWorkerTaskExecutor(test.MockTestCase): self.proxy_inst_mock.publish.side_effect = Exception('Woot!') notify = pr.Notify(topic=self.executor_topic, tasks=[self.task.name]) ex = self.executor() - ex._on_message(notify.to_dict(), self.message_mock) + ex._process_notify(notify.to_dict(), self.message_mock) ex.execute_task(self.task, self.task_uuid, self.task_args) expected_calls = [ mock.call.Request(self.task, self.task_uuid, 'execute', self.task_args, None, self.timeout), - mock.call.request.set_pending(), + mock.call.request.transition_and_log_error(pr.PENDING, + logger=mock.ANY), mock.call.proxy.publish(msg=self.request_inst_mock, routing_key=self.executor_topic, reply_to=self.executor_uuid, correlation_id=self.task_uuid), + mock.call.request.transition_and_log_error(pr.FAILURE, + logger=mock.ANY), mock.call.request.set_result(mock.ANY) ] - self.assertEqual(self.master_mock.mock_calls, expected_calls) + self.assertEqual(expected_calls, self.master_mock.mock_calls) def test_wait_for_any(self): fs = [futures.Future(), futures.Future()] diff --git a/taskflow/tests/unit/worker_based/test_message_pump.py b/taskflow/tests/unit/worker_based/test_message_pump.py new file mode 100644 index 00000000..10116c21 --- /dev/null +++ b/taskflow/tests/unit/worker_based/test_message_pump.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import threading + +import mock + +from taskflow.engines.worker_based import protocol as pr +from taskflow.engines.worker_based import proxy +from taskflow.openstack.common import uuidutils +from taskflow import test +from taskflow.tests import utils as test_utils +from taskflow.types import latch + +TEST_EXCHANGE, TEST_TOPIC = ('test-exchange', 'test-topic') +BARRIER_WAIT_TIMEOUT = 1.0 +POLLING_INTERVAL = 0.01 + + +class TestMessagePump(test.MockTestCase): + def test_notify(self): + barrier = threading.Event() + + on_notify = mock.MagicMock() + on_notify.side_effect = lambda *args, **kwargs: barrier.set() + + handlers = {pr.NOTIFY: on_notify} + p = proxy.Proxy(TEST_TOPIC, TEST_EXCHANGE, handlers, + transport='memory', + transport_options={ + 'polling_interval': POLLING_INTERVAL, + }) + + t = threading.Thread(target=p.start) + t.daemon = True + t.start() + p.wait() + p.publish(pr.Notify(), TEST_TOPIC) + + barrier.wait(BARRIER_WAIT_TIMEOUT) + self.assertTrue(barrier.is_set()) + p.stop() + t.join() + + self.assertTrue(on_notify.called) + on_notify.assert_called_with({}, mock.ANY) + + def test_response(self): + barrier = threading.Event() + + on_response = mock.MagicMock() + on_response.side_effect = lambda *args, **kwargs: barrier.set() + + handlers = {pr.RESPONSE: on_response} + p = proxy.Proxy(TEST_TOPIC, TEST_EXCHANGE, handlers, + transport='memory', + transport_options={ + 'polling_interval': POLLING_INTERVAL, + }) + + t = threading.Thread(target=p.start) + t.daemon = True + t.start() + p.wait() + resp = pr.Response(pr.RUNNING) + p.publish(resp, TEST_TOPIC) + + barrier.wait(BARRIER_WAIT_TIMEOUT) + self.assertTrue(barrier.is_set()) + p.stop() + t.join() + + self.assertTrue(on_response.called) + on_response.assert_called_with(resp.to_dict(), mock.ANY) + + def test_multi_message(self): + message_count = 30 + barrier = latch.Latch(message_count) + countdown = lambda data, message: barrier.countdown() + + on_notify = mock.MagicMock() + on_notify.side_effect = 
countdown + + on_response = mock.MagicMock() + on_response.side_effect = countdown + + on_request = mock.MagicMock() + on_request.side_effect = countdown + + handlers = { + pr.NOTIFY: on_notify, + pr.RESPONSE: on_response, + pr.REQUEST: on_request, + } + p = proxy.Proxy(TEST_TOPIC, TEST_EXCHANGE, handlers, + transport='memory', + transport_options={ + 'polling_interval': POLLING_INTERVAL, + }) + + t = threading.Thread(target=p.start) + t.daemon = True + t.start() + p.wait() + + for i in range(0, message_count): + j = i % 3 + if j == 0: + p.publish(pr.Notify(), TEST_TOPIC) + elif j == 1: + p.publish(pr.Response(pr.RUNNING), TEST_TOPIC) + else: + p.publish(pr.Request(test_utils.DummyTask("dummy_%s" % i), + uuidutils.generate_uuid(), + pr.EXECUTE, [], None, None), TEST_TOPIC) + + barrier.wait(BARRIER_WAIT_TIMEOUT) + self.assertEqual(0, barrier.needed) + p.stop() + t.join() + + self.assertTrue(on_notify.called) + self.assertTrue(on_response.called) + self.assertTrue(on_request.called) + + self.assertEqual(10, on_notify.call_count) + self.assertEqual(10, on_response.call_count) + self.assertEqual(10, on_request.call_count) + + call_count = sum([ + on_notify.call_count, + on_response.call_count, + on_request.call_count, + ]) + self.assertEqual(message_count, call_count) diff --git a/taskflow/tests/unit/worker_based/test_pipeline.py b/taskflow/tests/unit/worker_based/test_pipeline.py new file mode 100644 index 00000000..8809785e --- /dev/null +++ b/taskflow/tests/unit/worker_based/test_pipeline.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import threading + +from concurrent import futures + +from taskflow.engines.worker_based import endpoint +from taskflow.engines.worker_based import executor as worker_executor +from taskflow.engines.worker_based import server as worker_server +from taskflow.openstack.common import uuidutils +from taskflow import test +from taskflow.tests import utils as test_utils +from taskflow.utils import misc + + +TEST_EXCHANGE, TEST_TOPIC = ('test-exchange', 'test-topic') +WAIT_TIMEOUT = 1.0 +POLLING_INTERVAL = 0.01 + + +class TestPipeline(test.MockTestCase): + def _fetch_server(self, task_classes): + endpoints = [] + for cls in task_classes: + endpoints.append(endpoint.Endpoint(cls)) + server = worker_server.Server( + TEST_TOPIC, TEST_EXCHANGE, + futures.ThreadPoolExecutor(1), endpoints, + transport='memory', + transport_options={ + 'polling_interval': POLLING_INTERVAL, + }) + server_thread = threading.Thread(target=server.start) + server_thread.daemon = True + return (server, server_thread) + + def _fetch_executor(self): + executor = worker_executor.WorkerTaskExecutor( + uuidutils.generate_uuid(), + TEST_EXCHANGE, + [TEST_TOPIC], + transport='memory', + transport_options={ + 'polling_interval': POLLING_INTERVAL, + }) + return executor + + def _start_components(self, task_classes): + server, server_thread = self._fetch_server(task_classes) + executor = self._fetch_executor() + self.addCleanup(executor.stop) + self.addCleanup(server_thread.join) + self.addCleanup(server.stop) + executor.start() + server_thread.start() + server.wait() + return (executor, server) + + 
def test_execution_pipeline(self): + executor, server = self._start_components([test_utils.TaskOneReturn]) + self.assertEqual(0, executor.wait_for_workers(timeout=WAIT_TIMEOUT)) + + t = test_utils.TaskOneReturn() + f = executor.execute_task(t, uuidutils.generate_uuid(), {}) + executor.wait_for_any([f]) + + t2, _action, result = f.result() + + self.assertEqual(1, result) + self.assertEqual(t, t2) + + def test_execution_failure_pipeline(self): + task_classes = [ + test_utils.TaskWithFailure, + ] + executor, server = self._start_components(task_classes) + + t = test_utils.TaskWithFailure() + f = executor.execute_task(t, uuidutils.generate_uuid(), {}) + executor.wait_for_any([f]) + + _t2, _action, result = f.result() + self.assertIsInstance(result, misc.Failure) + self.assertEqual(RuntimeError, result.check(RuntimeError)) diff --git a/taskflow/tests/unit/worker_based/test_protocol.py b/taskflow/tests/unit/worker_based/test_protocol.py index 27d6e00d..7d51da31 100644 --- a/taskflow/tests/unit/worker_based/test_protocol.py +++ b/taskflow/tests/unit/worker_based/test_protocol.py @@ -14,16 +14,78 @@ # License for the specific language governing permissions and limitations # under the License. 
+from concurrent import futures import mock -from concurrent import futures - from taskflow.engines.worker_based import protocol as pr +from taskflow import exceptions as excp +from taskflow.openstack.common import uuidutils from taskflow import test from taskflow.tests import utils from taskflow.utils import misc +class TestProtocolValidation(test.TestCase): + def test_send_notify(self): + msg = pr.Notify() + pr.Notify.validate(msg.to_dict(), False) + + def test_send_notify_invalid(self): + msg = { + 'all your base': 'are belong to us', + } + self.assertRaises(excp.InvalidFormat, + pr.Notify.validate, msg, False) + + def test_reply_notify(self): + msg = pr.Notify(topic="bob", tasks=['a', 'b', 'c']) + pr.Notify.validate(msg.to_dict(), True) + + def test_reply_notify_invalid(self): + msg = { + 'topic': {}, + 'tasks': 'not yours', + } + self.assertRaises(excp.InvalidFormat, + pr.Notify.validate, msg, True) + + def test_request(self): + msg = pr.Request(utils.DummyTask("hi"), uuidutils.generate_uuid(), + pr.EXECUTE, {}, None, 1.0) + pr.Request.validate(msg.to_dict()) + + def test_request_invalid(self): + msg = { + 'task_name': 1, + 'task_cls': False, + 'arguments': [], + } + self.assertRaises(excp.InvalidFormat, pr.Request.validate, msg) + + def test_request_invalid_action(self): + msg = pr.Request(utils.DummyTask("hi"), uuidutils.generate_uuid(), + pr.EXECUTE, {}, None, 1.0) + msg = msg.to_dict() + msg['action'] = 'NOTHING' + self.assertRaises(excp.InvalidFormat, pr.Request.validate, msg) + + def test_response_progress(self): + msg = pr.Response(pr.PROGRESS, progress=0.5, event_data={}) + pr.Response.validate(msg.to_dict()) + + def test_response_completion(self): + msg = pr.Response(pr.SUCCESS, result=1) + pr.Response.validate(msg.to_dict()) + + def test_response_mixed_invalid(self): + msg = pr.Response(pr.PROGRESS, progress=0.5, event_data={}, result=1) + self.assertRaises(excp.InvalidFormat, pr.Response.validate, msg) + + def test_response_bad_state(self): + msg = 
pr.Response('STUFF') + self.assertRaises(excp.InvalidFormat, pr.Response.validate, msg) + + class TestProtocol(test.TestCase): def setUp(self): @@ -53,6 +115,18 @@ class TestProtocol(test.TestCase): to_dict.update(kwargs) return to_dict + def test_request_transitions(self): + request = self.request() + self.assertEqual(pr.WAITING, request.state) + self.assertIn(request.state, pr.WAITING_STATES) + self.assertRaises(excp.InvalidState, request.transition, pr.SUCCESS) + self.assertFalse(request.transition(pr.WAITING)) + self.assertTrue(request.transition(pr.PENDING)) + self.assertTrue(request.transition(pr.RUNNING)) + self.assertTrue(request.transition(pr.SUCCESS)) + for s in (pr.PENDING, pr.WAITING): + self.assertRaises(excp.InvalidState, request.transition, s) + def test_creation(self): request = self.request() self.assertEqual(request.uuid, self.task_uuid) @@ -60,15 +134,6 @@ class TestProtocol(test.TestCase): self.assertIsInstance(request.result, futures.Future) self.assertFalse(request.result.done()) - def test_str(self): - request = self.request() - self.assertEqual(str(request), - " %s" % self.request_to_dict()) - - def test_repr(self): - expected = '%s:%s' % (self.task.name, self.task_action) - self.assertEqual(repr(self.request()), expected) - def test_to_dict_default(self): self.assertEqual(self.request().to_dict(), self.request_to_dict()) @@ -94,19 +159,20 @@ class TestProtocol(test.TestCase): @mock.patch('taskflow.engines.worker_based.protocol.misc.wallclock') def test_pending_not_expired(self, mocked_wallclock): - mocked_wallclock.side_effect = [1, self.timeout] + mocked_wallclock.side_effect = [0, self.timeout - 1] self.assertFalse(self.request().expired) @mock.patch('taskflow.engines.worker_based.protocol.misc.wallclock') def test_pending_expired(self, mocked_wallclock): - mocked_wallclock.side_effect = [1, self.timeout + 2] + mocked_wallclock.side_effect = [0, self.timeout + 2] self.assertTrue(self.request().expired) 
@mock.patch('taskflow.engines.worker_based.protocol.misc.wallclock') def test_running_not_expired(self, mocked_wallclock): - mocked_wallclock.side_effect = [1, self.timeout + 2] + mocked_wallclock.side_effect = [0, self.timeout + 2] request = self.request() - request.set_running() + request.transition(pr.PENDING) + request.transition(pr.RUNNING) self.assertFalse(request.expired) def test_set_result(self): diff --git a/taskflow/tests/unit/worker_based/test_proxy.py b/taskflow/tests/unit/worker_based/test_proxy.py index 8876c23c..e2dc02e8 100644 --- a/taskflow/tests/unit/worker_based/test_proxy.py +++ b/taskflow/tests/unit/worker_based/test_proxy.py @@ -14,10 +14,11 @@ # License for the specific language governing permissions and limitations # under the License. -import mock import socket import threading +import mock + from taskflow.engines.worker_based import proxy from taskflow import test @@ -65,7 +66,6 @@ class TestProxy(test.MockTestCase): self.conn_inst_mock.Consumer.return_value.__exit__ = mock.MagicMock() # other mocking - self.on_message_mock = mock.MagicMock(name='on_message') self.on_wait_mock = mock.MagicMock(name='on_wait') self.master_mock.attach_mock(self.on_wait_mock, 'on_wait') @@ -84,7 +84,7 @@ class TestProxy(test.MockTestCase): auto_delete=True, channel=self.conn_inst_mock), mock.call.connection.Consumer(queues=self.queue_inst_mock, - callbacks=[self.on_message_mock]), + callbacks=[mock.ANY]), mock.call.connection.Consumer().__enter__(), ] + calls + [ mock.call.connection.Consumer().__exit__(exc_type, mock.ANY, @@ -94,8 +94,8 @@ class TestProxy(test.MockTestCase): def proxy(self, reset_master_mock=False, **kwargs): proxy_kwargs = dict(topic=self.topic, exchange_name=self.exchange_name, - on_message=self.on_message_mock, - url=self.broker_url) + url=self.broker_url, + type_handlers={}) proxy_kwargs.update(kwargs) p = proxy.Proxy(**proxy_kwargs) if reset_master_mock: diff --git a/taskflow/tests/unit/worker_based/test_server.py 
b/taskflow/tests/unit/worker_based/test_server.py index a4eab7a8..2a64c960 100644 --- a/taskflow/tests/unit/worker_based/test_server.py +++ b/taskflow/tests/unit/worker_based/test_server.py @@ -15,11 +15,8 @@ # under the License. import mock - import six -from kombu import exceptions as exc - from taskflow.engines.worker_based import endpoint as ep from taskflow.engines.worker_based import protocol as pr from taskflow.engines.worker_based import server @@ -88,9 +85,9 @@ class TestServer(test.MockTestCase): # check calls master_mock_calls = [ mock.call.Proxy(self.server_topic, self.server_exchange, - s._on_message, url=self.broker_url) + mock.ANY, url=self.broker_url, on_wait=mock.ANY) ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.master_mock.assert_has_calls(master_mock_calls) self.assertEqual(len(s._endpoints), 3) def test_creation_with_endpoints(self): @@ -99,72 +96,11 @@ class TestServer(test.MockTestCase): # check calls master_mock_calls = [ mock.call.Proxy(self.server_topic, self.server_exchange, - s._on_message, url=self.broker_url) + mock.ANY, url=self.broker_url, on_wait=mock.ANY) ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) + self.master_mock.assert_has_calls(master_mock_calls) self.assertEqual(len(s._endpoints), len(self.endpoints)) - def test_on_message_proxy_running_ack_success(self): - request = self.make_request() - s = self.server(reset_master_mock=True) - s._on_message(request, self.message_mock) - - # check calls - master_mock_calls = [ - mock.call.message.ack(), - mock.call.executor.submit(s._process_request, request, - self.message_mock) - ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) - - def test_on_message_proxy_running_ack_failure(self): - self.message_mock.ack.side_effect = exc.MessageStateError('Woot!') - s = self.server(reset_master_mock=True) - s._on_message({}, self.message_mock) - - # check calls - master_mock_calls = [ - mock.call.message.ack() - ] - 
self.assertEqual(self.master_mock.mock_calls, master_mock_calls) - - def test_on_message_proxy_not_running_requeue_success(self): - self.proxy_inst_mock.is_running = False - s = self.server(reset_master_mock=True) - s._on_message({}, self.message_mock) - - # check calls - master_mock_calls = [ - mock.call.message.requeue() - ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) - - def test_on_message_proxy_not_running_requeue_failure(self): - self.message_mock.requeue.side_effect = exc.MessageStateError('Woot!') - self.proxy_inst_mock.is_running = False - s = self.server(reset_master_mock=True) - s._on_message({}, self.message_mock) - - # check calls - master_mock_calls = [ - mock.call.message.requeue() - ] - self.assertEqual(self.master_mock.mock_calls, master_mock_calls) - - @mock.patch('taskflow.engines.worker_based.server.LOG.warning') - def test_on_message_unknown_type(self, mocked_warning): - self.message_mock.properties['type'] = '' - s = self.server() - s._on_message({}, self.message_mock) - self.assertTrue(mocked_warning.called) - - @mock.patch('taskflow.engines.worker_based.server.LOG.warning') - def test_on_message_no_type(self, mocked_warning): - self.message_mock.properties = {} - s = self.server() - s._on_message({}, self.message_mock) - self.assertTrue(mocked_warning.called) - def test_parse_request(self): request = self.make_request() task_cls, action, task_args = server.Server._parse_request(**request) diff --git a/taskflow/tests/utils.py b/taskflow/tests/utils.py index ce3289ec..d7c85b95 100644 --- a/taskflow/tests/utils.py +++ b/taskflow/tests/utils.py @@ -15,6 +15,7 @@ # under the License. 
import contextlib +import string import threading import six @@ -346,3 +347,16 @@ class WaitForOneFromTask(SaveOrderTask): if name not in self.wait_for or state not in self.wait_states: return self.event.set() + + +def make_many(amount, task_cls=DummyTask, offset=0): + name_pool = string.ascii_lowercase + string.ascii_uppercase + tasks = [] + while amount > 0: + if offset >= len(name_pool): + raise AssertionError('Name pool size to small (%s < %s)' + % (len(name_pool), offset + 1)) + tasks.append(task_cls(name=name_pool[offset])) + offset += 1 + amount -= 1 + return tasks diff --git a/taskflow/types/cache.py b/taskflow/types/cache.py new file mode 100644 index 00000000..72214fed --- /dev/null +++ b/taskflow/types/cache.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import six + +from taskflow.utils import lock_utils as lu +from taskflow.utils import reflection + + +class ExpiringCache(object): + """Represents a thread-safe time-based expiring cache. + + NOTE(harlowja): the values in this cache must have a expired attribute that + can be used to determine if the key and associated value has expired or if + it has not. 
+ """ + + def __init__(self): + self._data = {} + self._lock = lu.ReaderWriterLock() + + def __setitem__(self, key, value): + """Set a value in the cache.""" + with self._lock.write_lock(): + self._data[key] = value + + def __len__(self): + """Returns how many items are in this cache.""" + with self._lock.read_lock(): + return len(self._data) + + def get(self, key, default=None): + """Retrieve a value from the cache (returns default if not found).""" + with self._lock.read_lock(): + return self._data.get(key, default) + + def __getitem__(self, key): + """Retrieve a value from the cache.""" + with self._lock.read_lock(): + return self._data[key] + + def __delitem__(self, key): + """Delete a key & value from the cache.""" + with self._lock.write_lock(): + del self._data[key] + + def cleanup(self, on_expired_callback=None): + """Delete out-dated keys & values from the cache.""" + with self._lock.write_lock(): + expired_values = [(k, v) for k, v in six.iteritems(self._data) + if v.expired] + for (k, _v) in expired_values: + del self._data[k] + if on_expired_callback: + arg_c = len(reflection.get_callable_args(on_expired_callback)) + for (k, v) in expired_values: + if arg_c == 2: + on_expired_callback(k, v) + else: + on_expired_callback(v) diff --git a/taskflow/types/fsm.py b/taskflow/types/fsm.py new file mode 100644 index 00000000..cbe85b78 --- /dev/null +++ b/taskflow/types/fsm.py @@ -0,0 +1,290 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +try: + from collections import OrderedDict # noqa +except ImportError: + from ordereddict import OrderedDict # noqa + +import prettytable +import six + +from taskflow import exceptions as excp + + +class _Jump(object): + """A FSM transition tracks this data while jumping.""" + def __init__(self, name, on_enter, on_exit): + self.name = name + self.on_enter = on_enter + self.on_exit = on_exit + + +class NotInitialized(excp.TaskFlowException): + """Error raised when an action is attempted on a not inited machine.""" + + +class FSM(object): + """A finite state machine. + + This state machine can be used to automatically run a given set of + transitions and states in response to events (either from callbacks or from + generator/iterator send() values, see PEP 342). On each triggered event, a + on_enter and on_exit callback can also be provided which will be called to + perform some type of action on leaving a prior state and before entering a + new state. + + NOTE(harlowja): reactions will *only* be called when the generator/iterator + from run_iter() does *not* send back a new event (they will always be + called if the run() method is used). This allows for two unique ways (these + ways can also be intermixed) to use this state machine when using + run_iter(); one where *external* events trigger the next state transition + and one where *internal* reaction callbacks trigger the next state + transition. The other way to use this state machine is to skip using run() + or run_iter() completely and use the process_event() method explicitly and + trigger the events via some *external* functionality. 
+ """ + def __init__(self, start_state): + self._transitions = {} + self._states = OrderedDict() + self._start_state = start_state + self._current = None + + @property + def start_state(self): + return self._start_state + + @property + def current_state(self): + if self._current is not None: + return self._current.name + return None + + @property + def terminated(self): + """Returns whether the state machine is in a terminal state.""" + if self._current is None: + return False + return self._states[self._current.name]['terminal'] + + def add_state(self, state, terminal=False, on_enter=None, on_exit=None): + """Adds a given state to the state machine. + + The on_enter and on_exit callbacks, if provided will be expected to + take two positional parameters, these being the state being exited (for + on_exit) or the state being entered (for on_enter) and a second + parameter which is the event that is being processed that caused the + state transition. + """ + if state in self._states: + raise excp.Duplicate("State '%s' already defined" % state) + if on_enter is not None: + assert six.callable(on_enter), "On enter callback must be callable" + if on_exit is not None: + assert six.callable(on_exit), "On exit callback must be callable" + self._states[state] = { + 'terminal': bool(terminal), + 'reactions': {}, + 'on_enter': on_enter, + 'on_exit': on_exit, + } + self._transitions[state] = OrderedDict() + + def add_reaction(self, state, event, reaction, *args, **kwargs): + """Adds a reaction that may get triggered by the given event & state. + + Reaction callbacks may (depending on how the state machine is ran) be + used after an event is processed (and a transition occurs) to cause the + machine to react to the newly arrived at stable state. 
+ + These callbacks are expected to accept three default positional + parameters (although more can be passed in via *args and **kwargs, + these will automatically get provided to the callback when it is + activated *ontop* of the three default). The three default parameters + are the last stable state, the new stable state and the event that + caused the transition to this new stable state to be arrived at. + + The expected result of a callback is expected to be a new event that + the callback wants the state machine to react to. This new event + may (depending on how the state machine is ran) get processed (and + this process typically repeats) until the state machine reaches a + terminal state. + """ + if state not in self._states: + raise excp.NotFound("Can not add a reaction to event '%s' for an" + " undefined state '%s'" % (event, state)) + assert six.callable(reaction), "Reaction callback must be callable" + if event not in self._states[state]['reactions']: + self._states[state]['reactions'][event] = (reaction, args, kwargs) + else: + raise excp.Duplicate("State '%s' reaction to event '%s'" + " already defined" % (state, event)) + + def add_transition(self, start, end, event): + """Adds an allowed transition from start -> end for the given event.""" + if start not in self._states: + raise excp.NotFound("Can not add a transition on event '%s' that" + " starts in a undefined state '%s'" % (event, + start)) + if end not in self._states: + raise excp.NotFound("Can not add a transition on event '%s' that" + " ends in a undefined state '%s'" % (event, + end)) + self._transitions[start][event] = _Jump(end, + self._states[end]['on_enter'], + self._states[start]['on_exit']) + + def process_event(self, event): + """Trigger a state change in response to the provided event.""" + current = self._current + if current is None: + raise NotInitialized("Can only process events after" + " being initialized (not before)") + if self._states[current.name]['terminal']: + raise 
excp.InvalidState("Can not transition from terminal" + " state '%s' on event '%s'" + % (current.name, event)) + if event not in self._transitions[current.name]: + raise excp.NotFound("Can not transition from state '%s' on" + " event '%s' (no defined transition)" + % (current.name, event)) + replacement = self._transitions[current.name][event] + if current.on_exit is not None: + current.on_exit(current.name, event) + if replacement.on_enter is not None: + replacement.on_enter(replacement.name, event) + self._current = replacement + return ( + self._states[replacement.name]['reactions'].get(event), + self._states[replacement.name]['terminal'], + ) + + def initialize(self): + """Sets up the state machine (sets current state to start state...).""" + if self._start_state not in self._states: + raise excp.NotFound("Can not start from a undefined" + " state '%s'" % (self._start_state)) + if self._states[self._start_state]['terminal']: + raise excp.InvalidState("Can not start from a terminal" + " state '%s'" % (self._start_state)) + self._current = _Jump(self._start_state, None, None) + + def run(self, event, initialize=True): + """Runs the state machine, using reactions only.""" + for transition in self.run_iter(event, initialize=initialize): + pass + + def run_iter(self, event, initialize=True): + """Returns a iterator/generator that will run the state machine. + + NOTE(harlowja): only one runner iterator/generator should be active for + a machine, if this is not observed then it is possible for + initialization and other local state to be corrupted and cause issues + when running... 
+ """ + if initialize: + self.initialize() + while True: + old_state = self.current_state + reaction, terminal = self.process_event(event) + new_state = self.current_state + try: + sent_event = yield (old_state, new_state) + except GeneratorExit: + break + if terminal: + break + if reaction is None and sent_event is None: + raise excp.NotFound("Unable to progress since no reaction (or" + " sent event) has been made available in" + " new state '%s' (moved to from state '%s'" + " in response to event '%s')" + % (new_state, old_state, event)) + elif sent_event is not None: + event = sent_event + else: + cb, args, kwargs = reaction + event = cb(old_state, new_state, event, *args, **kwargs) + + def __contains__(self, state): + return state in self._states + + @property + def states(self): + """Returns the state names.""" + return list(six.iterkeys(self._states)) + + @property + def events(self): + """Returns how many events exist.""" + c = 0 + for state in six.iterkeys(self._states): + c += len(self._transitions[state]) + return c + + def __iter__(self): + """Iterates over (start, event, end) transition tuples.""" + for state in six.iterkeys(self._states): + for event, target in six.iteritems(self._transitions[state]): + yield (state, event, target.name) + + def pformat(self, sort=True): + """Pretty formats the state + transition table into a string. + + NOTE(harlowja): the sort parameter can be provided to sort the states + and transitions by sort order; with it being provided as false the rows + will be iterated in addition order instead. 
+ """ + def orderedkeys(data): + if sort: + return sorted(six.iterkeys(data)) + return list(six.iterkeys(data)) + tbl = prettytable.PrettyTable( + ["Start", "Event", "End", "On Enter", "On Exit"]) + for state in orderedkeys(self._states): + prefix_markings = [] + if self.current_state == state: + prefix_markings.append("@") + postfix_markings = [] + if self.start_state == state: + postfix_markings.append("^") + if self._states[state]['terminal']: + postfix_markings.append("$") + pretty_state = "%s%s" % ("".join(prefix_markings), state) + if postfix_markings: + pretty_state += "[%s]" % "".join(postfix_markings) + if self._transitions[state]: + for event in orderedkeys(self._transitions[state]): + target = self._transitions[state][event] + row = [pretty_state, event, target.name] + if target.on_enter is not None: + try: + row.append(target.on_enter.__name__) + except AttributeError: + row.append(target.on_enter) + else: + row.append('') + if target.on_exit is not None: + try: + row.append(target.on_exit.__name__) + except AttributeError: + row.append(target.on_exit) + else: + row.append('') + tbl.add_row(row) + else: + tbl.add_row([pretty_state, "", "", "", ""]) + return tbl.get_string(print_empty=True) diff --git a/taskflow/types/graph.py b/taskflow/types/graph.py index 358f018a..d3e2bae2 100644 --- a/taskflow/types/graph.py +++ b/taskflow/types/graph.py @@ -31,8 +31,7 @@ class DiGraph(nx.DiGraph): return self def get_edge_data(self, u, v, default=None): - """Returns a *copy* of the attribute dictionary associated with edges - between (u, v). + """Returns a *copy* of the edge attribute dictionary between (u, v). NOTE(harlowja): this differs from the networkx get_edge_data() as that function does not return a copy (but returns a reference to the actual @@ -48,7 +47,9 @@ class DiGraph(nx.DiGraph): return nx.topological_sort(self) def pformat(self): - """Pretty formats your graph into a string representation that includes + """Pretty formats your graph into a string. 
+ + This pretty formatted string representation includes many useful details about your graph, including; name, type, frozeness, node count, nodes, edge count, edges, graph density and graph cycles (if any). """ diff --git a/taskflow/types/latch.py b/taskflow/types/latch.py new file mode 100644 index 00000000..9aa2622d --- /dev/null +++ b/taskflow/types/latch.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import threading + +from taskflow.types import timing as tt + + +class Latch(object): + """A class that ensures N-arrivals occur before unblocking.""" + + def __init__(self, count): + count = int(count) + if count <= 0: + raise ValueError("Count must be greater than zero") + self._count = count + self._cond = threading.Condition() + + @property + def needed(self): + """Returns how many decrements are needed before latch is released.""" + return max(0, self._count) + + def countdown(self): + """Decrements the internal counter due to an arrival.""" + self._cond.acquire() + try: + self._count -= 1 + if self._count <= 0: + self._cond.notify_all() + finally: + self._cond.release() + + def wait(self, timeout=None): + """Waits until the latch is released. 
+ + NOTE(harlowja): if a timeout is provided this function will wait + until that timeout expires, if the latch has been released before the + timeout expires then this will return True, otherwise it will + return False. + """ + w = None + if timeout is not None: + w = tt.StopWatch(timeout).start() + self._cond.acquire() + try: + while self._count > 0: + if w is not None: + if w.expired(): + return False + else: + timeout = w.leftover() + self._cond.wait(timeout) + return True + finally: + self._cond.release() diff --git a/taskflow/types/timing.py b/taskflow/types/timing.py new file mode 100644 index 00000000..cd822ae7 --- /dev/null +++ b/taskflow/types/timing.py @@ -0,0 +1,125 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import threading + +from taskflow.utils import misc + + +class Timeout(object): + """An object which represents a timeout. + + This object has the ability to be interrupted before the actual timeout + is reached. + """ + def __init__(self, timeout): + if timeout < 0: + raise ValueError("Timeout must be >= 0 and not %s" % (timeout)) + self._timeout = timeout + self._event = threading.Event() + + def interrupt(self): + self._event.set() + + def is_stopped(self): + return self._event.is_set() + + def wait(self): + self._event.wait(self._timeout) + + def reset(self): + self._event.clear() + + +class StopWatch(object): + """A simple timer/stopwatch helper class. 
+ + Inspired by: apache-commons-lang java stopwatch. + + Not thread-safe. + """ + _STARTED = 'STARTED' + _STOPPED = 'STOPPED' + + def __init__(self, duration=None): + self._duration = duration + self._started_at = None + self._stopped_at = None + self._state = None + + def start(self): + if self._state == self._STARTED: + return self + self._started_at = misc.wallclock() + self._stopped_at = None + self._state = self._STARTED + return self + + def elapsed(self): + if self._state == self._STOPPED: + return float(self._stopped_at - self._started_at) + elif self._state == self._STARTED: + return float(misc.wallclock() - self._started_at) + else: + raise RuntimeError("Can not get the elapsed time of an invalid" + " stopwatch") + + def __enter__(self): + self.start() + return self + + def __exit__(self, type, value, traceback): + try: + self.stop() + except RuntimeError: + pass + # NOTE(harlowja): don't silence the exception. + return False + + def leftover(self): + if self._duration is None: + raise RuntimeError("Can not get the leftover time of a watch that" + " has no duration") + if self._state != self._STARTED: + raise RuntimeError("Can not get the leftover time of a stopwatch" + " that has not been started") + end_time = self._started_at + self._duration + return max(0.0, end_time - misc.wallclock()) + + def expired(self): + if self._duration is None: + return False + if self.elapsed() > self._duration: + return True + return False + + def resume(self): + if self._state == self._STOPPED: + self._state = self._STARTED + return self + else: + raise RuntimeError("Can not resume a stopwatch that has not been" + " stopped") + + def stop(self): + if self._state == self._STOPPED: + return self + if self._state != self._STARTED: + raise RuntimeError("Can not stop a stopwatch that has not been" + " started") + self._stopped_at = misc.wallclock() + self._state = self._STOPPED + return self diff --git a/taskflow/types/tree.py b/taskflow/types/tree.py new file mode 100644 
index 00000000..41369b04 --- /dev/null +++ b/taskflow/types/tree.py @@ -0,0 +1,182 @@ +# -*- coding: utf-8 -*- + +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import six + + +class FrozenNode(Exception): + """Exception raised when a frozen node is modified.""" + + +class _DFSIter(object): + """Depth first iterator (non-recursive) over the child nodes.""" + + def __init__(self, root, include_self=False): + self.root = root + self.include_self = bool(include_self) + + def __iter__(self): + stack = [] + if self.include_self: + stack.append(self.root) + else: + for child_node in self.root: + stack.append(child_node) + while stack: + node = stack.pop() + # Visit the node. + yield node + # Traverse the left & right subtree. 
+ for child_node in reversed(list(node)): + stack.append(child_node) + + +class Node(object): + """A n-ary node class that can be used to create tree structures.""" + + def __init__(self, item, **kwargs): + self.item = item + self.parent = None + self.metadata = dict(kwargs) + self._children = [] + self._frozen = False + + def _frozen_add(self, child): + raise FrozenNode("Frozen node(s) can't be modified") + + def freeze(self): + if not self._frozen: + for n in self: + n.freeze() + self.add = self._frozen_add + self._frozen = True + + def add(self, child): + child.parent = self + self._children.append(child) + + def empty(self): + """Returns if the node is a leaf node.""" + return self.child_count() == 0 + + def path_iter(self, include_self=True): + """Yields back the path from this node to the root node.""" + if include_self: + node = self + else: + node = self.parent + while node is not None: + yield node + node = node.parent + + def find(self, item): + """Returns the node for an item if it exists in this node. + + This will search not only this node but also any children nodes and + finally if nothing is found then None is returned instead of a node + object. + """ + for n in self.dfs_iter(include_self=True): + if n.item == item: + return n + return None + + def __contains__(self, item): + """Returns if this item exists in this node or this nodes children.""" + return self.find(item) is not None + + def __getitem__(self, index): + # NOTE(harlowja): 0 is the right most index, len - 1 is the left most + return self._children[index] + + def pformat(self): + """Recursively formats a node into a nice string representation. 
+ + Example Input: + yahoo = tt.Node("CEO") + yahoo.add(tt.Node("Infra")) + yahoo[0].add(tt.Node("Boss")) + yahoo[0][0].add(tt.Node("Me")) + yahoo.add(tt.Node("Mobile")) + yahoo.add(tt.Node("Mail")) + + Example Output: + CEO + |__Infra + | |__Boss + | |__Me + |__Mobile + |__Mail + """ + def _inner_pformat(node, level): + if level == 0: + yield six.text_type(node.item) + prefix = "" + else: + yield "__%s" % six.text_type(node.item) + prefix = " " * 2 + children = list(node) + for (i, child) in enumerate(children): + for (j, text) in enumerate(_inner_pformat(child, level + 1)): + if j == 0 or i + 1 < len(children): + text = prefix + "|" + text + else: + text = prefix + " " + text + yield text + expected_lines = self.child_count(only_direct=False) + accumulator = six.StringIO() + for i, line in enumerate(_inner_pformat(self, 0)): + accumulator.write(line) + if i < expected_lines: + accumulator.write('\n') + return accumulator.getvalue() + + def child_count(self, only_direct=True): + """Returns how many children this node has. + + This can be either only the direct children of this node or inclusive + of all children nodes of this node (children of children and so-on). + + NOTE(harlowja): it does not account for the current node in this count. 
+ """ + if not only_direct: + count = 0 + for _node in self.dfs_iter(): + count += 1 + return count + return len(self._children) + + def __iter__(self): + """Iterates over the direct children of this node (right->left).""" + for c in self._children: + yield c + + def index(self, item): + """Finds the child index of a given item, searchs in added order.""" + index_at = None + for (i, child) in enumerate(self._children): + if child.item == item: + index_at = i + break + if index_at is None: + raise ValueError("%s is not contained in any child" % (item)) + return index_at + + def dfs_iter(self, include_self=False): + """Depth first iteration (non-recursive) over the child nodes.""" + return _DFSIter(self, include_self=include_self) diff --git a/taskflow/utils/eventlet_utils.py b/taskflow/utils/eventlet_utils.py index 347fba31..d80f97e3 100644 --- a/taskflow/utils/eventlet_utils.py +++ b/taskflow/utils/eventlet_utils.py @@ -15,15 +15,14 @@ # under the License. import logging -import threading from concurrent import futures try: - from eventlet.green import threading as green_threading + from eventlet.green import threading as greenthreading from eventlet import greenpool - from eventlet import patcher - from eventlet import queue + from eventlet import patcher as greenpatcher + from eventlet import queue as greenqueue EVENTLET_AVAILABLE = True except ImportError: EVENTLET_AVAILABLE = False @@ -33,9 +32,10 @@ from taskflow.utils import lock_utils LOG = logging.getLogger(__name__) -# NOTE(harlowja): this object signals to threads that they should stop -# working and rest in peace. 
-_TOMBSTONE = object() +_DONE_STATES = frozenset([ + futures._base.CANCELLED_AND_NOTIFIED, + futures._base.FINISHED, +]) _DONE_STATES = frozenset([ futures._base.CANCELLED_AND_NOTIFIED, @@ -62,26 +62,29 @@ class _WorkItem(object): class _Worker(object): - def __init__(self, executor, work_queue, worker_id): + def __init__(self, executor, work, work_queue): self.executor = executor + self.work = work self.work_queue = work_queue - self.worker_id = worker_id def __call__(self): + # Run our main piece of work. try: + self.work.run() + finally: + # Consume any delayed work before finishing (this is how we finish + # work that was to big for the pool size, but needs to be finished + # no matter). while True: - work = self.work_queue.get(block=True) - if work is _TOMBSTONE: - # NOTE(harlowja): give notice to other workers (this is - # basically a chain of tombstone calls that will cause all - # the workers on the queue to eventually shut-down). - self.work_queue.put(_TOMBSTONE) + try: + w = self.work_queue.get_nowait() + except greenqueue.Empty: break else: - work.run() - except BaseException: - LOG.critical("Exception in worker %s of '%s'", - self.worker_id, self.executor, exc_info=True) + try: + w.run() + finally: + self.work_queue.task_done() class GreenFuture(futures.Future): @@ -93,8 +96,8 @@ class GreenFuture(futures.Future): # functions will correctly yield to eventlet. If this is not done then # waiting on the future never actually causes the greenthreads to run # and thus you wait for infinity. 
- if not patcher.is_monkey_patched('threading'): - self._condition = green_threading.Condition() + if not greenpatcher.is_monkey_patched('threading'): + self._condition = greenthreading.Condition() class GreenExecutor(futures.Executor): @@ -102,44 +105,59 @@ class GreenExecutor(futures.Executor): def __init__(self, max_workers=1000): assert EVENTLET_AVAILABLE, 'eventlet is needed to use a green executor' - assert int(max_workers) > 0, 'Max workers must be greater than zero' self._max_workers = int(max_workers) + if self._max_workers <= 0: + raise ValueError('Max workers must be greater than zero') self._pool = greenpool.GreenPool(self._max_workers) - self._work_queue = queue.LightQueue() - self._shutdown_lock = threading.RLock() + self._delayed_work = greenqueue.Queue() + self._shutdown_lock = greenthreading.Lock() self._shutdown = False + self._workers_created = 0 + + @property + def workers_created(self): + return self._workers_created + + @property + def amount_delayed(self): + return self._delayed_work.qsize() + + @property + def alive(self): + return not self._shutdown @lock_utils.locked(lock='_shutdown_lock') def submit(self, fn, *args, **kwargs): if self._shutdown: raise RuntimeError('cannot schedule new futures after shutdown') f = GreenFuture() - w = _WorkItem(f, fn, args, kwargs) - self._work_queue.put(w) - # Spin up any new workers (since they are spun up on demand and - # not at executor initialization). - self._spin_up() + work = _WorkItem(f, fn, args, kwargs) + if not self._spin_up(work): + self._delayed_work.put(work) return f - def _spin_up(self): - cur_am = (self._pool.running() + self._pool.waiting()) - if cur_am < self._max_workers and cur_am < self._work_queue.qsize(): - # Spin up a new worker to do the work as we are behind. 
- worker = _Worker(self, self._work_queue, cur_am + 1) - self._pool.spawn(worker) + def _spin_up(self, work): + alive = self._pool.running() + self._pool.waiting() + if alive < self._max_workers: + self._pool.spawn_n(_Worker(self, work, self._delayed_work)) + self._workers_created += 1 + return True + return False def shutdown(self, wait=True): with self._shutdown_lock: self._shutdown = True - self._work_queue.put(_TOMBSTONE) if wait: self._pool.waitall() + # NOTE(harlowja): Fixed in eventlet 0.15 (remove when able to use) + if not self._delayed_work.empty(): + self._delayed_work.join() class _GreenWaiter(object): """Provides the event that wait_for_any() blocks on.""" def __init__(self): - self.event = green_threading.Event() + self.event = greenthreading.Event() def add_result(self, future): self.event.set() diff --git a/taskflow/utils/kazoo_utils.py b/taskflow/utils/kazoo_utils.py index 8ca8bf52..ae62e880 100644 --- a/taskflow/utils/kazoo_utils.py +++ b/taskflow/utils/kazoo_utils.py @@ -15,9 +15,11 @@ # under the License. from kazoo import client +from kazoo import exceptions as k_exc import six from taskflow import exceptions as exc +from taskflow.utils import reflection def _parse_hosts(hosts): @@ -33,6 +35,92 @@ def _parse_hosts(hosts): return hosts +def prettify_failures(failures, limit=-1): + """Prettifies a checked commits failures (ignores sensitive data...). + + Example input and output: + + >>> from taskflow.utils import kazoo_utils + >>> conf = {"hosts": ['localhost:2181']} + >>> c = kazoo_utils.make_client(conf) + >>> c.start(timeout=1) + >>> txn = c.transaction() + >>> txn.create("/test") + >>> txn.check("/test", 2) + >>> txn.delete("/test") + >>> try: + ... kazoo_utils.checked_commit(txn) + ... except kazoo_utils.KazooTransactionException as e: + ... print(kazoo_utils.prettify_failures(e.failures, limit=1)) + ... + RolledBackError@Create(path='/test') and 2 more... 
+ >>> c.stop() + >>> c.close() + """ + prettier = [] + for (op, r) in failures: + pretty_op = reflection.get_class_name(op, fully_qualified=False) + # Pick off a few attributes that are meaningful (but one that don't + # show actual data, which might not be desired to show...). + selected_attrs = [ + "path=%r" % op.path, + ] + try: + if op.version != -1: + selected_attrs.append("version=%s" % op.version) + except AttributeError: + pass + pretty_op += "(%s)" % (", ".join(selected_attrs)) + pretty_cause = reflection.get_class_name(r, fully_qualified=False) + prettier.append("%s@%s" % (pretty_cause, pretty_op)) + if limit <= 0 or len(prettier) <= limit: + return ", ".join(prettier) + else: + leftover = prettier[limit:] + prettier = prettier[0:limit] + return ", ".join(prettier) + " and %s more..." % len(leftover) + + +class KazooTransactionException(k_exc.KazooException): + """Exception raised when a checked commit fails.""" + + def __init__(self, message, failures): + super(KazooTransactionException, self).__init__(message) + self._failures = tuple(failures) + + @property + def failures(self): + return self._failures + + +def checked_commit(txn): + # Until https://github.com/python-zk/kazoo/pull/224 is fixed we have + # to workaround the transaction failing silently. 
+ if not txn.operations: + return [] + results = txn.commit() + failures = [] + for op, result in six.moves.zip(txn.operations, results): + if isinstance(result, k_exc.KazooException): + failures.append((op, result)) + if len(results) < len(txn.operations): + raise KazooTransactionException( + "Transaction returned %s results, this is less than" + " the number of expected transaction operations %s" + % (len(results), len(txn.operations)), failures) + if len(results) > len(txn.operations): + raise KazooTransactionException( + "Transaction returned %s results, this is greater than" + " the number of expected transaction operations %s" + % (len(results), len(txn.operations)), failures) + if failures: + raise KazooTransactionException( + "Transaction with %s operations failed: %s" + % (len(txn.operations), + prettify_failures(failures, limit=1)), failures) + return results + + def finalize_client(client): """Stops and closes a client, even if it wasn't started.""" client.stop() @@ -46,8 +134,12 @@ def finalize_client(client): def check_compatible(client, min_version=None, max_version=None): - """Checks if a kazoo client is backed by a zookeeper server version - that satisfies a given min (inclusive) and max (inclusive) version range. + """Checks if a kazoo client is backed by a zookeeper server version. + + This check will verify that the zookeeper server version that the client + is connected to satisfies a given minimum version (inclusive) and + maximum (inclusive) version range. If the server is not in the provided + version range then a exception is raised indiciating this. 
""" server_version = None if min_version: diff --git a/taskflow/utils/lock_utils.py b/taskflow/utils/lock_utils.py index 942e27bb..dbc0b778 100644 --- a/taskflow/utils/lock_utils.py +++ b/taskflow/utils/lock_utils.py @@ -48,21 +48,35 @@ def try_lock(lock): def locked(*args, **kwargs): - """A decorator that looks for a given attribute (typically a lock or a list - of locks) and before executing the decorated function uses the given lock - or list of locks as a context manager, automatically releasing on exit. + """A locking decorator. + + It will look for a provided attribute (typically a lock or a list + of locks) on the first argument of the function decorated (typically this + is the 'self' object) and before executing the decorated function it + activates the given lock or list of locks as a context manager, + automatically releasing that lock on exit. + + NOTE(harlowja): if no attribute is provided then by default the attribute + named '_lock' is looked for in the instance object this decorator is + attached to. + + NOTE(harlowja): when we get the wrapt module approved we can address the + correctness of this decorator with regards to classmethods, to keep sanity + and correctness it is recommended to avoid using this on classmethods, once + https://review.openstack.org/#/c/94754/ is merged this will be refactored + and that use-case can be provided in a correct manner. 
""" def decorator(f): attr_name = kwargs.get('lock', '_lock') - @misc.wraps(f) - def wrapper(*args, **kwargs): - lock = getattr(args[0], attr_name) + @six.wraps(f) + def wrapper(self, *args, **kwargs): + lock = getattr(self, attr_name) if isinstance(lock, (tuple, list)): lock = MultiLock(locks=list(lock)) with lock: - return f(*args, **kwargs) + return f(self, *args, **kwargs) return wrapper @@ -244,8 +258,11 @@ class ReaderWriterLock(_ReaderWriterLockBase): class DummyReaderWriterLock(_ReaderWriterLockBase): - """A dummy reader/writer lock that doesn't lock anything but provides same - functions as a normal reader/writer lock class. + """A dummy reader/writer lock. + + This dummy lock doesn't lock anything but provides the same functions as a + normal reader/writer lock class and can be useful in unit tests or other + similar scenarios (do *not* use it if locking is actually required). """ @contextlib.contextmanager def write_lock(self): @@ -271,11 +288,10 @@ class DummyReaderWriterLock(_ReaderWriterLockBase): class MultiLock(object): - """A class which can attempt to obtain many locks at once and release - said locks when exiting. + """A class which attempts to obtain & release many locks at once. - Useful as a context manager around many locks (instead of having to nest - said individual context managers). + It is typically useful as a context manager around many locks (instead of + having to nest individual lock context managers). """ def __init__(self, locks): @@ -318,7 +334,9 @@ class MultiLock(object): class _InterProcessLock(object): - """Lock implementation which allows multiple locks, working around + """An interprocess locking implementation. + + This is a lock implementation which allows multiple locks, working around issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does not require any cleanup. 
Since the lock is always held on a file descriptor rather than outside of the process, the lock gets dropped @@ -377,7 +395,6 @@ class _InterProcessLock(object): try: self.unlock() self.lockfile.close() - # This is fixed in: https://review.openstack.org/70506 LOG.debug('Released file lock "%s"', self.fname) except IOError: LOG.exception("Could not release the acquired lock `%s`", @@ -386,6 +403,9 @@ class _InterProcessLock(object): def __exit__(self, exc_type, exc_val, exc_tb): self.release() + def exists(self): + return os.path.exists(self.fname) + def trylock(self): raise NotImplementedError() diff --git a/taskflow/utils/misc.py b/taskflow/utils/misc.py index d2360c9b..d34e0fb4 100644 --- a/taskflow/utils/misc.py +++ b/taskflow/utils/misc.py @@ -20,7 +20,6 @@ import contextlib import copy import datetime import errno -import functools import inspect import keyword import logging @@ -28,7 +27,6 @@ import os import re import string import sys -import threading import time import traceback @@ -50,9 +48,11 @@ _SCHEME_REGEX = re.compile(r"^([A-Za-z][A-Za-z0-9+.-]*):") def merge_uri(uri_pieces, conf): - """Merges the username, password, hostname, and query params of a uri into - the given configuration (does not overwrite the configuration keys if they - already exist) and returns the adjusted configuration. + """Merges a parsed uri into the given configuration dictionary. + + Merges the username, password, hostname, and query params of a uri into + the given configuration (it does not overwrite the configuration keys if + they already exist) and returns the adjusted configuration. NOTE(harlowja): does not merge the path, scheme or fragment. """ @@ -72,9 +72,7 @@ def merge_uri(uri_pieces, conf): def parse_uri(uri, query_duplicates=False): - """Parses a uri into its components and returns a dictionary containing - those components. - """ + """Parses a uri into its components.""" # Do some basic validation before continuing... 
if not isinstance(uri, six.string_types): raise TypeError("Can only parse string types to uri data, " @@ -107,26 +105,15 @@ def parse_uri(uri, query_duplicates=False): query_params = tmp_query_params else: query_params = {} - uri_pieces = { - 'scheme': parsed.scheme, - 'username': parsed.username, - 'password': parsed.password, - 'fragment': parsed.fragment, - 'path': parsed.path, - 'params': query_params, - } - for k in ('hostname', 'port'): - try: - uri_pieces[k] = getattr(parsed, k) - except (IndexError, ValueError): - # The underlying network_utils throws when the host string is empty - # which it may be in cases where it is not provided. - # - # NOTE(harlowja): when https://review.openstack.org/#/c/86921/ gets - # merged we can just remove this since that error will no longer - # occur. - uri_pieces[k] = None - return AttrDict(**uri_pieces) + return AttrDict( + scheme=parsed.scheme, + username=parsed.username, + password=parsed.password, + fragment=parsed.fragment, + path=parsed.path, + params=query_params, + hostname=parsed.hostname, + port=parsed.port) def binary_encode(text, encoding='utf-8'): @@ -176,9 +163,17 @@ def decode_json(raw_data, root_types=(dict,)): class cachedproperty(object): - """Descriptor that can be placed on instance methods to translate + """A descriptor property that is only evaluated once.. + + This caching descriptor can be placed on instance methods to translate those methods into properties that will be cached in the instance (avoiding - repeated creation checking logic to do the equivalent). + repeated attribute checking logic to do the equivalent). + + NOTE(harlowja): by default the property that will be saved will be under + the decorated methods name prefixed with an underscore. For example if we + were to attach this descriptor to an instance method 'get_thing(self)' the + cached property would be stored under '_get_thing' in the self object + after the first call to 'get_thing' occurs. 
""" def __init__(self, fget): # If a name is provided (as an argument) then this will be the string @@ -225,17 +220,9 @@ def wallclock(): return time.time() -def wraps(fn): - """This will not be needed in python 3.2 or greater which already has this - built-in to its functools.wraps method. - """ - - def wrapper(f): - f = functools.wraps(fn)(f) - f.__wrapped__ = getattr(fn, '__wrapped__', fn) - return f - - return wrapper +def millis_to_datetime(milliseconds): + """Converts number of milliseconds (from epoch) into a datetime object.""" + return datetime.datetime.fromtimestamp(float(milliseconds) / 1000) def millis_to_datetime(milliseconds): @@ -313,9 +300,7 @@ _ASCII_WORD_SYMBOLS = frozenset(string.ascii_letters + string.digits + '_') def is_valid_attribute_name(name, allow_self=False, allow_hidden=False): - """Validates that a string name is a valid/invalid python attribute - name. - """ + """Checks that a string is a valid/invalid python attribute name.""" return all(( isinstance(name, six.string_types), len(name) > 0, @@ -332,8 +317,12 @@ def is_valid_attribute_name(name, allow_self=False, allow_hidden=False): class AttrDict(dict): - """Helper utility dict sub-class to create a class that can be accessed by - attribute name from a dictionary that contains a set of keys and values. + """Dictionary subclass that allows for attribute based access. + + This subclass allows for accessing a dictionaries keys and values by + accessing those keys as regular attributes. Keys that are not valid python + attribute names can not of course be acccessed/set (those keys must be + accessed/set by the traditional dictionary indexing operators instead). """ NO_ATTRS = tuple(reflection.get_member_names(dict)) @@ -366,35 +355,13 @@ class AttrDict(dict): self[name] = value -class Timeout(object): - """An object which represents a timeout. - - This object has the ability to be interrupted before the actual timeout - is reached. 
- """ - def __init__(self, timeout): - if timeout < 0: - raise ValueError("Timeout must be >= 0 and not %s" % (timeout)) - self._timeout = timeout - self._event = threading.Event() - - def interrupt(self): - self._event.set() - - def is_stopped(self): - return self._event.is_set() - - def wait(self): - self._event.wait(self._timeout) - - def reset(self): - self._event.clear() - - class ExponentialBackoff(object): - """An iterable object that will yield back an exponential delay sequence - provided an exponent and a number of items to yield. This object may be - iterated over multiple times (yielding the same sequence each time). + """An iterable object that will yield back an exponential delay sequence. + + This objects provides for a configurable exponent, count of numbers + to generate, and a maximum number that will be returned. This object may + also be iterated over multiple times (yielding the same sequence each + time). """ def __init__(self, count, exponent=2, max_backoff=3600): self.count = max(0, int(count)) @@ -411,18 +378,6 @@ class ExponentialBackoff(object): return "ExponentialBackoff: %s" % ([str(v) for v in self]) -def as_bool(val): - """Converts an arbitrary value into a boolean.""" - if isinstance(val, bool): - return val - if isinstance(val, six.string_types): - if val.lower() in ('f', 'false', '0', 'n', 'no'): - return False - if val.lower() in ('t', 'true', '1', 'y', 'yes'): - return True - return bool(val) - - def as_int(obj, quiet=False): """Converts an arbitrary value into a integer.""" # Try "2" -> 2 @@ -459,91 +414,13 @@ def ensure_tree(path): raise -class StopWatch(object): - """A simple timer/stopwatch helper class. - - Inspired by: apache-commons-lang java stopwatch. - - Not thread-safe. 
- """ - _STARTED = 'STARTED' - _STOPPED = 'STOPPED' - - def __init__(self, duration=None): - self._duration = duration - self._started_at = None - self._stopped_at = None - self._state = None - - def start(self): - if self._state == self._STARTED: - return self - self._started_at = wallclock() - self._stopped_at = None - self._state = self._STARTED - return self - - def elapsed(self): - if self._state == self._STOPPED: - return float(self._stopped_at - self._started_at) - elif self._state == self._STARTED: - return float(wallclock() - self._started_at) - else: - raise RuntimeError("Can not get the elapsed time of an invalid" - " stopwatch") - - def __enter__(self): - self.start() - return self - - def __exit__(self, type, value, traceback): - try: - self.stop() - except RuntimeError: - pass - # NOTE(harlowja): don't silence the exception. - return False - - def leftover(self): - if self._duration is None: - raise RuntimeError("Can not get the leftover time of a watch that" - " has no duration") - if self._state != self._STARTED: - raise RuntimeError("Can not get the leftover time of a stopwatch" - " that has not been started") - end_time = self._started_at + self._duration - return max(0.0, end_time - wallclock()) - - def expired(self): - if self._duration is None: - return False - if self.elapsed() > self._duration: - return True - return False - - def resume(self): - if self._state == self._STOPPED: - self._state = self._STARTED - return self - else: - raise RuntimeError("Can not resume a stopwatch that has not been" - " stopped") - - def stop(self): - if self._state == self._STOPPED: - return self - if self._state != self._STARTED: - raise RuntimeError("Can not stop a stopwatch that has not been" - " started") - self._stopped_at = wallclock() - self._state = self._STOPPED - return self - - class Notifier(object): - """A utility helper class that can be used to subscribe to - notifications of events occurring as well as allow a entity to post said - notifications 
to subscribers. + """A notification helper class. + + It is intended to be used to subscribe to notifications of events + occurring as well as allow a entity to post said notifications to any + associated subscribers without having either entity care about how this + notification occurs. """ RESERVED_KEYS = ('details',) @@ -665,12 +542,15 @@ def are_equal_exc_info_tuples(ei1, ei2): @contextlib.contextmanager def capture_failure(): - """Save current exception, and yield back the failure (or raises a - runtime error if no active exception is being handled). + """Captures the occuring exception and provides a failure back. - In some cases the exception context can be cleared, resulting in None - being attempted to be saved after an exception handler is run. This - can happen when eventlet switches greenthreads or when running an + This will save the current exception information and yield back a + failure object for the caller to use (it will raise a runtime error if + no active exception is being handled). + + This is useful since in some cases the exception context can be cleared, + resulting in None being attempted to be saved after an exception handler is + run. This can happen when eventlet switches greenthreads or when running an exception handler, code raises and catches an exception. In both cases the exception context will be cleared. 
@@ -823,8 +703,23 @@ class Failure(object): return None def __str__(self): - return 'Failure: %s: %s' % (self._exc_type_names[0], - self._exception_str) + return self.pformat() + + def pformat(self, traceback=False): + buf = six.StringIO() + buf.write( + 'Failure: %s: %s' % (self._exc_type_names[0], self._exception_str)) + if traceback: + if self._traceback_str is not None: + traceback_str = self._traceback_str.rstrip() + else: + traceback_str = None + if traceback_str: + buf.write('\nTraceback (most recent call last):\n') + buf.write(traceback_str) + else: + buf.write('\nTraceback not available.') + return buf.getvalue() def __iter__(self): """Iterate over exception type names.""" diff --git a/taskflow/utils/persistence_utils.py b/taskflow/utils/persistence_utils.py index f58cea09..e3c4ba36 100644 --- a/taskflow/utils/persistence_utils.py +++ b/taskflow/utils/persistence_utils.py @@ -39,11 +39,10 @@ def temporary_log_book(backend=None): def temporary_flow_detail(backend=None): - """Creates a temporary flow detail and logbook for temporary usage in - the given backend. + """Creates a temporary flow detail and logbook in the given backend. Mainly useful for tests and other use cases where a temporary flow detail - is needed for a short-period of time. + and a temporary logbook is needed for a short-period of time. """ flow_id = uuidutils.generate_uuid() book = temporary_log_book(backend) @@ -57,9 +56,18 @@ def temporary_flow_detail(backend=None): def create_flow_detail(flow, book=None, backend=None, meta=None): - """Creates a flow detail for the given flow and adds it to the provided - logbook (if provided) and then uses the given backend (if provided) to - save the logbook then returns the created flow detail. + """Creates a flow detail for a flow & adds & saves it in a logbook. 
+ + This will create a flow detail for the given flow using the flow name, + and add it to the provided logbook and then uses the given backend to save + the logbook and then returns the created flow detail. + + If no book is provided a temporary one will be created automatically (no + reference to the logbook will be returned, so this should nearly *always* + be provided or only used in situations where no logbook is needed, for + example in tests). If no backend is provided then no saving will occur and + the created flow detail will not be persisted even if the flow detail was + added to a given (or temporarily generated) logbook. """ flow_id = uuidutils.generate_uuid() flow_name = getattr(flow, 'name', None) diff --git a/taskflow/utils/reflection.py b/taskflow/utils/reflection.py index a5d80b55..bc5a3223 100644 --- a/taskflow/utils/reflection.py +++ b/taskflow/utils/reflection.py @@ -21,6 +21,16 @@ import six from taskflow.openstack.common import importutils +try: + _TYPE_TYPE = types.TypeType +except AttributeError: + _TYPE_TYPE = type + +# See: https://docs.python.org/2/library/__builtin__.html#module-__builtin__ +# and see https://docs.python.org/2/reference/executionmodel.html (and likely +# others)... +_BUILTIN_MODULES = ('builtins', '__builtin__', 'exceptions') + def _get_members(obj, exclude_hidden): """Yields the members of an object, filtering by hidden/not hidden.""" @@ -31,8 +41,11 @@ def _get_members(obj, exclude_hidden): def find_subclasses(locations, base_cls, exclude_hidden=True): - """Examines the given locations for types which are subclasses of the base - class type provided and returns the found subclasses. + """Finds subclass types in the given locations. + + This will examines the given locations for types which are subclasses of + the base class type provided and returns the found subclasses (or fails + with exceptions if this introspection can not be accomplished). 
If a string is provided as one of the locations it will be imported and examined if it is a subclass of the base class. If a module is given, @@ -74,7 +87,7 @@ def get_member_names(obj, exclude_hidden=True): return [name for (name, _obj) in _get_members(obj, exclude_hidden)] -def get_class_name(obj): +def get_class_name(obj, fully_qualified=True): """Get class name for object. If object is a type, fully qualified name of the type is returned. @@ -83,9 +96,27 @@ def get_class_name(obj): """ if not isinstance(obj, six.class_types): obj = type(obj) - if obj.__module__ in ('builtins', '__builtin__', 'exceptions'): - return obj.__name__ - return '.'.join((obj.__module__, obj.__name__)) + try: + built_in = obj.__module__ in _BUILTIN_MODULES + except AttributeError: + pass + else: + if built_in: + try: + return obj.__qualname__ + except AttributeError: + return obj.__name__ + pieces = [] + try: + pieces.append(obj.__qualname__) + except AttributeError: + pieces.append(obj.__name__) + if fully_qualified: + try: + pieces.insert(0, obj.__module__) + except AttributeError: + pass + return '.'.join(pieces) def get_all_class_names(obj, up_to=object): @@ -109,21 +140,36 @@ def get_callable_name(function): """ method_self = get_method_self(function) if method_self is not None: - # this is bound method + # This is a bound method. if isinstance(method_self, six.class_types): - # this is bound class method + # This is a bound class method. im_class = method_self else: im_class = type(method_self) - parts = (im_class.__module__, im_class.__name__, - function.__name__) - elif inspect.isfunction(function) or inspect.ismethod(function): - parts = (function.__module__, function.__name__) + try: + parts = (im_class.__module__, function.__qualname__) + except AttributeError: + parts = (im_class.__module__, im_class.__name__, function.__name__) + elif inspect.ismethod(function) or inspect.isfunction(function): + # This could be a function, a static method, a unbound method... 
+ try: + parts = (function.__module__, function.__qualname__) + except AttributeError: + if hasattr(function, 'im_class'): + # This is a unbound method, which exists only in python 2.x + im_class = function.im_class + parts = (im_class.__module__, + im_class.__name__, function.__name__) + else: + parts = (function.__module__, function.__name__) else: im_class = type(function) - if im_class is type: + if im_class is _TYPE_TYPE: im_class = function - parts = (im_class.__module__, im_class.__name__) + try: + parts = (im_class.__module__, im_class.__qualname__) + except AttributeError: + parts = (im_class.__module__, im_class.__name__) return '.'.join(parts) diff --git a/taskflow/utils/threading_utils.py b/taskflow/utils/threading_utils.py index c669619a..2af17023 100644 --- a/taskflow/utils/threading_utils.py +++ b/taskflow/utils/threading_utils.py @@ -17,13 +17,12 @@ import multiprocessing import threading -import six +from six.moves import _thread -if six.PY2: - from thread import get_ident # noqa -else: - # In python3+ the get_ident call moved (whhhy??) - from threading import get_ident # noqa + +def get_ident(): + """Return the 'thread identifier' of the current thread.""" + return _thread.get_ident() def get_optimal_thread_count(): diff --git a/taskflow/version.py b/taskflow/version.py index 1777ba62..7f7fcd9a 100644 --- a/taskflow/version.py +++ b/taskflow/version.py @@ -14,17 +14,19 @@ # License for the specific language governing permissions and limitations # under the License. 
-from pbr import version as pbr_version +import pkg_resources TASK_VENDOR = "OpenStack Foundation" TASK_PRODUCT = "OpenStack TaskFlow" TASK_PACKAGE = None # OS distro package version suffix -version_info = pbr_version.VersionInfo('taskflow') - - -def version_string(): - return version_info.version_string() +try: + from pbr import version as pbr_version + _version_info = pbr_version.VersionInfo('taskflow') + version_string = _version_info.version_string +except ImportError: + _version_info = pkg_resources.get_distribution('taskflow') + version_string = lambda: _version_info.version def version_string_with_package(): diff --git a/test-requirements.txt b/test-requirements.txt index 8c0d3106..4068d786 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,11 +1,15 @@ -hacking>=0.8.0,<0.9 +# The order of packages is significant, because pip processes them in the order +# of appearance. Changing the order has an impact on the overall integration +# process, which may cause wedges in the gate later. + +hacking>=0.9.2,<0.10 discover coverage>=3.6 mock>=1.0 python-subunit>=0.0.18 testrepository>=0.0.18 testtools>=0.9.34 -zake>=0.0.18 +zake>=0.1 # Apache-2.0 # docs build jobs -sphinx>=1.2.1,<1.3 -oslosphinx +sphinx>=1.1.2,!=1.2.0,<1.3 +oslosphinx>=2.2.0.0a2 diff --git a/tools/generate_states.sh b/tools/generate_states.sh new file mode 100755 index 00000000..2da75817 --- /dev/null +++ b/tools/generate_states.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +set -u +xsltproc=`which xsltproc` +if [ -z "$xsltproc" ]; then + echo "Please install xsltproc before continuing." + exit 1 +fi + +set -e +if [ ! 
-d "$PWD/.diagram-tools" ]; then + git clone "https://github.com/vidarh/diagram-tools.git" "$PWD/.diagram-tools" +fi + +script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +img_dir="$script_dir/../doc/source/img" + +echo "---- Updating task state diagram ----" +python $script_dir/state_graph.py -t -f /tmp/states.svg +$xsltproc $PWD/.diagram-tools/notugly.xsl /tmp/states.svg > $img_dir/task_states.svg + +echo "---- Updating flow state diagram ----" +python $script_dir/state_graph.py -f /tmp/states.svg +$xsltproc $PWD/.diagram-tools/notugly.xsl /tmp/states.svg > $img_dir/flow_states.svg + +echo "---- Updating engine state diagram ----" +python $script_dir/state_graph.py -e -f /tmp/states.svg +$xsltproc $PWD/.diagram-tools/notugly.xsl /tmp/states.svg > $img_dir/engine_states.svg + +echo "---- Updating retry state diagram ----" +python $script_dir/state_graph.py -r -f /tmp/states.svg +$xsltproc $PWD/.diagram-tools/notugly.xsl /tmp/states.svg > $img_dir/retry_states.svg diff --git a/tools/state_graph.py b/tools/state_graph.py old mode 100644 new mode 100755 index f6a2057d..77b85636 --- a/tools/state_graph.py +++ b/tools/state_graph.py @@ -1,5 +1,6 @@ #!/usr/bin/env python +import optparse import os import sys @@ -7,41 +8,13 @@ top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) sys.path.insert(0, top_dir) -import optparse -import subprocess -import tempfile +import networkx as nx + +# To get this installed you may have to follow: +# https://code.google.com/p/pydot/issues/detail?id=93 (until fixed). 
+import pydot from taskflow import states -from taskflow.types import graph as gr - - -def mini_exec(cmd, ok_codes=(0,)): - stdout = subprocess.PIPE - stderr = subprocess.PIPE - proc = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, stdin=None) - (stdout, stderr) = proc.communicate() - rc = proc.returncode - if rc not in ok_codes: - raise RuntimeError("Could not run %s [%s]\nStderr: %s" - % (cmd, rc, stderr)) - return (stdout, stderr) - - -def make_svg(graph, output_filename, output_format): - # NOTE(harlowja): requires pydot! - gdot = graph.export_to_dot() - if output_format == 'dot': - output = gdot - elif output_format in ('svg', 'svgz', 'png'): - with tempfile.NamedTemporaryFile(suffix=".dot") as fh: - fh.write(gdot) - fh.flush() - cmd = ['dot', '-T%s' % output_format, fh.name] - output, _stderr = mini_exec(cmd) - else: - raise ValueError('Unknown format: %s' % output_filename) - with open(output_filename, "wb") as fh: - fh.write(output) def main(): @@ -52,6 +25,14 @@ def main(): action='store_true', help="use task state transitions", default=False) + parser.add_option("-r", "--retries", dest="retries", + action='store_true', + help="use retry state transitions", + default=False) + parser.add_option("-e", "--engines", dest="engines", + action='store_true', + help="use engine state transitions", + default=False) parser.add_option("-T", "--format", dest="format", help="output in given format", default='svg') @@ -60,20 +41,90 @@ def main(): if options.filename is None: options.filename = 'states.%s' % options.format - g = gr.DiGraph(name="State transitions") - if not options.tasks: - source = states._ALLOWED_FLOW_TRANSITIONS + types = [options.engines, options.retries, options.tasks] + if sum([int(i) for i in types]) > 1: + parser.error("Only one of task/retry/engines may be specified.") + + disallowed = set() + start_node = states.PENDING + if options.tasks: + source = list(states._ALLOWED_TASK_TRANSITIONS) + source_type = "Tasks" + 
disallowed.add(states.RETRYING) + elif options.retries: + source = list(states._ALLOWED_TASK_TRANSITIONS) + source_type = "Retries" + elif options.engines: + # TODO(harlowja): place this in states.py + source = [ + (states.RESUMING, states.SCHEDULING), + (states.SCHEDULING, states.WAITING), + (states.WAITING, states.ANALYZING), + (states.ANALYZING, states.SCHEDULING), + (states.ANALYZING, states.WAITING), + ] + for u in (states.SCHEDULING, states.ANALYZING): + for v in (states.SUSPENDED, states.SUCCESS, states.REVERTED): + source.append((u, v)) + source_type = "Engines" + start_node = states.RESUMING else: - source = states._ALLOWED_TASK_TRANSITIONS + source = list(states._ALLOWED_FLOW_TRANSITIONS) + source_type = "Flow" + + transitions = nx.DiGraph() for (u, v) in source: - if not g.has_node(u): - g.add_node(u) - if not g.has_node(v): - g.add_node(v) - g.add_edge(u, v) - make_svg(g, options.filename, options.format) + if u not in disallowed: + transitions.add_node(u) + if v not in disallowed: + transitions.add_node(v) + for (u, v) in source: + if not transitions.has_node(u) or not transitions.has_node(v): + continue + transitions.add_edge(u, v) + + graph_name = "%s states" % source_type + g = pydot.Dot(graph_name=graph_name, rankdir='LR', + nodesep='0.25', overlap='false', + ranksep="0.5", size="11x8.5", + splines='true', ordering='in') + node_attrs = { + 'fontsize': '11', + } + nodes = {} + nodes_order = [] + edges_added = [] + for (u, v) in nx.bfs_edges(transitions, source=start_node): + if u not in nodes: + nodes[u] = pydot.Node(u, **node_attrs) + g.add_node(nodes[u]) + nodes_order.append(u) + if v not in nodes: + nodes[v] = pydot.Node(v, **node_attrs) + g.add_node(nodes[v]) + nodes_order.append(v) + for u in nodes_order: + for v in transitions.successors_iter(u): + if (u, v) not in edges_added: + g.add_edge(pydot.Edge(nodes[u], nodes[v])) + edges_added.append((u, v)) + start = pydot.Node("__start__", shape="point", width="0.1", + xlabel='start', 
fontcolor='green', **node_attrs) + g.add_node(start) + g.add_edge(pydot.Edge(start, nodes[start_node], style='dotted')) + + print("*" * len(graph_name)) + print(graph_name) + print("*" * len(graph_name)) + print(g.to_string().strip()) + + g.write(options.filename, format=options.format) print("Created %s at '%s'" % (options.format, options.filename)) + # To make the svg more pretty use the following: + # $ xsltproc ../diagram-tools/notugly.xsl ./states.svg > pretty-states.svg + # Get diagram-tools from https://github.com/vidarh/diagram-tools.git + if __name__ == '__main__': main() diff --git a/tox-tmpl.ini b/tox-tmpl.ini index 96df6f62..8ad9f332 100644 --- a/tox-tmpl.ini +++ b/tox-tmpl.ini @@ -10,8 +10,7 @@ skipsdist = True usedevelop = True install_command = pip install {opts} {packages} setenv = VIRTUAL_ENV={envdir} -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +deps = -r{toxinidir}/test-requirements.txt alembic>=0.4.1 psycopg2 kazoo>=1.3.1 @@ -26,7 +25,7 @@ commands = flake8 {posargs} [testenv:pylint] setenv = VIRTUAL_ENV={envdir} -deps = -r{toxinidir}/requirements.txt +deps = -r{toxinidir}/requirements-py2.txt pylint==0.26.0 commands = pylint --rcfile=pylintrc taskflow @@ -39,6 +38,8 @@ commands = python setup.py testr --coverage --testr-args='{posargs}' commands = {posargs} [flake8] +# H904 Wrap long lines in parentheses instead of a backslash +ignore = H904 builtins = _ exclude = .venv,.tox,dist,doc,./taskflow/openstack/common,*egg,.git,build,tools @@ -48,21 +49,23 @@ exclude = .venv,.tox,dist,doc,./taskflow/openstack/common,*egg,.git,build,tools deps = {[testenv:py26-sa7-mysql-ev]deps} [testenv:py27] -deps = -r{toxinidir}/requirements.txt +deps = -r{toxinidir}/requirements-py2.txt -r{toxinidir}/optional-requirements.txt -r{toxinidir}/test-requirements.txt + doc8>=0.3.4 commands = python setup.py testr --slowest --testr-args='{posargs}' sphinx-build -b doctest doc/source doc/build - python tools/check_doc.py doc/source + doc8 
doc/source [testenv:py33] deps = {[testenv]deps} + -r{toxinidir}/requirements-py3.txt SQLAlchemy>=0.7.8,<=0.9.99 # NOTE(imelnikov): psycopg2 is not supported on pypy [testenv:pypy] -deps = -r{toxinidir}/requirements.txt +deps = -r{toxinidir}/requirements-py2.txt -r{toxinidir}/test-requirements.txt SQLAlchemy>=0.7.8,<=0.9.99 alembic>=0.4.1 @@ -78,10 +81,12 @@ eventlet = ev,* [axis:python:py26] basepython = python2.6 deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt [axis:python:py27] basepython = python2.7 deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt [axis:eventlet:ev] deps = diff --git a/tox.ini b/tox.ini index 4768beaa..0283c14d 100644 --- a/tox.ini +++ b/tox.ini @@ -39,8 +39,7 @@ envlist = cover, usedevelop = True install_command = pip install {opts} {packages} setenv = VIRTUAL_ENV={envdir} -deps = -r{toxinidir}/requirements.txt - -r{toxinidir}/test-requirements.txt +deps = -r{toxinidir}/test-requirements.txt alembic>=0.4.1 psycopg2 kazoo>=1.3.1 @@ -55,7 +54,7 @@ commands = flake8 {posargs} [testenv:pylint] setenv = VIRTUAL_ENV={envdir} -deps = -r{toxinidir}/requirements.txt +deps = -r{toxinidir}/requirements-py2.txt pylint==0.26.0 commands = pylint --rcfile=pylintrc taskflow @@ -68,6 +67,7 @@ commands = python setup.py testr --coverage --testr-args='{posargs}' commands = {posargs} [flake8] +ignore = H904 builtins = _ exclude = .venv,.tox,dist,doc,./taskflow/openstack/common,*egg,.git,build,tools @@ -75,20 +75,22 @@ exclude = .venv,.tox,dist,doc,./taskflow/openstack/common,*egg,.git,build,tools deps = {[testenv:py26-sa7-mysql-ev]deps} [testenv:py27] -deps = -r{toxinidir}/requirements.txt +deps = -r{toxinidir}/requirements-py2.txt -r{toxinidir}/optional-requirements.txt -r{toxinidir}/test-requirements.txt + doc8>=0.3.4 commands = python setup.py testr --slowest --testr-args='{posargs}' sphinx-build -b doctest doc/source doc/build - python tools/check_doc.py doc/source + doc8 doc/source [testenv:py33] deps = {[testenv]deps} + 
-r{toxinidir}/requirements-py3.txt SQLAlchemy>=0.7.8,<=0.9.99 [testenv:pypy] -deps = -r{toxinidir}/requirements.txt +deps = -r{toxinidir}/requirements-py2.txt -r{toxinidir}/test-requirements.txt SQLAlchemy>=0.7.8,<=0.9.99 alembic>=0.4.1 @@ -97,6 +99,7 @@ deps = -r{toxinidir}/requirements.txt [testenv:py26-sa7-mysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.7.8,<=0.7.99 MySQL-python eventlet>=0.13.0 @@ -104,12 +107,14 @@ basepython = python2.6 [testenv:py26-sa7-mysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.7.8,<=0.7.99 MySQL-python basepython = python2.6 [testenv:py26-sa7-pymysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.7.8,<=0.7.99 pyMySQL eventlet>=0.13.0 @@ -117,12 +122,14 @@ basepython = python2.6 [testenv:py26-sa7-pymysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.7.8,<=0.7.99 pyMySQL basepython = python2.6 [testenv:py26-sa8-mysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.8,<=0.8.99 MySQL-python eventlet>=0.13.0 @@ -130,12 +137,14 @@ basepython = python2.6 [testenv:py26-sa8-mysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.8,<=0.8.99 MySQL-python basepython = python2.6 [testenv:py26-sa8-pymysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.8,<=0.8.99 pyMySQL eventlet>=0.13.0 @@ -143,12 +152,14 @@ basepython = python2.6 [testenv:py26-sa8-pymysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.8,<=0.8.99 pyMySQL basepython = python2.6 [testenv:py26-sa9-mysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.9,<=0.9.99 MySQL-python eventlet>=0.13.0 @@ -156,12 +167,14 @@ basepython = python2.6 [testenv:py26-sa9-mysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.9,<=0.9.99 MySQL-python basepython = python2.6 [testenv:py26-sa9-pymysql-ev] deps = 
{[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.9,<=0.9.99 pyMySQL eventlet>=0.13.0 @@ -169,12 +182,14 @@ basepython = python2.6 [testenv:py26-sa9-pymysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.9,<=0.9.99 pyMySQL basepython = python2.6 [testenv:py27-sa7-mysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.7.8,<=0.7.99 MySQL-python eventlet>=0.13.0 @@ -182,12 +197,14 @@ basepython = python2.7 [testenv:py27-sa7-mysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.7.8,<=0.7.99 MySQL-python basepython = python2.7 [testenv:py27-sa7-pymysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.7.8,<=0.7.99 pyMySQL eventlet>=0.13.0 @@ -195,12 +212,14 @@ basepython = python2.7 [testenv:py27-sa7-pymysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.7.8,<=0.7.99 pyMySQL basepython = python2.7 [testenv:py27-sa8-mysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.8,<=0.8.99 MySQL-python eventlet>=0.13.0 @@ -208,12 +227,14 @@ basepython = python2.7 [testenv:py27-sa8-mysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.8,<=0.8.99 MySQL-python basepython = python2.7 [testenv:py27-sa8-pymysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.8,<=0.8.99 pyMySQL eventlet>=0.13.0 @@ -221,12 +242,14 @@ basepython = python2.7 [testenv:py27-sa8-pymysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.8,<=0.8.99 pyMySQL basepython = python2.7 [testenv:py27-sa9-mysql-ev] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.9,<=0.9.99 MySQL-python eventlet>=0.13.0 @@ -234,12 +257,14 @@ basepython = python2.7 [testenv:py27-sa9-mysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.9,<=0.9.99 MySQL-python basepython = python2.7 [testenv:py27-sa9-pymysql-ev] deps = 
{[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.9,<=0.9.99 pyMySQL eventlet>=0.13.0 @@ -247,6 +272,7 @@ basepython = python2.7 [testenv:py27-sa9-pymysql] deps = {[testenv]deps} + -r{toxinidir}/requirements-py2.txt SQLAlchemy>=0.9,<=0.9.99 pyMySQL basepython = python2.7