diff --git a/congress/api/webservice.py b/congress/api/webservice.py
index 99884b2a2..c1101189d 100644
--- a/congress/api/webservice.py
+++ b/congress/api/webservice.py
@@ -37,7 +37,7 @@ def error_response(status, error_code, description, data=None):
     Args:
         status: The HTTP status code of the response.
         error_code: The application-specific error code.
-        description: Friendly G11N-enabled string corresponding ot error_code.
+        description: Friendly G11N-enabled string corresponding to error_code.
         data: Additional data (not G11N-enabled) for the API consumer.
     """
     raw_body = {
@@ -145,9 +145,9 @@ class ElementHandler(AbstractApiHandler):
         Args:
            path_regex: A regular expression that matches the full path
                to the element. If multiple handlers match a request path,
-               the handler with the highhest registration search_index wins.
+               the handler with the highest registration search_index wins.
            model: A resource data model instance
-           collection_handler: The collection handler this elemeent
+           collection_handler: The collection handler this element
                is a member of or None if the element is not a member of
                a collection. (Used for named creation of elements)
            allow_read: True if element supports read
diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/dfa.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/dfa.py
index 95ad15aba..105386245 100644
--- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/dfa.py
+++ b/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/dfa.py
@@ -177,7 +177,7 @@ class DFA(object):
        Python does not have any size restrictions, but the compilation of
        such large source files seems to be pretty memory hungry. The memory
        consumption of the python process grew to >1.5GB when importing a
-       15MB lexer, eating all my swap space and I was to impacient to see,
+       15MB lexer, eating all my swap space, and I was too impatient to see
        if it could finish at all. With packed initializers that are unpacked
        at import time of the lexer module, everything works like a charm.
 
diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/recognizers.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/recognizers.py
index 3fdb593cc..3dceccec1 100644
--- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/recognizers.py
+++ b/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/recognizers.py
@@ -553,7 +553,7 @@ class BaseRecognizer(object):
        exits normally returning to rule a. Now it finds the ']' (and
        with the successful match exits errorRecovery mode).
 
-       So, you cna see that the parser walks up call chain looking
+       So, you can see that the parser walks up the call chain looking
        for the token that was a member of the recovery set.
 
        Errors are not generated in errorRecovery mode.
@@ -994,7 +994,7 @@
 
    def __iter__(self):
-       """The TokenSource is an interator.
+       """The TokenSource is an iterator.
 
        The iteration will not include the final EOF token, see also
        the note for the __next__() method.
 
diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/tree.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/tree.py
index 0a3214b34..50a6eed4f 100644
--- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/tree.py
+++ b/thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/tree.py
@@ -1653,7 +1653,7 @@ class TreeNodeStream(IntStream):
        Return null for LT(0) and any index that results in an
        absolute address that is negative.
 
-       This is analogus to the LT() method of the TokenStream, but this
+       This is analogous to the LT() method of the TokenStream, but this
        returns a tree node instead of a token. Makes code gen identical
        for both parser and tree grammars. :)
        """
diff --git a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testdfa.py b/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testdfa.py
index 7ae362d16..45e7a50e9 100644
--- a/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testdfa.py
+++ b/thirdparty/antlr3-antlr-3.5/runtime/Python3/unittests/testdfa.py
@@ -10,7 +10,7 @@ class TestDFA(unittest.TestCase):
 
    def setUp(self):
-       """Setup test fixure.
+       """Set up test fixture.
 
-       We need a Recognizer in order to instanciate a DFA.
+       We need a Recognizer in order to instantiate a DFA.
 
        """
diff --git a/thirdparty/antlr3/debug.py b/thirdparty/antlr3/debug.py
index 6668fa58e..5a578411c 100644
--- a/thirdparty/antlr3/debug.py
+++ b/thirdparty/antlr3/debug.py
@@ -496,7 +496,7 @@ class DebugEventListener(object):
 
 
    def rewind(self, marker=None):
-       """After an arbitrairly long lookahead as with a cyclic DFA (or with
-       any backtrack), this informs the debugger that stream should be
+       """After an arbitrarily long lookahead as with a cyclic DFA (or with
+       any backtrack), this informs the debugger that the stream should be
        rewound to the position associated with marker.
 
@@ -526,12 +526,12 @@
 
    def recognitionException(self, e):
        """A recognition exception occurred such as NoViableAltException. I made
-       this a generic event so that I can alter the exception hierachy later
+       this a generic event so that I can alter the exception hierarchy later
        without having to alter all the debug objects.
 
        Upon error, the stack of enter rule/subrule must be properly
        unwound. If no viable alt occurs it is within an enter/exit decision, which
-       also must be rewound. Even the rewind for each mark must be unwount.
+       also must be rewound. Even the rewind for each mark must be unwound.
        In the Java target this is pretty easy using try/finally, if a bit
        ugly in the generated code. The rewind is generated in DFA.predict()
        actually so no code needs to be generated for that. For languages
@@ -594,7 +594,7 @@
 
    def endResync(self):
        """Indicates that the recognizer has finished consuming tokens in order
-       to resychronize. There may be multiple beginResync/endResync pairs
+       to resynchronize. There may be multiple beginResync/endResync pairs
        before the recognizer comes out of errorRecovery mode (in which
        multiple errors are suppressed). This will be useful in a gui where
        you want to probably grey out tokens that are consumed
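
The dfa.py comment touched above motivates packed initializers: large DFA tables are shipped run-length encoded and expanded once, at import time of the generated lexer module. A minimal sketch of that idea, assuming a simple (count, value) pairing; the real antlr3 unpack format may differ in detail:

    def unpack(packed):
        """Expand (count, value) run-length pairs into a flat list."""
        unpacked = []
        for count, value in zip(packed[::2], packed[1::2]):
            unpacked.extend([value] * count)
        return unpacked

    # Five 0 entries followed by two 1 entries:
    assert unpack([5, 0, 2, 1]) == [0, 0, 0, 0, 0, 1, 1]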
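
The BaseRecognizer docstring above describes recovery after a mismatch: the parser consumes tokens until it reaches one that belongs to the follow set of some rule on the call stack (the ']' in the docstring's example). A hedged sketch of that walk with hypothetical names; the actual runtime computes follow sets from the grammar rather than taking them as arguments:

    # Consume tokens until one appears in the combined follow sets of
    # the rules currently on the call stack.
    def recover(tokens, pos, follow_stack):
        recovery_set = set().union(*follow_stack)
        while pos < len(tokens) and tokens[pos] not in recovery_set:
            pos += 1  # skip tokens that no active rule can follow
        return pos

    # Rule a expects ']' after rule b; on a mismatch inside b the
    # parser skips ahead to the ']' token.
    assert recover(['[', 'x', '@', 'y', ']'], 2, [set(), {']'}]) == 4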
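
Likewise, the TokenSource docstring fixed above pins down an iteration contract: __next__() hands out tokens until EOF, and the EOF token itself is never yielded. A small sketch of a source honouring that contract, using a hypothetical list-backed class rather than the runtime's Lexer:

    EOF = -1

    class ListTokenSource(object):
        """Hypothetical token source over (type, text) pairs."""

        def __init__(self, tokens):
            self._tokens = iter(tokens)

        def nextToken(self):
            # Real token sources return an EOF token forever once the
            # input is exhausted; a default value emulates that here.
            return next(self._tokens, (EOF, None))

        def __iter__(self):
            return self

        def __next__(self):
            token = self.nextToken()
            if token[0] == EOF:
                raise StopIteration  # EOF is not yielded to the caller
            return token

    for token_type, text in ListTokenSource([(4, 'foo'), (5, 'bar')]):
        print(token_type, text)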